LLVM 22.0.0git
LoopVectorize.cpp
Go to the documentation of this file.
1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
17//
18// This pass has three parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cstdint>
150#include <functional>
151#include <iterator>
152#include <limits>
153#include <memory>
154#include <string>
155#include <tuple>
156#include <utility>
157
158using namespace llvm;
159using namespace SCEVPatternMatch;
160
161#define LV_NAME "loop-vectorize"
162#define DEBUG_TYPE LV_NAME
163
164#ifndef NDEBUG
165const char VerboseDebug[] = DEBUG_TYPE "-verbose";
166#endif
167
168STATISTIC(LoopsVectorized, "Number of loops vectorized");
169STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
170STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
171STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
172
174 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
175 cl::desc("Enable vectorization of epilogue loops."));
176
178 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
179 cl::desc("When epilogue vectorization is enabled, and a value greater than "
180 "1 is specified, forces the given VF for all applicable epilogue "
181 "loops."));
182
184 "epilogue-vectorization-minimum-VF", cl::Hidden,
185 cl::desc("Only loops with vectorization factor equal to or larger than "
186 "the specified value are considered for epilogue vectorization."));
187
188/// Loops with a known constant trip count below this number are vectorized only
189/// if no scalar iteration overheads are incurred.
191 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
192 cl::desc("Loops with a constant trip count that is smaller than this "
193 "value are vectorized only if no scalar iteration overheads "
194 "are incurred."));
195
197 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
198 cl::desc("The maximum allowed number of runtime memory checks"));
199
200// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
201// that predication is preferred, and this lists all options. I.e., the
202// vectorizer will try to fold the tail-loop (epilogue) into the vector body
203// and predicate the instructions accordingly. If tail-folding fails, there are
204// different fallback strategies depending on these values:
211} // namespace PreferPredicateTy
212
214 "prefer-predicate-over-epilogue",
217 cl::desc("Tail-folding and predication preferences over creating a scalar "
218 "epilogue loop."),
220 "scalar-epilogue",
221 "Don't tail-predicate loops, create scalar epilogue"),
223 "predicate-else-scalar-epilogue",
224 "prefer tail-folding, create scalar epilogue if tail "
225 "folding fails."),
227 "predicate-dont-vectorize",
228 "prefers tail-folding, don't attempt vectorization if "
229 "tail-folding fails.")));
230
232 "force-tail-folding-style", cl::desc("Force the tail folding style"),
235 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
238 "Create lane mask for data only, using active.lane.mask intrinsic"),
240 "data-without-lane-mask",
241 "Create lane mask with compare/stepvector"),
243 "Create lane mask using active.lane.mask intrinsic, and use "
244 "it for both data and control flow"),
246 "data-and-control-without-rt-check",
247 "Similar to data-and-control, but remove the runtime check"),
249 "Use predicated EVL instructions for tail folding. If EVL "
250 "is unsupported, fallback to data-without-lane-mask.")));
251
253 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
254 cl::desc("Maximize bandwidth when selecting vectorization factor which "
255 "will be determined by the smallest type in loop."));
256
258 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
259 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
260
261/// An interleave-group may need masking if it resides in a block that needs
262/// predication, or in order to mask away gaps.
264 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
265 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
266
268 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
269 cl::desc("A flag that overrides the target's number of scalar registers."));
270
272 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
273 cl::desc("A flag that overrides the target's number of vector registers."));
274
276 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
277 cl::desc("A flag that overrides the target's max interleave factor for "
278 "scalar loops."));
279
281 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
282 cl::desc("A flag that overrides the target's max interleave factor for "
283 "vectorized loops."));
284
286 "force-target-instruction-cost", cl::init(0), cl::Hidden,
287 cl::desc("A flag that overrides the target's expected cost for "
288 "an instruction to a single constant value. Mostly "
289 "useful for getting consistent testing."));
290
292 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
293 cl::desc(
294 "Pretend that scalable vectors are supported, even if the target does "
295 "not support them. This flag should only be used for testing."));
296
298 "small-loop-cost", cl::init(20), cl::Hidden,
299 cl::desc(
300 "The cost of a loop that is considered 'small' by the interleaver."));
301
303 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
304 cl::desc("Enable the use of the block frequency analysis to access PGO "
305 "heuristics minimizing code growth in cold regions and being more "
306 "aggressive in hot regions."));
307
308// Runtime interleave loops for load/store throughput.
310 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
311 cl::desc(
312 "Enable runtime interleaving until load/store ports are saturated"));
313
314/// The number of stores in a loop that are allowed to need predication.
316 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
317 cl::desc("Max number of stores to be predicated behind an if."));
318
320 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
321 cl::desc("Count the induction variable only once when interleaving"));
322
324 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
325 cl::desc("Enable if predication of stores during vectorization."));
326
328 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
329 cl::desc("The maximum interleave count to use when interleaving a scalar "
330 "reduction in a nested loop."));
331
332static cl::opt<bool>
333 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
335 cl::desc("Prefer in-loop vector reductions, "
336 "overriding the targets preference."));
337
339 "force-ordered-reductions", cl::init(false), cl::Hidden,
340 cl::desc("Enable the vectorisation of loops with in-order (strict) "
341 "FP reductions"));
342
344 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
345 cl::desc(
346 "Prefer predicating a reduction operation over an after loop select."));
347
349 "enable-vplan-native-path", cl::Hidden,
350 cl::desc("Enable VPlan-native vectorization path with "
351 "support for outer loop vectorization."));
352
354 llvm::VerifyEachVPlan("vplan-verify-each",
355#ifdef EXPENSIVE_CHECKS
356 cl::init(true),
357#else
358 cl::init(false),
359#endif
361 cl::desc("Verfiy VPlans after VPlan transforms."));
362
363// This flag enables the stress testing of the VPlan H-CFG construction in the
364// VPlan-native vectorization path. It must be used in conjuction with
365// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
366// verification of the H-CFGs built.
368 "vplan-build-stress-test", cl::init(false), cl::Hidden,
369 cl::desc(
370 "Build VPlan for every supported loop nest in the function and bail "
371 "out right after the build (stress test the VPlan H-CFG construction "
372 "in the VPlan-native vectorization path)."));
373
375 "interleave-loops", cl::init(true), cl::Hidden,
376 cl::desc("Enable loop interleaving in Loop vectorization passes"));
378 "vectorize-loops", cl::init(true), cl::Hidden,
379 cl::desc("Run the Loop vectorization passes"));
380
382 "force-widen-divrem-via-safe-divisor", cl::Hidden,
383 cl::desc(
384 "Override cost based safe divisor widening for div/rem instructions"));
385
387 "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
389 cl::desc("Try wider VFs if they enable the use of vector variants"));
390
392 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
393 cl::desc(
394 "Enable vectorization of early exit loops with uncountable exits."));
395
397 "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
398 cl::desc("Discard VFs if their register pressure is too high."));
399
400// Likelyhood of bypassing the vectorized loop because there are zero trips left
401// after prolog. See `emitIterationCountCheck`.
402static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
403
404/// A helper function that returns true if the given type is irregular. The
405/// type is irregular if its allocated size doesn't equal the store size of an
406/// element of the corresponding vector type.
407static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
408 // Determine if an array of N elements of type Ty is "bitcast compatible"
409 // with a <N x Ty> vector.
410 // This is only true if there is no padding between the array elements.
411 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
412}
413
414/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
415/// ElementCount to include loops whose trip count is a function of vscale.
417 const Loop *L) {
418 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
419 return ElementCount::getFixed(ExpectedTC);
420
421 const SCEV *BTC = SE->getBackedgeTakenCount(L);
423 return ElementCount::getFixed(0);
424
425 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
426 if (isa<SCEVVScale>(ExitCount))
428
429 const APInt *Scale;
430 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
431 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
432 if (Scale->getActiveBits() <= 32)
434
435 return ElementCount::getFixed(0);
436}
437
438/// Returns "best known" trip count, which is either a valid positive trip count
439/// or std::nullopt when an estimate cannot be made (including when the trip
440/// count would overflow), for the specified loop \p L as defined by the
441/// following procedure:
442/// 1) Returns exact trip count if it is known.
443/// 2) Returns expected trip count according to profile data if any.
444/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
445/// 4) Returns std::nullopt if all of the above failed.
446static std::optional<ElementCount>
448 bool CanUseConstantMax = true) {
449 // Check if exact trip count is known.
450 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
451 return ExpectedTC;
452
453 // Check if there is an expected trip count available from profile data.
455 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
456 return ElementCount::getFixed(*EstimatedTC);
457
458 if (!CanUseConstantMax)
459 return std::nullopt;
460
461 // Check if upper bound estimate is known.
462 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
463 return ElementCount::getFixed(ExpectedTC);
464
465 return std::nullopt;
466}
467
468namespace {
469// Forward declare GeneratedRTChecks.
470class GeneratedRTChecks;
471
472using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
473} // namespace
474
475namespace llvm {
476
478
479/// InnerLoopVectorizer vectorizes loops which contain only one basic
480/// block to a specified vectorization factor (VF).
481/// This class performs the widening of scalars into vectors, or multiple
482/// scalars. This class also implements the following features:
483/// * It inserts an epilogue loop for handling loops that don't have iteration
484/// counts that are known to be a multiple of the vectorization factor.
485/// * It handles the code generation for reduction variables.
486/// * Scalarization (implementation using scalars) of un-vectorizable
487/// instructions.
488/// InnerLoopVectorizer does not perform any vectorization-legality
489/// checks, and relies on the caller to check for the different legality
490/// aspects. The InnerLoopVectorizer relies on the
491/// LoopVectorizationLegality class to provide information about the induction
492/// and reduction variables that were found to a given vectorization factor.
494public:
498 ElementCount VecWidth, unsigned UnrollFactor,
500 ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks,
501 VPlan &Plan)
502 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
503 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
506 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
507
508 virtual ~InnerLoopVectorizer() = default;
509
510 /// Creates a basic block for the scalar preheader. Both
511 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop overwrite
512 /// the method to create additional blocks and checks needed for epilogue
513 /// vectorization.
515
516 /// Fix the vectorized code, taking care of header phi's, and more.
518
519 /// Fix the non-induction PHIs in \p Plan.
521
522 /// Returns the original loop trip count.
523 Value *getTripCount() const { return TripCount; }
524
525 /// Used to set the trip count after ILV's construction and after the
526 /// preheader block has been executed. Note that this always holds the trip
527 /// count of the original loop for both main loop and epilogue vectorization.
528 void setTripCount(Value *TC) { TripCount = TC; }
529
530protected:
532
533 /// Create and return a new IR basic block for the scalar preheader whose name
534 /// is prefixed with \p Prefix.
536
537 /// Allow subclasses to override and print debug traces before/after vplan
538 /// execution, when trace information is requested.
539 virtual void printDebugTracesAtStart() {}
540 virtual void printDebugTracesAtEnd() {}
541
542 /// The original loop.
544
545 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
546 /// dynamic knowledge to simplify SCEV expressions and converts them to a
547 /// more usable form.
549
550 /// Loop Info.
552
553 /// Dominator Tree.
555
556 /// Target Transform Info.
558
559 /// Assumption Cache.
561
562 /// The vectorization SIMD factor to use. Each vector will have this many
563 /// vector elements.
565
566 /// The vectorization unroll factor to use. Each scalar is vectorized to this
567 /// many different vector instructions.
568 unsigned UF;
569
570 /// The builder that we use
572
573 // --- Vectorization state ---
574
575 /// Trip count of the original loop.
576 Value *TripCount = nullptr;
577
578 /// The profitablity analysis.
580
581 /// BFI and PSI are used to check for profile guided size optimizations.
584
585 /// Structure to hold information about generated runtime checks, responsible
586 /// for cleaning the checks, if vectorization turns out unprofitable.
587 GeneratedRTChecks &RTChecks;
588
590
591 /// The vector preheader block of \p Plan, used as target for check blocks
592 /// introduced during skeleton creation.
594};
595
596/// Encapsulate information regarding vectorization of a loop and its epilogue.
597/// This information is meant to be updated and used across two stages of
598/// epilogue vectorization.
601 unsigned MainLoopUF = 0;
603 unsigned EpilogueUF = 0;
606 Value *TripCount = nullptr;
609
611 ElementCount EVF, unsigned EUF,
613 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
615 assert(EUF == 1 &&
616 "A high UF for the epilogue loop is likely not beneficial.");
617 }
618};
619
620/// An extension of the inner loop vectorizer that creates a skeleton for a
621/// vectorized loop that has its epilogue (residual) also vectorized.
622/// The idea is to run the vplan on a given loop twice, firstly to setup the
623/// skeleton and vectorize the main loop, and secondly to complete the skeleton
624/// from the first step and vectorize the epilogue. This is achieved by
625/// deriving two concrete strategy classes from this base class and invoking
626/// them in succession from the loop vectorizer planner.
628public:
639
640 /// Holds and updates state information required to vectorize the main loop
641 /// and its epilogue in two separate passes. This setup helps us avoid
642 /// regenerating and recomputing runtime safety checks. It also helps us to
643 /// shorten the iteration-count-check path length for the cases where the
644 /// iteration count of the loop is so small that the main vector loop is
645 /// completely skipped.
647
648protected:
650};
651
652/// A specialized derived class of inner loop vectorizer that performs
653/// vectorization of *main* loops in the process of vectorizing loops and their
654/// epilogues.
656public:
668 /// Implements the interface for creating a vectorized skeleton using the
669 /// *main loop* strategy (i.e., the first pass of VPlan execution).
671
672protected:
673 /// Introduces a new VPIRBasicBlock for \p CheckIRBB to Plan between the
674 /// vector preheader and its predecessor, also connecting the new block to the
675 /// scalar preheader.
676 void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB);
677
678 // Create a check to see if the main vector loop should be executed
680 unsigned UF) const;
681
682 /// Emits an iteration count bypass check once for the main loop (when \p
683 /// ForEpilogue is false) and once for the epilogue loop (when \p
684 /// ForEpilogue is true).
686 bool ForEpilogue);
687 void printDebugTracesAtStart() override;
688 void printDebugTracesAtEnd() override;
689};
690
691// A specialized derived class of inner loop vectorizer that performs
692// vectorization of *epilogue* loops in the process of vectorizing loops and
693// their epilogues.
695public:
705 /// Implements the interface for creating a vectorized skeleton using the
706 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
708
709protected:
710 void printDebugTracesAtStart() override;
711 void printDebugTracesAtEnd() override;
712};
713} // end namespace llvm
714
715/// Look for a meaningful debug location on the instruction or its operands.
717 if (!I)
718 return DebugLoc::getUnknown();
719
721 if (I->getDebugLoc() != Empty)
722 return I->getDebugLoc();
723
724 for (Use &Op : I->operands()) {
725 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
726 if (OpInst->getDebugLoc() != Empty)
727 return OpInst->getDebugLoc();
728 }
729
730 return I->getDebugLoc();
731}
732
733/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
734/// is passed, the message relates to that particular instruction.
735#ifndef NDEBUG
736static void debugVectorizationMessage(const StringRef Prefix,
737 const StringRef DebugMsg,
738 Instruction *I) {
739 dbgs() << "LV: " << Prefix << DebugMsg;
740 if (I != nullptr)
741 dbgs() << " " << *I;
742 else
743 dbgs() << '.';
744 dbgs() << '\n';
745}
746#endif
747
748/// Create an analysis remark that explains why vectorization failed
749///
750/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
751/// RemarkName is the identifier for the remark. If \p I is passed it is an
752/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
753/// the location of the remark. If \p DL is passed, use it as debug location for
754/// the remark. \return the remark object that can be streamed to.
755static OptimizationRemarkAnalysis
756createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
757 Instruction *I, DebugLoc DL = {}) {
758 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
759 // If debug location is attached to the instruction, use it. Otherwise if DL
760 // was not provided, use the loop's.
761 if (I && I->getDebugLoc())
762 DL = I->getDebugLoc();
763 else if (!DL)
764 DL = TheLoop->getStartLoc();
765
766 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
767}
768
769namespace llvm {
770
771/// Return a value for Step multiplied by VF.
773 int64_t Step) {
774 assert(Ty->isIntegerTy() && "Expected an integer step");
775 ElementCount VFxStep = VF.multiplyCoefficientBy(Step);
776 assert(isPowerOf2_64(VF.getKnownMinValue()) && "must pass power-of-2 VF");
777 if (VF.isScalable() && isPowerOf2_64(Step)) {
778 return B.CreateShl(
779 B.CreateVScale(Ty),
780 ConstantInt::get(Ty, Log2_64(VFxStep.getKnownMinValue())), "", true);
781 }
782 return B.CreateElementCount(Ty, VFxStep);
783}
784
785/// Return the runtime value for VF.
787 return B.CreateElementCount(Ty, VF);
788}
789
791 const StringRef OREMsg, const StringRef ORETag,
792 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
793 Instruction *I) {
794 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
795 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
796 ORE->emit(
797 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
798 << "loop not vectorized: " << OREMsg);
799}
800
801/// Reports an informative message: print \p Msg for debugging purposes as well
802/// as an optimization remark. Uses either \p I as location of the remark, or
803/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
804/// remark. If \p DL is passed, use it as debug location for the remark.
805static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
807 Loop *TheLoop, Instruction *I = nullptr,
808 DebugLoc DL = {}) {
810 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
811 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
812 I, DL)
813 << Msg);
814}
815
816/// Report successful vectorization of the loop. In case an outer loop is
817/// vectorized, prepend "outer" to the vectorization remark.
819 VectorizationFactor VF, unsigned IC) {
821 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
822 nullptr));
823 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
824 ORE->emit([&]() {
825 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
826 TheLoop->getHeader())
827 << "vectorized " << LoopType << "loop (vectorization width: "
828 << ore::NV("VectorizationFactor", VF.Width)
829 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
830 });
831}
832
833} // end namespace llvm
834
835namespace llvm {
836
837// Loop vectorization cost-model hints how the scalar epilogue loop should be
838// lowered.
840
841 // The default: allowing scalar epilogues.
843
844 // Vectorization with OptForSize: don't allow epilogues.
846
847 // A special case of vectorisation with OptForSize: loops with a very small
848 // trip count are considered for vectorization under OptForSize, thereby
849 // making sure the cost of their loop body is dominant, free of runtime
850 // guards and scalar iteration overheads.
852
853 // Loop hint predicate indicating an epilogue is undesired.
855
856 // Directive indicating we must either tail fold or not vectorize
858};
859
860/// LoopVectorizationCostModel - estimates the expected speedups due to
861/// vectorization.
862/// In many cases vectorization is not profitable. This can happen because of
863/// a number of reasons. In this class we mainly attempt to predict the
864/// expected speedup/slowdowns due to the supported instruction set. We use the
865/// TargetTransformInfo to query the different backends for the cost of
866/// different operations.
869
870public:
881 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
882 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
883 Hints(Hints), InterleaveInfo(IAI) {
884 if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
885 initializeVScaleForTuning();
887 // Query this against the original loop and save it here because the profile
888 // of the original loop header may change as the transformation happens.
889 OptForSize = llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
891 }
892
893 /// \return An upper bound for the vectorization factors (both fixed and
894 /// scalable). If the factors are 0, vectorization and interleaving should be
895 /// avoided up front.
896 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
897
898 /// \return True if runtime checks are required for vectorization, and false
899 /// otherwise.
900 bool runtimeChecksRequired();
901
902 /// Setup cost-based decisions for user vectorization factor.
903 /// \return true if the UserVF is a feasible VF to be chosen.
906 return expectedCost(UserVF).isValid();
907 }
908
909 /// \return True if maximizing vector bandwidth is enabled by the target or
910 /// user options, for the given register kind.
911 bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
912
913 /// \return True if register pressure should be considered for the given VF.
914 bool shouldConsiderRegPressureForVF(ElementCount VF);
915
916 /// \return The size (in bits) of the smallest and widest types in the code
917 /// that needs to be vectorized. We ignore values that remain scalar such as
918 /// 64 bit loop indices.
919 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
920
921 /// Memory access instruction may be vectorized in more than one way.
922 /// Form of instruction after vectorization depends on cost.
923 /// This function takes cost-based decisions for Load/Store instructions
924 /// and collects them in a map. This decisions map is used for building
925 /// the lists of loop-uniform and loop-scalar instructions.
926 /// The calculated cost is saved with widening decision in order to
927 /// avoid redundant calculations.
928 void setCostBasedWideningDecision(ElementCount VF);
929
930 /// A call may be vectorized in different ways depending on whether we have
931 /// vectorized variants available and whether the target supports masking.
932 /// This function analyzes all calls in the function at the supplied VF,
933 /// makes a decision based on the costs of available options, and stores that
934 /// decision in a map for use in planning and plan execution.
935 void setVectorizedCallDecision(ElementCount VF);
936
937 /// Collect values we want to ignore in the cost model.
938 void collectValuesToIgnore();
939
940 /// Collect all element types in the loop for which widening is needed.
941 void collectElementTypesForWidening();
942
943 /// Split reductions into those that happen in the loop, and those that happen
944 /// outside. In loop reductions are collected into InLoopReductions.
945 void collectInLoopReductions();
946
947 /// Returns true if we should use strict in-order reductions for the given
948 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
949 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
950 /// of FP operations.
951 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
952 return !Hints->allowReordering() && RdxDesc.isOrdered();
953 }
954
955 /// \returns The smallest bitwidth each instruction can be represented with.
956 /// The vector equivalents of these instructions should be truncated to this
957 /// type.
959 return MinBWs;
960 }
961
962 /// \returns True if it is more profitable to scalarize instruction \p I for
963 /// vectorization factor \p VF.
965 assert(VF.isVector() &&
966 "Profitable to scalarize relevant only for VF > 1.");
967 assert(
968 TheLoop->isInnermost() &&
969 "cost-model should not be used for outer loops (in VPlan-native path)");
970
971 auto Scalars = InstsToScalarize.find(VF);
972 assert(Scalars != InstsToScalarize.end() &&
973 "VF not yet analyzed for scalarization profitability");
974 return Scalars->second.contains(I);
975 }
976
977 /// Returns true if \p I is known to be uniform after vectorization.
979 assert(
980 TheLoop->isInnermost() &&
981 "cost-model should not be used for outer loops (in VPlan-native path)");
982 // Pseudo probe needs to be duplicated for each unrolled iteration and
983 // vector lane so that profiled loop trip count can be accurately
984 // accumulated instead of being under counted.
986 return false;
987
988 if (VF.isScalar())
989 return true;
990
991 auto UniformsPerVF = Uniforms.find(VF);
992 assert(UniformsPerVF != Uniforms.end() &&
993 "VF not yet analyzed for uniformity");
994 return UniformsPerVF->second.count(I);
995 }
996
997 /// Returns true if \p I is known to be scalar after vectorization.
999 assert(
1000 TheLoop->isInnermost() &&
1001 "cost-model should not be used for outer loops (in VPlan-native path)");
1002 if (VF.isScalar())
1003 return true;
1004
1005 auto ScalarsPerVF = Scalars.find(VF);
1006 assert(ScalarsPerVF != Scalars.end() &&
1007 "Scalar values are not calculated for VF");
1008 return ScalarsPerVF->second.count(I);
1009 }
1010
1011 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1012 /// for vectorization factor \p VF.
1014 return VF.isVector() && MinBWs.contains(I) &&
1015 !isProfitableToScalarize(I, VF) &&
1017 }
1018
1019 /// Decision that was taken during cost calculation for memory instruction.
1022 CM_Widen, // For consecutive accesses with stride +1.
1023 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1029 };
1030
1031 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1032 /// instruction \p I and vector width \p VF.
1035 assert(VF.isVector() && "Expected VF >=2");
1036 WideningDecisions[{I, VF}] = {W, Cost};
1037 }
1038
1039 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1040 /// interleaving group \p Grp and vector width \p VF.
1044 assert(VF.isVector() && "Expected VF >=2");
1045 /// Broadcast this decicion to all instructions inside the group.
1046 /// When interleaving, the cost will only be assigned one instruction, the
1047 /// insert position. For other cases, add the appropriate fraction of the
1048 /// total cost to each instruction. This ensures accurate costs are used,
1049 /// even if the insert position instruction is not used.
1050 InstructionCost InsertPosCost = Cost;
1051 InstructionCost OtherMemberCost = 0;
1052 if (W != CM_Interleave)
1053 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
1054 ;
1055 for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
1056 if (auto *I = Grp->getMember(Idx)) {
1057 if (Grp->getInsertPos() == I)
1058 WideningDecisions[{I, VF}] = {W, InsertPosCost};
1059 else
1060 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
1061 }
1062 }
1063 }
1064
1065 /// Return the cost model decision for the given instruction \p I and vector
1066 /// width \p VF. Return CM_Unknown if this instruction did not pass
1067 /// through the cost modeling.
1069 assert(VF.isVector() && "Expected VF to be a vector VF");
1070 assert(
1071 TheLoop->isInnermost() &&
1072 "cost-model should not be used for outer loops (in VPlan-native path)");
1073
1074 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1075 auto Itr = WideningDecisions.find(InstOnVF);
1076 if (Itr == WideningDecisions.end())
1077 return CM_Unknown;
1078 return Itr->second.first;
1079 }
1080
1081 /// Return the vectorization cost for the given instruction \p I and vector
1082 /// width \p VF.
1084 assert(VF.isVector() && "Expected VF >=2");
1085 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1086 assert(WideningDecisions.contains(InstOnVF) &&
1087 "The cost is not calculated");
1088 return WideningDecisions[InstOnVF].second;
1089 }
1090
1098
1100 Function *Variant, Intrinsic::ID IID,
1101 std::optional<unsigned> MaskPos,
1103 assert(!VF.isScalar() && "Expected vector VF");
1104 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1105 }
1106
1108 ElementCount VF) const {
1109 assert(!VF.isScalar() && "Expected vector VF");
1110 auto I = CallWideningDecisions.find({CI, VF});
1111 if (I == CallWideningDecisions.end())
1112 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1113 return I->second;
1114 }
1115
1116 /// Return True if instruction \p I is an optimizable truncate whose operand
1117 /// is an induction variable. Such a truncate will be removed by adding a new
1118 /// induction variable with the destination type.
1120 // If the instruction is not a truncate, return false.
1121 auto *Trunc = dyn_cast<TruncInst>(I);
1122 if (!Trunc)
1123 return false;
1124
1125 // Get the source and destination types of the truncate.
1126 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1127 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1128
1129 // If the truncate is free for the given types, return false. Replacing a
1130 // free truncate with an induction variable would add an induction variable
1131 // update instruction to each iteration of the loop. We exclude from this
1132 // check the primary induction variable since it will need an update
1133 // instruction regardless.
1134 Value *Op = Trunc->getOperand(0);
1135 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1136 return false;
1137
1138 // If the truncated value is not an induction variable, return false.
1139 return Legal->isInductionPhi(Op);
1140 }
1141
1142 /// Collects the instructions to scalarize for each predicated instruction in
1143 /// the loop.
1144 void collectInstsToScalarize(ElementCount VF);
1145
1146 /// Collect values that will not be widened, including Uniforms, Scalars, and
1147 /// Instructions to Scalarize for the given \p VF.
1148 /// The sets depend on CM decision for Load/Store instructions
1149 /// that may be vectorized as interleave, gather-scatter or scalarized.
1150 /// Also make a decision on what to do about call instructions in the loop
1151 /// at that VF -- scalarize, call a known vector routine, or call a
1152 /// vector intrinsic.
1154 // Do the analysis once.
1155 if (VF.isScalar() || Uniforms.contains(VF))
1156 return;
1158 collectLoopUniforms(VF);
1160 collectLoopScalars(VF);
1162 }
1163
1164 /// Returns true if the target machine supports masked store operation
1165 /// for the given \p DataType and kind of access to \p Ptr.
1166 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
1167 unsigned AddressSpace) const {
1168 return Legal->isConsecutivePtr(DataType, Ptr) &&
1169 TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace);
1170 }
1171
1172 /// Returns true if the target machine supports masked load operation
1173 /// for the given \p DataType and kind of access to \p Ptr.
1174 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
1175 unsigned AddressSpace) const {
1176 return Legal->isConsecutivePtr(DataType, Ptr) &&
1177 TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace);
1178 }
1179
1180 /// Returns true if the target machine can represent \p V as a masked gather
1181 /// or scatter operation.
1183 bool LI = isa<LoadInst>(V);
1184 bool SI = isa<StoreInst>(V);
1185 if (!LI && !SI)
1186 return false;
1187 auto *Ty = getLoadStoreType(V);
1189 if (VF.isVector())
1190 Ty = VectorType::get(Ty, VF);
1191 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1192 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1193 }
1194
1195 /// Returns true if the target machine supports all of the reduction
1196 /// variables found for the given VF.
1198 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1199 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1200 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1201 }));
1202 }
1203
1204 /// Given costs for both strategies, return true if the scalar predication
1205 /// lowering should be used for div/rem. This incorporates an override
1206 /// option so it is not simply a cost comparison.
1208 InstructionCost SafeDivisorCost) const {
1209 switch (ForceSafeDivisor) {
1210 case cl::BOU_UNSET:
1211 return ScalarCost < SafeDivisorCost;
1212 case cl::BOU_TRUE:
1213 return false;
1214 case cl::BOU_FALSE:
1215 return true;
1216 }
1217 llvm_unreachable("impossible case value");
1218 }
1219
1220 /// Returns true if \p I is an instruction which requires predication and
1221 /// for which our chosen predication strategy is scalarization (i.e. we
1222 /// don't have an alternate strategy such as masking available).
1223 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1224 bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1225
1226 /// Returns true if \p I is an instruction that needs to be predicated
1227 /// at runtime. The result is independent of the predication mechanism.
1228 /// Superset of instructions that return true for isScalarWithPredication.
1229 bool isPredicatedInst(Instruction *I) const;
1230
1231 /// Return the costs for our two available strategies for lowering a
1232 /// div/rem operation which requires speculating at least one lane.
1233 /// First result is for scalarization (will be invalid for scalable
1234 /// vectors); second is for the safe-divisor strategy.
1235 std::pair<InstructionCost, InstructionCost>
1236 getDivRemSpeculationCost(Instruction *I,
1237 ElementCount VF) const;
1238
1239 /// Returns true if \p I is a memory instruction with consecutive memory
1240 /// access that can be widened.
1241 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1242
1243 /// Returns true if \p I is a memory instruction in an interleaved-group
1244 /// of memory accesses that can be vectorized with wide vector loads/stores
1245 /// and shuffles.
1246 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1247
1248 /// Check if \p Instr belongs to any interleaved access group.
1250 return InterleaveInfo.isInterleaved(Instr);
1251 }
1252
1253 /// Get the interleaved access group that \p Instr belongs to.
1256 return InterleaveInfo.getInterleaveGroup(Instr);
1257 }
1258
1259 /// Returns true if we're required to use a scalar epilogue for at least
1260 /// the final iteration of the original loop.
1261 bool requiresScalarEpilogue(bool IsVectorizing) const {
1262 if (!isScalarEpilogueAllowed()) {
1263 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1264 return false;
1265 }
1266 // If we might exit from anywhere but the latch and early exit vectorization
1267 // is disabled, we must run the exiting iteration in scalar form.
1268 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1269 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1270 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1271 "from latch block\n");
1272 return true;
1273 }
1274 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1275 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1276 "interleaved group requires scalar epilogue\n");
1277 return true;
1278 }
1279 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1280 return false;
1281 }
1282
1283 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1284 /// loop hint annotation.
1286 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1287 }
1288
1289 /// Returns the TailFoldingStyle that is best for the current loop.
1290 TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
1291 if (!ChosenTailFoldingStyle)
1293 return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
1294 : ChosenTailFoldingStyle->second;
1295 }
1296
1297 /// Selects and saves TailFoldingStyle for 2 options - if IV update may
1298 /// overflow or not.
1299 /// \param IsScalableVF true if scalable vector factors enabled.
1300 /// \param UserIC User specific interleave count.
1301 void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
1302 assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
1303 if (!Legal->canFoldTailByMasking()) {
1304 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1305 return;
1306 }
1307
1308 // Default to TTI preference, but allow command line override.
1309 ChosenTailFoldingStyle = {
1310 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true),
1311 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false)};
1312 if (ForceTailFoldingStyle.getNumOccurrences())
1313 ChosenTailFoldingStyle = {ForceTailFoldingStyle.getValue(),
1314 ForceTailFoldingStyle.getValue()};
1315
1316 if (ChosenTailFoldingStyle->first != TailFoldingStyle::DataWithEVL &&
1317 ChosenTailFoldingStyle->second != TailFoldingStyle::DataWithEVL)
1318 return;
1319 // Override EVL styles if needed.
1320 // FIXME: Investigate opportunity for fixed vector factor.
1321 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1322 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1323 if (EVLIsLegal)
1324 return;
1325 // If for some reason EVL mode is unsupported, fallback to a scalar epilogue
1326 // if it's allowed, or DataWithoutLaneMask otherwise.
1327 if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
1328 ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
1329 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1330 else
1331 ChosenTailFoldingStyle = {TailFoldingStyle::DataWithoutLaneMask,
1333
1334 LLVM_DEBUG(
1335 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1336 "not try to generate VP Intrinsics "
1337 << (UserIC > 1
1338 ? "since interleave count specified is greater than 1.\n"
1339 : "due to non-interleaving reasons.\n"));
1340 }
1341
1342 /// Returns true if all loop blocks should be masked to fold tail loop.
1343 bool foldTailByMasking() const {
1344 // TODO: check if it is possible to check for None style independent of
1345 // IVUpdateMayOverflow flag in getTailFoldingStyle.
1347 }
1348
1349 /// Return maximum safe number of elements to be processed per vector
1350 /// iteration, which do not prevent store-load forwarding and are safe with
1351 /// regard to the memory dependencies. Required for EVL-based VPlans to
1352 /// correctly calculate AVL (application vector length) as min(remaining AVL,
1353 /// MaxSafeElements).
1354 /// TODO: need to consider adjusting cost model to use this value as a
1355 /// vectorization factor for EVL-based vectorization.
1356 std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }
1357
1358 /// Returns true if the instructions in this block requires predication
1359 /// for any reason, e.g. because tail folding now requires a predicate
1360 /// or because the block in the original loop was predicated.
1362 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1363 }
1364
1365 /// Returns true if VP intrinsics with explicit vector length support should
1366 /// be generated in the tail folded loop.
1370
1371 /// Returns true if the Phi is part of an inloop reduction.
1372 bool isInLoopReduction(PHINode *Phi) const {
1373 return InLoopReductions.contains(Phi);
1374 }
1375
1376 /// Returns true if the predicated reduction select should be used to set the
1377 /// incoming value for the reduction phi.
1379 // Force to use predicated reduction select since the EVL of the
1380 // second-to-last iteration might not be VF*UF.
1381 if (foldTailWithEVL())
1382 return true;
1384 TTI.preferPredicatedReductionSelect();
1385 }
1386
1387 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1388 /// with factor VF. Return the cost of the instruction, including
1389 /// scalarization overhead if it's needed.
1390 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1391
1392 /// Estimate cost of a call instruction CI if it were vectorized with factor
1393 /// VF. Return the cost of the instruction, including scalarization overhead
1394 /// if it's needed.
1395 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1396
1397 /// Invalidates decisions already taken by the cost model.
1399 WideningDecisions.clear();
1400 CallWideningDecisions.clear();
1401 Uniforms.clear();
1402 Scalars.clear();
1403 }
1404
1405 /// Returns the expected execution cost. The unit of the cost does
1406 /// not matter because we use the 'cost' units to compare different
1407 /// vector widths. The cost that is returned is *not* normalized by
1408 /// the factor width.
1409 InstructionCost expectedCost(ElementCount VF);
1410
1411 bool hasPredStores() const { return NumPredStores > 0; }
1412
1413 /// Returns true if epilogue vectorization is considered profitable, and
1414 /// false otherwise.
1415 /// \p VF is the vectorization factor chosen for the original loop.
1416 /// \p Multiplier is an aditional scaling factor applied to VF before
1417 /// comparing to EpilogueVectorizationMinVF.
1418 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1419 const unsigned IC) const;
1420
1421 /// Returns the execution time cost of an instruction for a given vector
1422 /// width. Vector width of one means scalar.
1423 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1424
1425 /// Return the cost of instructions in an inloop reduction pattern, if I is
1426 /// part of that pattern.
1427 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1428 ElementCount VF,
1429 Type *VectorTy) const;
1430
1431 /// Returns true if \p Op should be considered invariant and if it is
1432 /// trivially hoistable.
1433 bool shouldConsiderInvariant(Value *Op);
1434
1435 /// Return the value of vscale used for tuning the cost model.
1436 std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }
1437
1438private:
1439 unsigned NumPredStores = 0;
1440
1441 /// Used to store the value of vscale used for tuning the cost model. It is
1442 /// initialized during object construction.
1443 std::optional<unsigned> VScaleForTuning;
1444
1445 /// Initializes the value of vscale used for tuning the cost model. If
1446 /// vscale_range.min == vscale_range.max then return vscale_range.max, else
1447 /// return the value returned by the corresponding TTI method.
1448 void initializeVScaleForTuning() {
1449 const Function *Fn = TheLoop->getHeader()->getParent();
1450 if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
1451 auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
1452 auto Min = Attr.getVScaleRangeMin();
1453 auto Max = Attr.getVScaleRangeMax();
1454 if (Max && Min == Max) {
1455 VScaleForTuning = Max;
1456 return;
1457 }
1458 }
1459
1460 VScaleForTuning = TTI.getVScaleForTuning();
1461 }
1462
1463 /// \return An upper bound for the vectorization factors for both
1464 /// fixed and scalable vectorization, where the minimum-known number of
1465 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1466 /// disabled or unsupported, then the scalable part will be equal to
1467 /// ElementCount::getScalable(0).
1468 FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
1469 ElementCount UserVF,
1470 bool FoldTailByMasking);
1471
1472 /// If \p VF > MaxTripcount, clamps it to the next lower VF that is <=
1473 /// MaxTripCount.
1474 ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
1475 bool FoldTailByMasking) const;
1476
1477 /// \return the maximized element count based on the targets vector
1478 /// registers and the loop trip-count, but limited to a maximum safe VF.
1479 /// This is a helper function of computeFeasibleMaxVF.
1480 ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
1481 unsigned SmallestType,
1482 unsigned WidestType,
1483 ElementCount MaxSafeVF,
1484 bool FoldTailByMasking);
1485
1486 /// Checks if scalable vectorization is supported and enabled. Caches the
1487 /// result to avoid repeated debug dumps for repeated queries.
1488 bool isScalableVectorizationAllowed();
1489
1490 /// \return the maximum legal scalable VF, based on the safe max number
1491 /// of elements.
1492 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1493
1494 /// Calculate vectorization cost of memory instruction \p I.
1495 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1496
1497 /// The cost computation for scalarized memory instruction.
1498 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1499
1500 /// The cost computation for interleaving group of memory instructions.
1501 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1502
1503 /// The cost computation for Gather/Scatter instruction.
1504 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1505
1506 /// The cost computation for widening instruction \p I with consecutive
1507 /// memory access.
1508 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1509
1510 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1511 /// Load: scalar load + broadcast.
1512 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1513 /// element)
1514 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1515
1516 /// Estimate the overhead of scalarizing an instruction. This is a
1517 /// convenience wrapper for the type-based getScalarizationOverhead API.
1519 ElementCount VF) const;
1520
1521 /// Returns true if an artificially high cost for emulated masked memrefs
1522 /// should be used.
1523 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1524
1525 /// Map of scalar integer values to the smallest bitwidth they can be legally
1526 /// represented as. The vector equivalents of these values should be truncated
1527 /// to this type.
1528 MapVector<Instruction *, uint64_t> MinBWs;
1529
1530 /// A type representing the costs for instructions if they were to be
1531 /// scalarized rather than vectorized. The entries are Instruction-Cost
1532 /// pairs.
1533 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1534
1535 /// A set containing all BasicBlocks that are known to present after
1536 /// vectorization as a predicated block.
1537 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1538 PredicatedBBsAfterVectorization;
1539
1540 /// Records whether it is allowed to have the original scalar loop execute at
1541 /// least once. This may be needed as a fallback loop in case runtime
1542 /// aliasing/dependence checks fail, or to handle the tail/remainder
1543 /// iterations when the trip count is unknown or doesn't divide by the VF,
1544 /// or as a peel-loop to handle gaps in interleave-groups.
1545 /// Under optsize and when the trip count is very small we don't allow any
1546 /// iterations to execute in the scalar loop.
1547 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1548
1549 /// Control finally chosen tail folding style. The first element is used if
1550 /// the IV update may overflow, the second element - if it does not.
1551 std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
1552 ChosenTailFoldingStyle;
1553
1554 /// true if scalable vectorization is supported and enabled.
1555 std::optional<bool> IsScalableVectorizationAllowed;
1556
1557 /// Maximum safe number of elements to be processed per vector iteration,
1558 /// which do not prevent store-load forwarding and are safe with regard to the
1559 /// memory dependencies. Required for EVL-based veectorization, where this
1560 /// value is used as the upper bound of the safe AVL.
1561 std::optional<unsigned> MaxSafeElements;
1562
1563 /// A map holding scalar costs for different vectorization factors. The
1564 /// presence of a cost for an instruction in the mapping indicates that the
1565 /// instruction will be scalarized when vectorizing with the associated
1566 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1567 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1568
1569 /// Holds the instructions known to be uniform after vectorization.
1570 /// The data is collected per VF.
1571 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1572
1573 /// Holds the instructions known to be scalar after vectorization.
1574 /// The data is collected per VF.
1575 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1576
1577 /// Holds the instructions (address computations) that are forced to be
1578 /// scalarized.
1579 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1580
1581 /// PHINodes of the reductions that should be expanded in-loop.
1582 SmallPtrSet<PHINode *, 4> InLoopReductions;
1583
1584 /// A Map of inloop reduction operations and their immediate chain operand.
1585 /// FIXME: This can be removed once reductions can be costed correctly in
1586 /// VPlan. This was added to allow quick lookup of the inloop operations.
1587 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1588
1589 /// Returns the expected difference in cost from scalarizing the expression
1590 /// feeding a predicated instruction \p PredInst. The instructions to
1591 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1592 /// non-negative return value implies the expression will be scalarized.
1593 /// Currently, only single-use chains are considered for scalarization.
1594 InstructionCost computePredInstDiscount(Instruction *PredInst,
1595 ScalarCostsTy &ScalarCosts,
1596 ElementCount VF);
1597
1598 /// Collect the instructions that are uniform after vectorization. An
1599 /// instruction is uniform if we represent it with a single scalar value in
1600 /// the vectorized loop corresponding to each vector iteration. Examples of
1601 /// uniform instructions include pointer operands of consecutive or
1602 /// interleaved memory accesses. Note that although uniformity implies an
1603 /// instruction will be scalar, the reverse is not true. In general, a
1604 /// scalarized instruction will be represented by VF scalar values in the
1605 /// vectorized loop, each corresponding to an iteration of the original
1606 /// scalar loop.
1607 void collectLoopUniforms(ElementCount VF);
1608
1609 /// Collect the instructions that are scalar after vectorization. An
1610 /// instruction is scalar if it is known to be uniform or will be scalarized
1611 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1612 /// to the list if they are used by a load/store instruction that is marked as
1613 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1614 /// VF values in the vectorized loop, each corresponding to an iteration of
1615 /// the original scalar loop.
1616 void collectLoopScalars(ElementCount VF);
1617
1618 /// Keeps cost model vectorization decision and cost for instructions.
1619 /// Right now it is used for memory instructions only.
1620 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1621 std::pair<InstWidening, InstructionCost>>;
1622
1623 DecisionList WideningDecisions;
1624
1625 using CallDecisionList =
1626 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1627
1628 CallDecisionList CallWideningDecisions;
1629
1630 /// Returns true if \p V is expected to be vectorized and it needs to be
1631 /// extracted.
1632 bool needsExtract(Value *V, ElementCount VF) const {
1634 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1635 TheLoop->isLoopInvariant(I) ||
1636 getWideningDecision(I, VF) == CM_Scalarize ||
1637 (isa<CallInst>(I) &&
1638 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1639 return false;
1640
1641 // Assume we can vectorize V (and hence we need extraction) if the
1642 // scalars are not computed yet. This can happen, because it is called
1643 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1644 // the scalars are collected. That should be a safe assumption in most
1645 // cases, because we check if the operands have vectorizable types
1646 // beforehand in LoopVectorizationLegality.
1647 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1648 };
1649
1650 /// Returns a range containing only operands needing to be extracted.
1651 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1652 ElementCount VF) const {
1653
1654 SmallPtrSet<const Value *, 4> UniqueOperands;
1656 for (Value *Op : Ops) {
1657 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1658 !needsExtract(Op, VF))
1659 continue;
1660 Res.push_back(Op);
1661 }
1662 return Res;
1663 }
1664
1665public:
1666 /// The loop that we evaluate.
1668
1669 /// Predicated scalar evolution analysis.
1671
1672 /// Loop Info analysis.
1674
1675 /// Vectorization legality.
1677
1678 /// Vector target information.
1680
1681 /// Target Library Info.
1683
1684 /// Demanded bits analysis.
1686
1687 /// Assumption cache.
1689
1690 /// Interface to emit optimization remarks.
1692
1694
1695 /// Loop Vectorize Hint.
1697
1698 /// The interleave access information contains groups of interleaved accesses
1699 /// with the same stride and close to each other.
1701
1702 /// Values to ignore in the cost model.
1704
1705 /// Values to ignore in the cost model when VF > 1.
1707
1708 /// All element types found in the loop.
1710
1711 /// The kind of cost that we are calculating
1713
1714 /// Whether this loop should be optimized for size based on function attribute
1715 /// or profile information.
1717
1718 /// The highest VF possible for this loop, without using MaxBandwidth.
1720};
1721} // end namespace llvm
1722
1723namespace {
1724/// Helper struct to manage generating runtime checks for vectorization.
1725///
1726/// The runtime checks are created up-front in temporary blocks to allow better
1727/// estimating the cost and un-linked from the existing IR. After deciding to
1728/// vectorize, the checks are moved back. If deciding not to vectorize, the
1729/// temporary blocks are completely removed.
1730class GeneratedRTChecks {
1731 /// Basic block which contains the generated SCEV checks, if any.
1732 BasicBlock *SCEVCheckBlock = nullptr;
1733
1734 /// The value representing the result of the generated SCEV checks. If it is
1735 /// nullptr no SCEV checks have been generated.
1736 Value *SCEVCheckCond = nullptr;
1737
1738 /// Basic block which contains the generated memory runtime checks, if any.
1739 BasicBlock *MemCheckBlock = nullptr;
1740
1741 /// The value representing the result of the generated memory runtime checks.
1742 /// If it is nullptr no memory runtime checks have been generated.
1743 Value *MemRuntimeCheckCond = nullptr;
1744
1745 DominatorTree *DT;
1746 LoopInfo *LI;
1748
1749 SCEVExpander SCEVExp;
1750 SCEVExpander MemCheckExp;
1751
1752 bool CostTooHigh = false;
1753
1754 Loop *OuterLoop = nullptr;
1755
1757
1758 /// The kind of cost that we are calculating
1760
1761public:
1762 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1765 : DT(DT), LI(LI), TTI(TTI), SCEVExp(*PSE.getSE(), DL, "scev.check"),
1766 MemCheckExp(*PSE.getSE(), DL, "scev.check"), PSE(PSE),
1767 CostKind(CostKind) {}
1768
1769 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1770 /// accurately estimate the cost of the runtime checks. The blocks are
1771 /// un-linked from the IR and are added back during vector code generation. If
1772 /// there is no vector code generation, the check blocks are removed
1773 /// completely.
1774 void create(Loop *L, const LoopAccessInfo &LAI,
1775 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
1776
1777 // Hard cutoff to limit compile-time increase in case a very large number of
1778 // runtime checks needs to be generated.
1779 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1780 // profile info.
1781 CostTooHigh =
1783 if (CostTooHigh)
1784 return;
1785
1786 BasicBlock *LoopHeader = L->getHeader();
1787 BasicBlock *Preheader = L->getLoopPreheader();
1788
1789 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1790 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1791 // may be used by SCEVExpander. The blocks will be un-linked from their
1792 // predecessors and removed from LI & DT at the end of the function.
1793 if (!UnionPred.isAlwaysTrue()) {
1794 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1795 nullptr, "vector.scevcheck");
1796
1797 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1798 &UnionPred, SCEVCheckBlock->getTerminator());
1799 if (isa<Constant>(SCEVCheckCond)) {
1800 // Clean up directly after expanding the predicate to a constant, to
1801 // avoid further expansions re-using anything left over from SCEVExp.
1802 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1803 SCEVCleaner.cleanup();
1804 }
1805 }
1806
1807 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1808 if (RtPtrChecking.Need) {
1809 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1810 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1811 "vector.memcheck");
1812
1813 auto DiffChecks = RtPtrChecking.getDiffChecks();
1814 if (DiffChecks) {
1815 Value *RuntimeVF = nullptr;
1816 MemRuntimeCheckCond = addDiffRuntimeChecks(
1817 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1818 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1819 if (!RuntimeVF)
1820 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1821 return RuntimeVF;
1822 },
1823 IC);
1824 } else {
1825 MemRuntimeCheckCond = addRuntimeChecks(
1826 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1828 }
1829 assert(MemRuntimeCheckCond &&
1830 "no RT checks generated although RtPtrChecking "
1831 "claimed checks are required");
1832 }
1833
1834 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1835
1836 if (!MemCheckBlock && !SCEVCheckBlock)
1837 return;
1838
1839 // Unhook the temporary block with the checks, update various places
1840 // accordingly.
1841 if (SCEVCheckBlock)
1842 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1843 if (MemCheckBlock)
1844 MemCheckBlock->replaceAllUsesWith(Preheader);
1845
1846 if (SCEVCheckBlock) {
1847 SCEVCheckBlock->getTerminator()->moveBefore(
1848 Preheader->getTerminator()->getIterator());
1849 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1850 UI->setDebugLoc(DebugLoc::getTemporary());
1851 Preheader->getTerminator()->eraseFromParent();
1852 }
1853 if (MemCheckBlock) {
1854 MemCheckBlock->getTerminator()->moveBefore(
1855 Preheader->getTerminator()->getIterator());
1856 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1857 UI->setDebugLoc(DebugLoc::getTemporary());
1858 Preheader->getTerminator()->eraseFromParent();
1859 }
1860
1861 DT->changeImmediateDominator(LoopHeader, Preheader);
1862 if (MemCheckBlock) {
1863 DT->eraseNode(MemCheckBlock);
1864 LI->removeBlock(MemCheckBlock);
1865 }
1866 if (SCEVCheckBlock) {
1867 DT->eraseNode(SCEVCheckBlock);
1868 LI->removeBlock(SCEVCheckBlock);
1869 }
1870
1871 // Outer loop is used as part of the later cost calculations.
1872 OuterLoop = L->getParentLoop();
1873 }
1874
1876 if (SCEVCheckBlock || MemCheckBlock)
1877 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1878
1879 if (CostTooHigh) {
1881 Cost.setInvalid();
1882 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1883 return Cost;
1884 }
1885
1886 InstructionCost RTCheckCost = 0;
1887 if (SCEVCheckBlock)
1888 for (Instruction &I : *SCEVCheckBlock) {
1889 if (SCEVCheckBlock->getTerminator() == &I)
1890 continue;
1892 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1893 RTCheckCost += C;
1894 }
1895 if (MemCheckBlock) {
1896 InstructionCost MemCheckCost = 0;
1897 for (Instruction &I : *MemCheckBlock) {
1898 if (MemCheckBlock->getTerminator() == &I)
1899 continue;
1901 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1902 MemCheckCost += C;
1903 }
1904
1905 // If the runtime memory checks are being created inside an outer loop
1906 // we should find out if these checks are outer loop invariant. If so,
1907 // the checks will likely be hoisted out and so the effective cost will
1908 // reduce according to the outer loop trip count.
1909 if (OuterLoop) {
1910 ScalarEvolution *SE = MemCheckExp.getSE();
1911 // TODO: If profitable, we could refine this further by analysing every
1912 // individual memory check, since there could be a mixture of loop
1913 // variant and invariant checks that mean the final condition is
1914 // variant.
1915 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1916 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1917 // It seems reasonable to assume that we can reduce the effective
1918 // cost of the checks even when we know nothing about the trip
1919 // count. Assume that the outer loop executes at least twice.
1920 unsigned BestTripCount = 2;
1921
1922 // Get the best known TC estimate.
1923 if (auto EstimatedTC = getSmallBestKnownTC(
1924 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1925 if (EstimatedTC->isFixed())
1926 BestTripCount = EstimatedTC->getFixedValue();
1927
1928 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1929
1930 // Let's ensure the cost is always at least 1.
1931 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1932 (InstructionCost::CostType)1);
1933
1934 if (BestTripCount > 1)
1936 << "We expect runtime memory checks to be hoisted "
1937 << "out of the outer loop. Cost reduced from "
1938 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1939
1940 MemCheckCost = NewMemCheckCost;
1941 }
1942 }
1943
1944 RTCheckCost += MemCheckCost;
1945 }
1946
1947 if (SCEVCheckBlock || MemCheckBlock)
1948 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1949 << "\n");
1950
1951 return RTCheckCost;
1952 }
1953
1954 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1955 /// unused.
1956 ~GeneratedRTChecks() {
1957 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1958 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1959 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
1960 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
1961 if (SCEVChecksUsed)
1962 SCEVCleaner.markResultUsed();
1963
1964 if (MemChecksUsed) {
1965 MemCheckCleaner.markResultUsed();
1966 } else {
1967 auto &SE = *MemCheckExp.getSE();
1968 // Memory runtime check generation creates compares that use expanded
1969 // values. Remove them before running the SCEVExpanderCleaners.
1970 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1971 if (MemCheckExp.isInsertedInstruction(&I))
1972 continue;
1973 SE.forgetValue(&I);
1974 I.eraseFromParent();
1975 }
1976 }
1977 MemCheckCleaner.cleanup();
1978 SCEVCleaner.cleanup();
1979
1980 if (!SCEVChecksUsed)
1981 SCEVCheckBlock->eraseFromParent();
1982 if (!MemChecksUsed)
1983 MemCheckBlock->eraseFromParent();
1984 }
1985
1986 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
1987 /// outside VPlan.
1988 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
1989 using namespace llvm::PatternMatch;
1990 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
1991 return {nullptr, nullptr};
1992
1993 return {SCEVCheckCond, SCEVCheckBlock};
1994 }
1995
1996 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
1997 /// outside VPlan.
1998 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
1999 using namespace llvm::PatternMatch;
2000 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2001 return {nullptr, nullptr};
2002 return {MemRuntimeCheckCond, MemCheckBlock};
2003 }
2004
2005 /// Return true if any runtime checks have been added
2006 bool hasChecks() const {
2007 return getSCEVChecks().first || getMemRuntimeChecks().first;
2008 }
2009};
2010} // namespace
2011
2017
2022
2023// Return true if \p OuterLp is an outer loop annotated with hints for explicit
2024// vectorization. The loop needs to be annotated with #pragma omp simd
2025// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2026// vector length information is not provided, vectorization is not considered
2027// explicit. Interleave hints are not allowed either. These limitations will be
2028// relaxed in the future.
2029// Please, note that we are currently forced to abuse the pragma 'clang
2030// vectorize' semantics. This pragma provides *auto-vectorization hints*
2031// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2032// provides *explicit vectorization hints* (LV can bypass legal checks and
2033// assume that vectorization is legal). However, both hints are implemented
2034// using the same metadata (llvm.loop.vectorize, processed by
2035// LoopVectorizeHints). This will be fixed in the future when the native IR
2036// representation for pragma 'omp simd' is introduced.
2037static bool isExplicitVecOuterLoop(Loop *OuterLp,
2039 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2040 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2041
2042 // Only outer loops with an explicit vectorization hint are supported.
2043 // Unannotated outer loops are ignored.
2045 return false;
2046
2047 Function *Fn = OuterLp->getHeader()->getParent();
2048 if (!Hints.allowVectorization(Fn, OuterLp,
2049 true /*VectorizeOnlyWhenForced*/)) {
2050 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2051 return false;
2052 }
2053
2054 if (Hints.getInterleave() > 1) {
2055 // TODO: Interleave support is future work.
2056 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2057 "outer loops.\n");
2058 Hints.emitRemarkWithHints();
2059 return false;
2060 }
2061
2062 return true;
2063}
2064
2068 // Collect inner loops and outer loops without irreducible control flow. For
2069 // now, only collect outer loops that have explicit vectorization hints. If we
2070 // are stress testing the VPlan H-CFG construction, we collect the outermost
2071 // loop of every loop nest.
2072 if (L.isInnermost() || VPlanBuildStressTest ||
2074 LoopBlocksRPO RPOT(&L);
2075 RPOT.perform(LI);
2077 V.push_back(&L);
2078 // TODO: Collect inner loops inside marked outer loops in case
2079 // vectorization fails for the outer loop. Do not invoke
2080 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2081 // already known to be reducible. We can use an inherited attribute for
2082 // that.
2083 return;
2084 }
2085 }
2086 for (Loop *InnerL : L)
2087 collectSupportedLoops(*InnerL, LI, ORE, V);
2088}
2089
2090//===----------------------------------------------------------------------===//
2091// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2092// LoopVectorizationCostModel and LoopVectorizationPlanner.
2093//===----------------------------------------------------------------------===//
2094
2095/// Compute the transformed value of Index at offset StartValue using step
2096/// StepValue.
2097/// For integer induction, returns StartValue + Index * StepValue.
2098/// For pointer induction, returns StartValue[Index * StepValue].
2099/// FIXME: The newly created binary instructions should contain nsw/nuw
2100/// flags, which can be found from the original scalar operations.
2101static Value *
2103 Value *Step,
2105 const BinaryOperator *InductionBinOp) {
2106 using namespace llvm::PatternMatch;
2107 Type *StepTy = Step->getType();
2108 Value *CastedIndex = StepTy->isIntegerTy()
2109 ? B.CreateSExtOrTrunc(Index, StepTy)
2110 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2111 if (CastedIndex != Index) {
2112 CastedIndex->setName(CastedIndex->getName() + ".cast");
2113 Index = CastedIndex;
2114 }
2115
2116 // Note: the IR at this point is broken. We cannot use SE to create any new
2117 // SCEV and then expand it, hoping that SCEV's simplification will give us
2118 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
2119 // lead to various SCEV crashes. So all we can do is to use builder and rely
2120 // on InstCombine for future simplifications. Here we handle some trivial
2121 // cases only.
2122 auto CreateAdd = [&B](Value *X, Value *Y) {
2123 assert(X->getType() == Y->getType() && "Types don't match!");
2124 if (match(X, m_ZeroInt()))
2125 return Y;
2126 if (match(Y, m_ZeroInt()))
2127 return X;
2128 return B.CreateAdd(X, Y);
2129 };
2130
2131 // We allow X to be a vector type, in which case Y will potentially be
2132 // splatted into a vector with the same element count.
2133 auto CreateMul = [&B](Value *X, Value *Y) {
2134 assert(X->getType()->getScalarType() == Y->getType() &&
2135 "Types don't match!");
2136 if (match(X, m_One()))
2137 return Y;
2138 if (match(Y, m_One()))
2139 return X;
2140 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2141 if (XVTy && !isa<VectorType>(Y->getType()))
2142 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2143 return B.CreateMul(X, Y);
2144 };
2145
2146 switch (InductionKind) {
2148 assert(!isa<VectorType>(Index->getType()) &&
2149 "Vector indices not supported for integer inductions yet");
2150 assert(Index->getType() == StartValue->getType() &&
2151 "Index type does not match StartValue type");
2152 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2153 return B.CreateSub(StartValue, Index);
2154 auto *Offset = CreateMul(Index, Step);
2155 return CreateAdd(StartValue, Offset);
2156 }
2158 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2160 assert(!isa<VectorType>(Index->getType()) &&
2161 "Vector indices not supported for FP inductions yet");
2162 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2163 assert(InductionBinOp &&
2164 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2165 InductionBinOp->getOpcode() == Instruction::FSub) &&
2166 "Original bin op should be defined for FP induction");
2167
2168 Value *MulExp = B.CreateFMul(Step, Index);
2169 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2170 "induction");
2171 }
2173 return nullptr;
2174 }
2175 llvm_unreachable("invalid enum");
2176}
2177
2178static std::optional<unsigned> getMaxVScale(const Function &F,
2179 const TargetTransformInfo &TTI) {
2180 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2181 return MaxVScale;
2182
2183 if (F.hasFnAttribute(Attribute::VScaleRange))
2184 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2185
2186 return std::nullopt;
2187}
2188
2189/// For the given VF and UF and maximum trip count computed for the loop, return
2190/// whether the induction variable might overflow in the vectorized loop. If not,
2191/// then we know a runtime overflow check always evaluates to false and can be
2192/// removed.
2194 const LoopVectorizationCostModel *Cost,
2195 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2196 // Always be conservative if we don't know the exact unroll factor.
2197 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2198
2199 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2200 APInt MaxUIntTripCount = IdxTy->getMask();
2201
2202 // We know the runtime overflow check is known false iff the (max) trip-count
2203 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2204 // the vector loop induction variable.
2205 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2206 uint64_t MaxVF = VF.getKnownMinValue();
2207 if (VF.isScalable()) {
2208 std::optional<unsigned> MaxVScale =
2209 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2210 if (!MaxVScale)
2211 return false;
2212 MaxVF *= *MaxVScale;
2213 }
2214
2215 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2216 }
2217
2218 return false;
2219}
2220
2221// Return whether we allow using masked interleave-groups (for dealing with
2222// strided loads/stores that reside in predicated blocks, or for dealing
2223// with gaps).
2225 // If an override option has been passed in for interleaved accesses, use it.
2226 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2228
2229 return TTI.enableMaskedInterleavedAccessVectorization();
2230}
2231
2233 BasicBlock *CheckIRBB) {
2234 // Note: The block with the minimum trip-count check is already connected
2235 // during earlier VPlan construction.
2236 VPBlockBase *ScalarPH = Plan.getScalarPreheader();
2237 VPBlockBase *PreVectorPH = VectorPHVPBB->getSinglePredecessor();
2238 assert(PreVectorPH->getNumSuccessors() == 2 && "Expected 2 successors");
2239 assert(PreVectorPH->getSuccessors()[0] == ScalarPH && "Unexpected successor");
2240 VPIRBasicBlock *CheckVPIRBB = Plan.createVPIRBasicBlock(CheckIRBB);
2241 VPBlockUtils::insertOnEdge(PreVectorPH, VectorPHVPBB, CheckVPIRBB);
2242 PreVectorPH = CheckVPIRBB;
2243 VPBlockUtils::connectBlocks(PreVectorPH, ScalarPH);
2244 PreVectorPH->swapSuccessors();
2245
2246 // We just connected a new block to the scalar preheader. Update all
2247 // VPPhis by adding an incoming value for it, replicating the last value.
2248 unsigned NumPredecessors = ScalarPH->getNumPredecessors();
2249 for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) {
2250 assert(isa<VPPhi>(&R) && "Phi expected to be VPPhi");
2251 assert(cast<VPPhi>(&R)->getNumIncoming() == NumPredecessors - 1 &&
2252 "must have incoming values for all operands");
2253 R.addOperand(R.getOperand(NumPredecessors - 2));
2254 }
2255}
2256
2258 BasicBlock *VectorPH, ElementCount VF, unsigned UF) const {
2259 // Generate code to check if the loop's trip count is less than VF * UF, or
2260 // equal to it in case a scalar epilogue is required; this implies that the
2261 // vector trip count is zero. This check also covers the case where adding one
2262 // to the backedge-taken count overflowed leading to an incorrect trip count
2263 // of zero. In this case we will also jump to the scalar loop.
2264 auto P = Cost->requiresScalarEpilogue(VF.isVector()) ? ICmpInst::ICMP_ULE
2266
2267 // Reuse existing vector loop preheader for TC checks.
2268 // Note that new preheader block is generated for vector loop.
2269 BasicBlock *const TCCheckBlock = VectorPH;
2271 TCCheckBlock->getContext(),
2272 InstSimplifyFolder(TCCheckBlock->getDataLayout()));
2273 Builder.SetInsertPoint(TCCheckBlock->getTerminator());
2274
2275 // If tail is to be folded, vector loop takes care of all iterations.
2277 Type *CountTy = Count->getType();
2278 Value *CheckMinIters = Builder.getFalse();
2279 auto CreateStep = [&]() -> Value * {
2280 // Create step with max(MinProTripCount, UF * VF).
2281 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
2282 return createStepForVF(Builder, CountTy, VF, UF);
2283
2284 Value *MinProfTC =
2285 Builder.CreateElementCount(CountTy, MinProfitableTripCount);
2286 if (!VF.isScalable())
2287 return MinProfTC;
2288 return Builder.CreateBinaryIntrinsic(
2289 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
2290 };
2291
2292 TailFoldingStyle Style = Cost->getTailFoldingStyle();
2293 if (Style == TailFoldingStyle::None) {
2294 Value *Step = CreateStep();
2295 ScalarEvolution &SE = *PSE.getSE();
2296 // TODO: Emit unconditional branch to vector preheader instead of
2297 // conditional branch with known condition.
2298 const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
2299 // Check if the trip count is < the step.
2300 if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
2301 // TODO: Ensure step is at most the trip count when determining max VF and
2302 // UF, w/o tail folding.
2303 CheckMinIters = Builder.getTrue();
2305 TripCountSCEV, SE.getSCEV(Step))) {
2306 // Generate the minimum iteration check only if we cannot prove the
2307 // check is known to be true, or known to be false.
2308 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2309 } // else step known to be < trip count, use CheckMinIters preset to false.
2310 } else if (VF.isScalable() && !TTI->isVScaleKnownToBeAPowerOfTwo() &&
2313 // vscale is not necessarily a power-of-2, which means we cannot guarantee
2314 // an overflow to zero when updating induction variables and so an
2315 // additional overflow check is required before entering the vector loop.
2316
2317 // Get the maximum unsigned value for the type.
2318 Value *MaxUIntTripCount =
2319 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2320 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
2321
2322 // Don't execute the vector loop if (UMax - n) < (VF * UF).
2323 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
2324 }
2325 return CheckMinIters;
2326}
2327
2328/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2329/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2330/// predecessors and successors of VPBB, if any, are rewired to the new
2331/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2333 BasicBlock *IRBB,
2334 VPlan *Plan = nullptr) {
2335 if (!Plan)
2336 Plan = VPBB->getPlan();
2337 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2338 auto IP = IRVPBB->begin();
2339 for (auto &R : make_early_inc_range(VPBB->phis()))
2340 R.moveBefore(*IRVPBB, IP);
2341
2342 for (auto &R :
2344 R.moveBefore(*IRVPBB, IRVPBB->end());
2345
2346 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2347 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2348 return IRVPBB;
2349}
2350
2352 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2353 assert(VectorPH && "Invalid loop structure");
2354 assert((OrigLoop->getUniqueLatchExitBlock() ||
2355 Cost->requiresScalarEpilogue(VF.isVector())) &&
2356 "loops not exiting via the latch without required epilogue?");
2357
2358 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2359 // wrapping the newly created scalar preheader here at the moment, because the
2360 // Plan's scalar preheader may be unreachable at this point. Instead it is
2361 // replaced in executePlan.
2362 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2363 Twine(Prefix) + "scalar.ph");
2364}
2365
2366/// Return the expanded step for \p ID using \p ExpandedSCEVs to look up SCEV
2367/// expansion results.
2369 const SCEV2ValueTy &ExpandedSCEVs) {
2370 const SCEV *Step = ID.getStep();
2371 if (auto *C = dyn_cast<SCEVConstant>(Step))
2372 return C->getValue();
2373 if (auto *U = dyn_cast<SCEVUnknown>(Step))
2374 return U->getValue();
2375 Value *V = ExpandedSCEVs.lookup(Step);
2376 assert(V && "SCEV must be expanded at this point");
2377 return V;
2378}
2379
2380/// Knowing that loop \p L executes a single vector iteration, add instructions
2381/// that will get simplified and thus should not have any cost to \p
2382/// InstsToIgnore.
2385 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2386 auto *Cmp = L->getLatchCmpInst();
2387 if (Cmp)
2388 InstsToIgnore.insert(Cmp);
2389 for (const auto &KV : IL) {
2390 // Extract the key by hand so that it can be used in the lambda below. Note
2391 // that captured structured bindings are a C++20 extension.
2392 const PHINode *IV = KV.first;
2393
2394 // Get next iteration value of the induction variable.
2395 Instruction *IVInst =
2396 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2397 if (all_of(IVInst->users(),
2398 [&](const User *U) { return U == IV || U == Cmp; }))
2399 InstsToIgnore.insert(IVInst);
2400 }
2401}
2402
2404 // Create a new IR basic block for the scalar preheader.
2405 BasicBlock *ScalarPH = createScalarPreheader("");
2406 return ScalarPH->getSinglePredecessor();
2407}
2408
2409namespace {
2410
2411struct CSEDenseMapInfo {
2412 static bool canHandle(const Instruction *I) {
2415 }
2416
2417 static inline Instruction *getEmptyKey() {
2419 }
2420
2421 static inline Instruction *getTombstoneKey() {
2422 return DenseMapInfo<Instruction *>::getTombstoneKey();
2423 }
2424
2425 static unsigned getHashValue(const Instruction *I) {
2426 assert(canHandle(I) && "Unknown instruction!");
2427 return hash_combine(I->getOpcode(),
2428 hash_combine_range(I->operand_values()));
2429 }
2430
2431 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2432 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2433 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2434 return LHS == RHS;
2435 return LHS->isIdenticalTo(RHS);
2436 }
2437};
2438
2439} // end anonymous namespace
2440
2441/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2442/// removal, in favor of the VPlan-based one.
2443static void legacyCSE(BasicBlock *BB) {
2444 // Perform simple cse.
2446 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2447 if (!CSEDenseMapInfo::canHandle(&In))
2448 continue;
2449
2450 // Check if we can replace this instruction with any of the
2451 // visited instructions.
2452 if (Instruction *V = CSEMap.lookup(&In)) {
2453 In.replaceAllUsesWith(V);
2454 In.eraseFromParent();
2455 continue;
2456 }
2457
2458 CSEMap[&In] = &In;
2459 }
2460}
2461
2462/// This function attempts to return a value that represents the ElementCount
2463/// at runtime. For fixed-width VFs we know this precisely at compile
2464/// time, but for scalable VFs we calculate it based on an estimate of the
2465/// vscale value.
2467 std::optional<unsigned> VScale) {
2468 unsigned EstimatedVF = VF.getKnownMinValue();
2469 if (VF.isScalable())
2470 if (VScale)
2471 EstimatedVF *= *VScale;
2472 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2473 return EstimatedVF;
2474}
2475
2478 ElementCount VF) const {
2479 // We only need to calculate a cost if the VF is scalar; for actual vectors
2480 // we should already have a pre-calculated cost at each VF.
2481 if (!VF.isScalar())
2482 return getCallWideningDecision(CI, VF).Cost;
2483
2484 Type *RetTy = CI->getType();
2486 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2487 return *RedCost;
2488
2490 for (auto &ArgOp : CI->args())
2491 Tys.push_back(ArgOp->getType());
2492
2493 InstructionCost ScalarCallCost =
2494 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2495
2496 // If this is an intrinsic we may have a lower cost for it.
2499 return std::min(ScalarCallCost, IntrinsicCost);
2500 }
2501 return ScalarCallCost;
2502}
2503
2505 if (VF.isScalar() || !canVectorizeTy(Ty))
2506 return Ty;
2507 return toVectorizedTy(Ty, VF);
2508}
2509
2512 ElementCount VF) const {
2514 assert(ID && "Expected intrinsic call!");
2515 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2516 FastMathFlags FMF;
2517 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2518 FMF = FPMO->getFastMathFlags();
2519
2522 SmallVector<Type *> ParamTys;
2523 std::transform(FTy->param_begin(), FTy->param_end(),
2524 std::back_inserter(ParamTys),
2525 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2526
2527 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2530 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2531}
2532
2534 // Fix widened non-induction PHIs by setting up the PHI operands.
2535 fixNonInductionPHIs(State);
2536
2537 // Don't apply optimizations below when no (vector) loop remains, as they all
2538 // require one at the moment.
2539 VPBasicBlock *HeaderVPBB =
2540 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2541 if (!HeaderVPBB)
2542 return;
2543
2544 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2545
2546 // Remove redundant induction instructions.
2547 legacyCSE(HeaderBB);
2548}
2549
2551 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2553 for (VPRecipeBase &P : VPBB->phis()) {
2555 if (!VPPhi)
2556 continue;
2557 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2558 // Make sure the builder has a valid insert point.
2559 Builder.SetInsertPoint(NewPhi);
2560 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2561 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2562 }
2563 }
2564}
2565
2566void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2567 // We should not collect Scalars more than once per VF. Right now, this
2568 // function is called from collectUniformsAndScalars(), which already does
2569 // this check. Collecting Scalars for VF=1 does not make any sense.
2570 assert(VF.isVector() && !Scalars.contains(VF) &&
2571 "This function should not be visited twice for the same VF");
2572
2573 // This avoids any chances of creating a REPLICATE recipe during planning
2574 // since that would result in generation of scalarized code during execution,
2575 // which is not supported for scalable vectors.
2576 if (VF.isScalable()) {
2577 Scalars[VF].insert_range(Uniforms[VF]);
2578 return;
2579 }
2580
2582
2583 // These sets are used to seed the analysis with pointers used by memory
2584 // accesses that will remain scalar.
2586 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2587 auto *Latch = TheLoop->getLoopLatch();
2588
2589 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2590 // The pointer operands of loads and stores will be scalar as long as the
2591 // memory access is not a gather or scatter operation. The value operand of a
2592 // store will remain scalar if the store is scalarized.
2593 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2594 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2595 assert(WideningDecision != CM_Unknown &&
2596 "Widening decision should be ready at this moment");
2597 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2598 if (Ptr == Store->getValueOperand())
2599 return WideningDecision == CM_Scalarize;
2600 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2601 "Ptr is neither a value or pointer operand");
2602 return WideningDecision != CM_GatherScatter;
2603 };
2604
2605 // A helper that returns true if the given value is a getelementptr
2606 // instruction contained in the loop.
2607 auto IsLoopVaryingGEP = [&](Value *V) {
2608 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2609 };
2610
2611 // A helper that evaluates a memory access's use of a pointer. If the use will
2612 // be a scalar use and the pointer is only used by memory accesses, we place
2613 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2614 // PossibleNonScalarPtrs.
2615 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2616 // We only care about bitcast and getelementptr instructions contained in
2617 // the loop.
2618 if (!IsLoopVaryingGEP(Ptr))
2619 return;
2620
2621 // If the pointer has already been identified as scalar (e.g., if it was
2622 // also identified as uniform), there's nothing to do.
2623 auto *I = cast<Instruction>(Ptr);
2624 if (Worklist.count(I))
2625 return;
2626
2627 // If the use of the pointer will be a scalar use, and all users of the
2628 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2629 // place the pointer in PossibleNonScalarPtrs.
2630 if (IsScalarUse(MemAccess, Ptr) &&
2632 ScalarPtrs.insert(I);
2633 else
2634 PossibleNonScalarPtrs.insert(I);
2635 };
2636
2637 // We seed the scalars analysis with three classes of instructions: (1)
2638 // instructions marked uniform-after-vectorization and (2) bitcast,
2639 // getelementptr and (pointer) phi instructions used by memory accesses
2640 // requiring a scalar use.
2641 //
2642 // (1) Add to the worklist all instructions that have been identified as
2643 // uniform-after-vectorization.
2644 Worklist.insert_range(Uniforms[VF]);
2645
2646 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2647 // memory accesses requiring a scalar use. The pointer operands of loads and
2648 // stores will be scalar unless the operation is a gather or scatter.
2649 // The value operand of a store will remain scalar if the store is scalarized.
2650 for (auto *BB : TheLoop->blocks())
2651 for (auto &I : *BB) {
2652 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2653 EvaluatePtrUse(Load, Load->getPointerOperand());
2654 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2655 EvaluatePtrUse(Store, Store->getPointerOperand());
2656 EvaluatePtrUse(Store, Store->getValueOperand());
2657 }
2658 }
2659 for (auto *I : ScalarPtrs)
2660 if (!PossibleNonScalarPtrs.count(I)) {
2661 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2662 Worklist.insert(I);
2663 }
2664
2665 // Insert the forced scalars.
2666 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2667 // induction variable when the PHI user is scalarized.
2668 auto ForcedScalar = ForcedScalars.find(VF);
2669 if (ForcedScalar != ForcedScalars.end())
2670 for (auto *I : ForcedScalar->second) {
2671 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2672 Worklist.insert(I);
2673 }
2674
2675 // Expand the worklist by looking through any bitcasts and getelementptr
2676 // instructions we've already identified as scalar. This is similar to the
2677 // expansion step in collectLoopUniforms(); however, here we're only
2678 // expanding to include additional bitcasts and getelementptr instructions.
2679 unsigned Idx = 0;
2680 while (Idx != Worklist.size()) {
2681 Instruction *Dst = Worklist[Idx++];
2682 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2683 continue;
2684 auto *Src = cast<Instruction>(Dst->getOperand(0));
2685 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2686 auto *J = cast<Instruction>(U);
2687 return !TheLoop->contains(J) || Worklist.count(J) ||
2688 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2689 IsScalarUse(J, Src));
2690 })) {
2691 Worklist.insert(Src);
2692 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2693 }
2694 }
2695
2696 // An induction variable will remain scalar if all users of the induction
2697 // variable and induction variable update remain scalar.
2698 for (const auto &Induction : Legal->getInductionVars()) {
2699 auto *Ind = Induction.first;
2700 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2701
2702 // If tail-folding is applied, the primary induction variable will be used
2703 // to feed a vector compare.
2704 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2705 continue;
2706
2707 // Returns true if \p Indvar is a pointer induction that is used directly by
2708 // load/store instruction \p I.
2709 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2710 Instruction *I) {
2711 return Induction.second.getKind() ==
2714 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2715 };
2716
2717 // Determine if all users of the induction variable are scalar after
2718 // vectorization.
2719 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2720 auto *I = cast<Instruction>(U);
2721 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2722 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2723 });
2724 if (!ScalarInd)
2725 continue;
2726
2727 // If the induction variable update is a fixed-order recurrence, neither the
2728 // induction variable or its update should be marked scalar after
2729 // vectorization.
2730 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2731 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2732 continue;
2733
2734 // Determine if all users of the induction variable update instruction are
2735 // scalar after vectorization.
2736 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2737 auto *I = cast<Instruction>(U);
2738 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2739 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2740 });
2741 if (!ScalarIndUpdate)
2742 continue;
2743
2744 // The induction variable and its update instruction will remain scalar.
2745 Worklist.insert(Ind);
2746 Worklist.insert(IndUpdate);
2747 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2748 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2749 << "\n");
2750 }
2751
2752 Scalars[VF].insert_range(Worklist);
2753}
2754
2756 Instruction *I, ElementCount VF) const {
2757 if (!isPredicatedInst(I))
2758 return false;
2759
2760 // Do we have a non-scalar lowering for this predicated
2761 // instruction? No - it is scalar with predication.
2762 switch(I->getOpcode()) {
2763 default:
2764 return true;
2765 case Instruction::Call:
2766 if (VF.isScalar())
2767 return true;
2769 case Instruction::Load:
2770 case Instruction::Store: {
2772 auto *Ty = getLoadStoreType(I);
2773 unsigned AS = getLoadStoreAddressSpace(I);
2774 Type *VTy = Ty;
2775 if (VF.isVector())
2776 VTy = VectorType::get(Ty, VF);
2777 const Align Alignment = getLoadStoreAlignment(I);
2778 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2779 TTI.isLegalMaskedGather(VTy, Alignment))
2780 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2781 TTI.isLegalMaskedScatter(VTy, Alignment));
2782 }
2783 case Instruction::UDiv:
2784 case Instruction::SDiv:
2785 case Instruction::SRem:
2786 case Instruction::URem: {
2787 // We have the option to use the safe-divisor idiom to avoid predication.
2788 // The cost based decision here will always select safe-divisor for
2789 // scalable vectors as scalarization isn't legal.
2790 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2791 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2792 }
2793 }
2794}
2795
2796// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2798 // TODO: We can use the loop-preheader as context point here and get
2799 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2801 (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
2803 return false;
2804
2805 // If the instruction was executed conditionally in the original scalar loop,
2806 // predication is needed with a mask whose lanes are all possibly inactive.
2807 if (Legal->blockNeedsPredication(I->getParent()))
2808 return true;
2809
2810 // If we're not folding the tail by masking, predication is unnecessary.
2811 if (!foldTailByMasking())
2812 return false;
2813
2814 // All that remain are instructions with side-effects originally executed in
2815 // the loop unconditionally, but now execute under a tail-fold mask (only)
2816 // having at least one active lane (the first). If the side-effects of the
2817 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2818 // - it will cause the same side-effects as when masked.
2819 switch(I->getOpcode()) {
2820 default:
2822 "instruction should have been considered by earlier checks");
2823 case Instruction::Call:
2824 // Side-effects of a Call are assumed to be non-invariant, needing a
2825 // (fold-tail) mask.
2826 assert(Legal->isMaskRequired(I) &&
2827 "should have returned earlier for calls not needing a mask");
2828 return true;
2829 case Instruction::Load:
2830 // If the address is loop invariant no predication is needed.
2831 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2832 case Instruction::Store: {
2833 // For stores, we need to prove both speculation safety (which follows from
2834 // the same argument as loads), but also must prove the value being stored
2835 // is correct. The easiest form of the later is to require that all values
2836 // stored are the same.
2837 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2838 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2839 }
2840 case Instruction::UDiv:
2841 case Instruction::SDiv:
2842 case Instruction::SRem:
2843 case Instruction::URem:
2844 // If the divisor is loop-invariant no predication is needed.
2845 return !Legal->isInvariant(I->getOperand(1));
2846 }
2847}
2848
2849std::pair<InstructionCost, InstructionCost>
2851 ElementCount VF) const {
2852 assert(I->getOpcode() == Instruction::UDiv ||
2853 I->getOpcode() == Instruction::SDiv ||
2854 I->getOpcode() == Instruction::SRem ||
2855 I->getOpcode() == Instruction::URem);
2857
2858 // Scalarization isn't legal for scalable vector types
2859 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2860 if (!VF.isScalable()) {
2861 // Get the scalarization cost and scale this amount by the probability of
2862 // executing the predicated block. If the instruction is not predicated,
2863 // we fall through to the next case.
2864 ScalarizationCost = 0;
2865
2866 // These instructions have a non-void type, so account for the phi nodes
2867 // that we will create. This cost is likely to be zero. The phi node
2868 // cost, if any, should be scaled by the block probability because it
2869 // models a copy at the end of each predicated block.
2870 ScalarizationCost +=
2871 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2872
2873 // The cost of the non-predicated instruction.
2874 ScalarizationCost +=
2875 VF.getFixedValue() *
2876 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2877
2878 // The cost of insertelement and extractelement instructions needed for
2879 // scalarization.
2880 ScalarizationCost += getScalarizationOverhead(I, VF);
2881
2882 // Scale the cost by the probability of executing the predicated blocks.
2883 // This assumes the predicated block for each vector lane is equally
2884 // likely.
2885 ScalarizationCost = ScalarizationCost / getPredBlockCostDivisor(CostKind);
2886 }
2887
2888 InstructionCost SafeDivisorCost = 0;
2889 auto *VecTy = toVectorTy(I->getType(), VF);
2890 // The cost of the select guard to ensure all lanes are well defined
2891 // after we speculate above any internal control flow.
2892 SafeDivisorCost +=
2893 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2894 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2896
2897 SmallVector<const Value *, 4> Operands(I->operand_values());
2898 SafeDivisorCost += TTI.getArithmeticInstrCost(
2899 I->getOpcode(), VecTy, CostKind,
2900 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2901 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2902 Operands, I);
2903 return {ScalarizationCost, SafeDivisorCost};
2904}
2905
2907 Instruction *I, ElementCount VF) const {
2908 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2910 "Decision should not be set yet.");
2911 auto *Group = getInterleavedAccessGroup(I);
2912 assert(Group && "Must have a group.");
2913 unsigned InterleaveFactor = Group->getFactor();
2914
2915 // If the instruction's allocated size doesn't equal its type size, it
2916 // requires padding and will be scalarized.
2917 auto &DL = I->getDataLayout();
2918 auto *ScalarTy = getLoadStoreType(I);
2919 if (hasIrregularType(ScalarTy, DL))
2920 return false;
2921
2922 // For scalable vectors, the interleave factors must be <= 8 since we require
2923 // the (de)interleaveN intrinsics instead of shufflevectors.
2924 if (VF.isScalable() && InterleaveFactor > 8)
2925 return false;
2926
2927 // If the group involves a non-integral pointer, we may not be able to
2928 // losslessly cast all values to a common type.
2929 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2930 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
2931 Instruction *Member = Group->getMember(Idx);
2932 if (!Member)
2933 continue;
2934 auto *MemberTy = getLoadStoreType(Member);
2935 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2936 // Don't coerce non-integral pointers to integers or vice versa.
2937 if (MemberNI != ScalarNI)
2938 // TODO: Consider adding special nullptr value case here
2939 return false;
2940 if (MemberNI && ScalarNI &&
2941 ScalarTy->getPointerAddressSpace() !=
2942 MemberTy->getPointerAddressSpace())
2943 return false;
2944 }
2945
2946 // Check if masking is required.
2947 // A Group may need masking for one of two reasons: it resides in a block that
2948 // needs predication, or it was decided to use masking to deal with gaps
2949 // (either a gap at the end of a load-access that may result in a speculative
2950 // load, or any gaps in a store-access).
2951 bool PredicatedAccessRequiresMasking =
2952 blockNeedsPredicationForAnyReason(I->getParent()) &&
2953 Legal->isMaskRequired(I);
2954 bool LoadAccessWithGapsRequiresEpilogMasking =
2955 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2957 bool StoreAccessWithGapsRequiresMasking =
2958 isa<StoreInst>(I) && !Group->isFull();
2959 if (!PredicatedAccessRequiresMasking &&
2960 !LoadAccessWithGapsRequiresEpilogMasking &&
2961 !StoreAccessWithGapsRequiresMasking)
2962 return true;
2963
2964 // If masked interleaving is required, we expect that the user/target had
2965 // enabled it, because otherwise it either wouldn't have been created or
2966 // it should have been invalidated by the CostModel.
2968 "Masked interleave-groups for predicated accesses are not enabled.");
2969
2970 if (Group->isReverse())
2971 return false;
2972
2973 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2974 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2975 StoreAccessWithGapsRequiresMasking;
2976 if (VF.isScalable() && NeedsMaskForGaps)
2977 return false;
2978
2979 auto *Ty = getLoadStoreType(I);
2980 const Align Alignment = getLoadStoreAlignment(I);
2981 unsigned AS = getLoadStoreAddressSpace(I);
2982 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
2983 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
2984}
2985
2987 Instruction *I, ElementCount VF) {
2988 // Get and ensure we have a valid memory instruction.
2989 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
2990
2992 auto *ScalarTy = getLoadStoreType(I);
2993
2994 // In order to be widened, the pointer should be consecutive, first of all.
2995 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
2996 return false;
2997
2998 // If the instruction is a store located in a predicated block, it will be
2999 // scalarized.
3000 if (isScalarWithPredication(I, VF))
3001 return false;
3002
3003 // If the instruction's allocated size doesn't equal it's type size, it
3004 // requires padding and will be scalarized.
3005 auto &DL = I->getDataLayout();
3006 if (hasIrregularType(ScalarTy, DL))
3007 return false;
3008
3009 return true;
3010}
3011
3012void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
3013 // We should not collect Uniforms more than once per VF. Right now,
3014 // this function is called from collectUniformsAndScalars(), which
3015 // already does this check. Collecting Uniforms for VF=1 does not make any
3016 // sense.
3017
3018 assert(VF.isVector() && !Uniforms.contains(VF) &&
3019 "This function should not be visited twice for the same VF");
3020
3021 // Visit the list of Uniforms. If we find no uniform value, we won't
3022 // analyze again. Uniforms.count(VF) will return 1.
3023 Uniforms[VF].clear();
3024
3025 // Now we know that the loop is vectorizable!
3026 // Collect instructions inside the loop that will remain uniform after
3027 // vectorization.
3028
3029 // Global values, params and instructions outside of current loop are out of
3030 // scope.
3031 auto IsOutOfScope = [&](Value *V) -> bool {
3033 return (!I || !TheLoop->contains(I));
3034 };
3035
3036 // Worklist containing uniform instructions demanding lane 0.
3037 SetVector<Instruction *> Worklist;
3038
3039 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3040 // that require predication must not be considered uniform after
3041 // vectorization, because that would create an erroneous replicating region
3042 // where only a single instance out of VF should be formed.
3043 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3044 if (IsOutOfScope(I)) {
3045 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3046 << *I << "\n");
3047 return;
3048 }
3049 if (isPredicatedInst(I)) {
3050 LLVM_DEBUG(
3051 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3052 << "\n");
3053 return;
3054 }
3055 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3056 Worklist.insert(I);
3057 };
3058
3059 // Start with the conditional branches exiting the loop. If the branch
3060 // condition is an instruction contained in the loop that is only used by the
3061 // branch, it is uniform. Note conditions from uncountable early exits are not
3062 // uniform.
3064 TheLoop->getExitingBlocks(Exiting);
3065 for (BasicBlock *E : Exiting) {
3066 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3067 continue;
3068 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3069 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3070 AddToWorklistIfAllowed(Cmp);
3071 }
3072
3073 auto PrevVF = VF.divideCoefficientBy(2);
3074 // Return true if all lanes perform the same memory operation, and we can
3075 // thus choose to execute only one.
3076 auto IsUniformMemOpUse = [&](Instruction *I) {
3077 // If the value was already known to not be uniform for the previous
3078 // (smaller VF), it cannot be uniform for the larger VF.
3079 if (PrevVF.isVector()) {
3080 auto Iter = Uniforms.find(PrevVF);
3081 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3082 return false;
3083 }
3084 if (!Legal->isUniformMemOp(*I, VF))
3085 return false;
3086 if (isa<LoadInst>(I))
3087 // Loading the same address always produces the same result - at least
3088 // assuming aliasing and ordering which have already been checked.
3089 return true;
3090 // Storing the same value on every iteration.
3091 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3092 };
3093
3094 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3095 InstWidening WideningDecision = getWideningDecision(I, VF);
3096 assert(WideningDecision != CM_Unknown &&
3097 "Widening decision should be ready at this moment");
3098
3099 if (IsUniformMemOpUse(I))
3100 return true;
3101
3102 return (WideningDecision == CM_Widen ||
3103 WideningDecision == CM_Widen_Reverse ||
3104 WideningDecision == CM_Interleave);
3105 };
3106
3107 // Returns true if Ptr is the pointer operand of a memory access instruction
3108 // I, I is known to not require scalarization, and the pointer is not also
3109 // stored.
3110 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3111 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3112 return false;
3113 return getLoadStorePointerOperand(I) == Ptr &&
3114 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3115 };
3116
3117 // Holds a list of values which are known to have at least one uniform use.
3118 // Note that there may be other uses which aren't uniform. A "uniform use"
3119 // here is something which only demands lane 0 of the unrolled iterations;
3120 // it does not imply that all lanes produce the same value (e.g. this is not
3121 // the usual meaning of uniform)
3122 SetVector<Value *> HasUniformUse;
3123
3124 // Scan the loop for instructions which are either a) known to have only
3125 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3126 for (auto *BB : TheLoop->blocks())
3127 for (auto &I : *BB) {
3128 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3129 switch (II->getIntrinsicID()) {
3130 case Intrinsic::sideeffect:
3131 case Intrinsic::experimental_noalias_scope_decl:
3132 case Intrinsic::assume:
3133 case Intrinsic::lifetime_start:
3134 case Intrinsic::lifetime_end:
3135 if (TheLoop->hasLoopInvariantOperands(&I))
3136 AddToWorklistIfAllowed(&I);
3137 break;
3138 default:
3139 break;
3140 }
3141 }
3142
3143 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3144 if (IsOutOfScope(EVI->getAggregateOperand())) {
3145 AddToWorklistIfAllowed(EVI);
3146 continue;
3147 }
3148 // Only ExtractValue instructions where the aggregate value comes from a
3149 // call are allowed to be non-uniform.
3150 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3151 "Expected aggregate value to be call return value");
3152 }
3153
3154 // If there's no pointer operand, there's nothing to do.
3156 if (!Ptr)
3157 continue;
3158
3159 // If the pointer can be proven to be uniform, always add it to the
3160 // worklist.
3161 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3162 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3163
3164 if (IsUniformMemOpUse(&I))
3165 AddToWorklistIfAllowed(&I);
3166
3167 if (IsVectorizedMemAccessUse(&I, Ptr))
3168 HasUniformUse.insert(Ptr);
3169 }
3170
3171 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3172 // demanding) users. Since loops are assumed to be in LCSSA form, this
3173 // disallows uses outside the loop as well.
3174 for (auto *V : HasUniformUse) {
3175 if (IsOutOfScope(V))
3176 continue;
3177 auto *I = cast<Instruction>(V);
3178 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3179 auto *UI = cast<Instruction>(U);
3180 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3181 });
3182 if (UsersAreMemAccesses)
3183 AddToWorklistIfAllowed(I);
3184 }
3185
3186 // Expand Worklist in topological order: whenever a new instruction
3187 // is added , its users should be already inside Worklist. It ensures
3188 // a uniform instruction will only be used by uniform instructions.
3189 unsigned Idx = 0;
3190 while (Idx != Worklist.size()) {
3191 Instruction *I = Worklist[Idx++];
3192
3193 for (auto *OV : I->operand_values()) {
3194 // isOutOfScope operands cannot be uniform instructions.
3195 if (IsOutOfScope(OV))
3196 continue;
3197 // First order recurrence Phi's should typically be considered
3198 // non-uniform.
3199 auto *OP = dyn_cast<PHINode>(OV);
3200 if (OP && Legal->isFixedOrderRecurrence(OP))
3201 continue;
3202 // If all the users of the operand are uniform, then add the
3203 // operand into the uniform worklist.
3204 auto *OI = cast<Instruction>(OV);
3205 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3206 auto *J = cast<Instruction>(U);
3207 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3208 }))
3209 AddToWorklistIfAllowed(OI);
3210 }
3211 }
3212
3213 // For an instruction to be added into Worklist above, all its users inside
3214 // the loop should also be in Worklist. However, this condition cannot be
3215 // true for phi nodes that form a cyclic dependence. We must process phi
3216 // nodes separately. An induction variable will remain uniform if all users
3217 // of the induction variable and induction variable update remain uniform.
3218 // The code below handles both pointer and non-pointer induction variables.
3219 BasicBlock *Latch = TheLoop->getLoopLatch();
3220 for (const auto &Induction : Legal->getInductionVars()) {
3221 auto *Ind = Induction.first;
3222 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3223
3224 // Determine if all users of the induction variable are uniform after
3225 // vectorization.
3226 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3227 auto *I = cast<Instruction>(U);
3228 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3229 IsVectorizedMemAccessUse(I, Ind);
3230 });
3231 if (!UniformInd)
3232 continue;
3233
3234 // Determine if all users of the induction variable update instruction are
3235 // uniform after vectorization.
3236 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3237 auto *I = cast<Instruction>(U);
3238 return I == Ind || Worklist.count(I) ||
3239 IsVectorizedMemAccessUse(I, IndUpdate);
3240 });
3241 if (!UniformIndUpdate)
3242 continue;
3243
3244 // The induction variable and its update instruction will remain uniform.
3245 AddToWorklistIfAllowed(Ind);
3246 AddToWorklistIfAllowed(IndUpdate);
3247 }
3248
3249 Uniforms[VF].insert_range(Worklist);
3250}
3251
3253 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3254
3255 if (Legal->getRuntimePointerChecking()->Need) {
3256 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3257 "runtime pointer checks needed. Enable vectorization of this "
3258 "loop with '#pragma clang loop vectorize(enable)' when "
3259 "compiling with -Os/-Oz",
3260 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3261 return true;
3262 }
3263
3264 if (!PSE.getPredicate().isAlwaysTrue()) {
3265 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3266 "runtime SCEV checks needed. Enable vectorization of this "
3267 "loop with '#pragma clang loop vectorize(enable)' when "
3268 "compiling with -Os/-Oz",
3269 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3270 return true;
3271 }
3272
3273 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3274 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3275 reportVectorizationFailure("Runtime stride check for small trip count",
3276 "runtime stride == 1 checks needed. Enable vectorization of "
3277 "this loop without such check by compiling with -Os/-Oz",
3278 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3279 return true;
3280 }
3281
3282 return false;
3283}
3284
3285bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3286 if (IsScalableVectorizationAllowed)
3287 return *IsScalableVectorizationAllowed;
3288
3289 IsScalableVectorizationAllowed = false;
3290 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3291 return false;
3292
3293 if (Hints->isScalableVectorizationDisabled()) {
3294 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3295 "ScalableVectorizationDisabled", ORE, TheLoop);
3296 return false;
3297 }
3298
3299 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3300
3301 auto MaxScalableVF = ElementCount::getScalable(
3302 std::numeric_limits<ElementCount::ScalarTy>::max());
3303
3304 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3305 // FIXME: While for scalable vectors this is currently sufficient, this should
3306 // be replaced by a more detailed mechanism that filters out specific VFs,
3307 // instead of invalidating vectorization for a whole set of VFs based on the
3308 // MaxVF.
3309
3310 // Disable scalable vectorization if the loop contains unsupported reductions.
3311 if (!canVectorizeReductions(MaxScalableVF)) {
3313 "Scalable vectorization not supported for the reduction "
3314 "operations found in this loop.",
3315 "ScalableVFUnfeasible", ORE, TheLoop);
3316 return false;
3317 }
3318
3319 // Disable scalable vectorization if the loop contains any instructions
3320 // with element types not supported for scalable vectors.
3321 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3322 return !Ty->isVoidTy() &&
3324 })) {
3325 reportVectorizationInfo("Scalable vectorization is not supported "
3326 "for all element types found in this loop.",
3327 "ScalableVFUnfeasible", ORE, TheLoop);
3328 return false;
3329 }
3330
3331 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3332 reportVectorizationInfo("The target does not provide maximum vscale value "
3333 "for safe distance analysis.",
3334 "ScalableVFUnfeasible", ORE, TheLoop);
3335 return false;
3336 }
3337
3338 IsScalableVectorizationAllowed = true;
3339 return true;
3340}
3341
3342ElementCount
3343LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3344 if (!isScalableVectorizationAllowed())
3345 return ElementCount::getScalable(0);
3346
3347 auto MaxScalableVF = ElementCount::getScalable(
3348 std::numeric_limits<ElementCount::ScalarTy>::max());
3349 if (Legal->isSafeForAnyVectorWidth())
3350 return MaxScalableVF;
3351
3352 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3353 // Limit MaxScalableVF by the maximum safe dependence distance.
3354 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
3355
3356 if (!MaxScalableVF)
3358 "Max legal vector width too small, scalable vectorization "
3359 "unfeasible.",
3360 "ScalableVFUnfeasible", ORE, TheLoop);
3361
3362 return MaxScalableVF;
3363}
3364
3365FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3366 unsigned MaxTripCount, ElementCount UserVF, bool FoldTailByMasking) {
3367 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3368 unsigned SmallestType, WidestType;
3369 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3370
3371 // Get the maximum safe dependence distance in bits computed by LAA.
3372 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3373 // the memory accesses that is most restrictive (involved in the smallest
3374 // dependence distance).
3375 unsigned MaxSafeElementsPowerOf2 =
3376 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3377 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3378 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3379 MaxSafeElementsPowerOf2 =
3380 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3381 }
3382 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3383 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3384
3385 if (!Legal->isSafeForAnyVectorWidth())
3386 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3387
3388 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3389 << ".\n");
3390 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3391 << ".\n");
3392
3393 // First analyze the UserVF, fall back if the UserVF should be ignored.
3394 if (UserVF) {
3395 auto MaxSafeUserVF =
3396 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3397
3398 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3399 // If `VF=vscale x N` is safe, then so is `VF=N`
3400 if (UserVF.isScalable())
3401 return FixedScalableVFPair(
3402 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3403
3404 return UserVF;
3405 }
3406
3407 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3408
3409 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3410 // is better to ignore the hint and let the compiler choose a suitable VF.
3411 if (!UserVF.isScalable()) {
3412 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3413 << " is unsafe, clamping to max safe VF="
3414 << MaxSafeFixedVF << ".\n");
3415 ORE->emit([&]() {
3416 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3417 TheLoop->getStartLoc(),
3418 TheLoop->getHeader())
3419 << "User-specified vectorization factor "
3420 << ore::NV("UserVectorizationFactor", UserVF)
3421 << " is unsafe, clamping to maximum safe vectorization factor "
3422 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3423 });
3424 return MaxSafeFixedVF;
3425 }
3426
3428 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3429 << " is ignored because scalable vectors are not "
3430 "available.\n");
3431 ORE->emit([&]() {
3432 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3433 TheLoop->getStartLoc(),
3434 TheLoop->getHeader())
3435 << "User-specified vectorization factor "
3436 << ore::NV("UserVectorizationFactor", UserVF)
3437 << " is ignored because the target does not support scalable "
3438 "vectors. The compiler will pick a more suitable value.";
3439 });
3440 } else {
3441 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3442 << " is unsafe. Ignoring scalable UserVF.\n");
3443 ORE->emit([&]() {
3444 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3445 TheLoop->getStartLoc(),
3446 TheLoop->getHeader())
3447 << "User-specified vectorization factor "
3448 << ore::NV("UserVectorizationFactor", UserVF)
3449 << " is unsafe. Ignoring the hint to let the compiler pick a "
3450 "more suitable value.";
3451 });
3452 }
3453 }
3454
3455 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3456 << " / " << WidestType << " bits.\n");
3457
3458 FixedScalableVFPair Result(ElementCount::getFixed(1),
3460 if (auto MaxVF =
3461 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3462 MaxSafeFixedVF, FoldTailByMasking))
3463 Result.FixedVF = MaxVF;
3464
3465 if (auto MaxVF =
3466 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3467 MaxSafeScalableVF, FoldTailByMasking))
3468 if (MaxVF.isScalable()) {
3469 Result.ScalableVF = MaxVF;
3470 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3471 << "\n");
3472 }
3473
3474 return Result;
3475}
3476
3477FixedScalableVFPair
3479 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3480 // TODO: It may be useful to do since it's still likely to be dynamically
3481 // uniform if the target can skip.
3483 "Not inserting runtime ptr check for divergent target",
3484 "runtime pointer checks needed. Not enabled for divergent target",
3485 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3487 }
3488
3489 ScalarEvolution *SE = PSE.getSE();
3491 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3492 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3493 if (TC != ElementCount::getFixed(MaxTC))
3494 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3495 if (TC.isScalar()) {
3496 reportVectorizationFailure("Single iteration (non) loop",
3497 "loop trip count is one, irrelevant for vectorization",
3498 "SingleIterationLoop", ORE, TheLoop);
3500 }
3501
3502 // If BTC matches the widest induction type and is -1 then the trip count
3503 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3504 // to vectorize.
3505 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3506 if (!isa<SCEVCouldNotCompute>(BTC) &&
3507 BTC->getType()->getScalarSizeInBits() >=
3508 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3510 SE->getMinusOne(BTC->getType()))) {
3512 "Trip count computation wrapped",
3513 "backedge-taken count is -1, loop trip count wrapped to 0",
3514 "TripCountWrapped", ORE, TheLoop);
3516 }
3517
3518 switch (ScalarEpilogueStatus) {
3520 return computeFeasibleMaxVF(MaxTC, UserVF, false);
3522 [[fallthrough]];
3524 LLVM_DEBUG(
3525 dbgs() << "LV: vector predicate hint/switch found.\n"
3526 << "LV: Not allowing scalar epilogue, creating predicated "
3527 << "vector loop.\n");
3528 break;
3530 // fallthrough as a special case of OptForSize
3532 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3533 LLVM_DEBUG(
3534 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3535 else
3536 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3537 << "count.\n");
3538
3539 // Bail if runtime checks are required, which are not good when optimising
3540 // for size.
3543
3544 break;
3545 }
3546
3547 // Now try the tail folding
3548
3549 // Invalidate interleave groups that require an epilogue if we can't mask
3550 // the interleave-group.
3552 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3553 "No decisions should have been taken at this point");
3554 // Note: There is no need to invalidate any cost modeling decisions here, as
3555 // none were taken so far.
3556 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3557 }
3558
3559 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(MaxTC, UserVF, true);
3560
3561 // Avoid tail folding if the trip count is known to be a multiple of any VF
3562 // we choose.
3563 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3564 MaxFactors.FixedVF.getFixedValue();
3565 if (MaxFactors.ScalableVF) {
3566 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3567 if (MaxVScale && TTI.isVScaleKnownToBeAPowerOfTwo()) {
3568 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3569 *MaxPowerOf2RuntimeVF,
3570 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3571 } else
3572 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3573 }
3574
3575 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3576 // Return false if the loop is neither a single-latch-exit loop nor an
3577 // early-exit loop as tail-folding is not supported in that case.
3578 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3579 !Legal->hasUncountableEarlyExit())
3580 return false;
3581 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3582 ScalarEvolution *SE = PSE.getSE();
3583 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3584 // with uncountable exits. For countable loops, the symbolic maximum must
3585 // remain identical to the known back-edge taken count.
3586 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3587 assert((Legal->hasUncountableEarlyExit() ||
3588 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3589 "Invalid loop count");
3590 const SCEV *ExitCount = SE->getAddExpr(
3591 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3592 const SCEV *Rem = SE->getURemExpr(
3593 SE->applyLoopGuards(ExitCount, TheLoop),
3594 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3595 return Rem->isZero();
3596 };
3597
3598 if (MaxPowerOf2RuntimeVF > 0u) {
3599 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3600 "MaxFixedVF must be a power of 2");
3601 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3602 // Accept MaxFixedVF if we do not have a tail.
3603 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3604 return MaxFactors;
3605 }
3606 }
3607
3608 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3609 if (ExpectedTC && ExpectedTC->isFixed() &&
3610 ExpectedTC->getFixedValue() <=
3611 TTI.getMinTripCountTailFoldingThreshold()) {
3612 if (MaxPowerOf2RuntimeVF > 0u) {
3613 // If we have a low-trip-count, and the fixed-width VF is known to divide
3614 // the trip count but the scalable factor does not, use the fixed-width
3615 // factor in preference to allow the generation of a non-predicated loop.
3616 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3617 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3618 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3619 "remain for any chosen VF.\n");
3620 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3621 return MaxFactors;
3622 }
3623 }
3624
3626 "The trip count is below the minial threshold value.",
3627 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3628 ORE, TheLoop);
3630 }
3631
3632 // If we don't know the precise trip count, or if the trip count that we
3633 // found modulo the vectorization factor is not zero, try to fold the tail
3634 // by masking.
3635 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3636 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3637 setTailFoldingStyles(ContainsScalableVF, UserIC);
3638 if (foldTailByMasking()) {
3640 LLVM_DEBUG(
3641 dbgs()
3642 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3643 "try to generate VP Intrinsics with scalable vector "
3644 "factors only.\n");
3645 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3646 // for now.
3647 // TODO: extend it for fixed vectors, if required.
3648 assert(ContainsScalableVF && "Expected scalable vector factor.");
3649
3650 MaxFactors.FixedVF = ElementCount::getFixed(1);
3651 }
3652 return MaxFactors;
3653 }
3654
3655 // If there was a tail-folding hint/switch, but we can't fold the tail by
3656 // masking, fallback to a vectorization with a scalar epilogue.
3657 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3658 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3659 "scalar epilogue instead.\n");
3660 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3661 return MaxFactors;
3662 }
3663
3664 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3665 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3667 }
3668
3669 if (TC.isZero()) {
3671 "unable to calculate the loop count due to complex control flow",
3672 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3674 }
3675
3677 "Cannot optimize for size and vectorize at the same time.",
3678 "cannot optimize for size and vectorize at the same time. "
3679 "Enable vectorization of this loop with '#pragma clang loop "
3680 "vectorize(enable)' when compiling with -Os/-Oz",
3681 "NoTailLoopWithOptForSize", ORE, TheLoop);
3683}
3684
3686 ElementCount VF) {
3687 if (ConsiderRegPressure.getNumOccurrences())
3688 return ConsiderRegPressure;
3689
3690 // TODO: We should eventually consider register pressure for all targets. The
3691 // TTI hook is temporary whilst target-specific issues are being fixed.
3692 if (TTI.shouldConsiderVectorizationRegPressure())
3693 return true;
3694
3695 if (!useMaxBandwidth(VF.isScalable()
3698 return false;
3699 // Only calculate register pressure for VFs enabled by MaxBandwidth.
3701 VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3703}
3704
3707 return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3708 (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3710 Legal->hasVectorCallVariants())));
3711}
3712
3713ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3714 ElementCount VF, unsigned MaxTripCount, bool FoldTailByMasking) const {
3715 unsigned EstimatedVF = VF.getKnownMinValue();
3716 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3717 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3718 auto Min = Attr.getVScaleRangeMin();
3719 EstimatedVF *= Min;
3720 }
3721
3722 // When a scalar epilogue is required, at least one iteration of the scalar
3723 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3724 // max VF that results in a dead vector loop.
3725 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3726 MaxTripCount -= 1;
3727
3728 if (MaxTripCount && MaxTripCount <= EstimatedVF &&
3729 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3730 // If upper bound loop trip count (TC) is known at compile time there is no
3731 // point in choosing VF greater than TC (as done in the loop below). Select
3732 // maximum power of two which doesn't exceed TC. If VF is
3733 // scalable, we only fall back on a fixed VF when the TC is less than or
3734 // equal to the known number of lanes.
3735 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount);
3736 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3737 "exceeding the constant trip count: "
3738 << ClampedUpperTripCount << "\n");
3739 return ElementCount::get(ClampedUpperTripCount,
3740 FoldTailByMasking ? VF.isScalable() : false);
3741 }
3742 return VF;
3743}
3744
3745ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3746 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3747 ElementCount MaxSafeVF, bool FoldTailByMasking) {
3748 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3749 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3750 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3752
3753 // Convenience function to return the minimum of two ElementCounts.
3754 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3755 assert((LHS.isScalable() == RHS.isScalable()) &&
3756 "Scalable flags must match");
3757 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3758 };
3759
3760 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3761 // Note that both WidestRegister and WidestType may not be a powers of 2.
3762 auto MaxVectorElementCount = ElementCount::get(
3763 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3764 ComputeScalableMaxVF);
3765 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3766 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3767 << (MaxVectorElementCount * WidestType) << " bits.\n");
3768
3769 if (!MaxVectorElementCount) {
3770 LLVM_DEBUG(dbgs() << "LV: The target has no "
3771 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3772 << " vector registers.\n");
3773 return ElementCount::getFixed(1);
3774 }
3775
3776 ElementCount MaxVF = clampVFByMaxTripCount(MaxVectorElementCount,
3777 MaxTripCount, FoldTailByMasking);
3778 // If the MaxVF was already clamped, there's no point in trying to pick a
3779 // larger one.
3780 if (MaxVF != MaxVectorElementCount)
3781 return MaxVF;
3782
3784 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3786
3787 if (MaxVF.isScalable())
3788 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3789 else
3790 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3791
3792 if (useMaxBandwidth(RegKind)) {
3793 auto MaxVectorElementCountMaxBW = ElementCount::get(
3794 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3795 ComputeScalableMaxVF);
3796 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3797
3798 if (ElementCount MinVF =
3799 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3800 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3801 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3802 << ") with target's minimum: " << MinVF << '\n');
3803 MaxVF = MinVF;
3804 }
3805 }
3806
3807 MaxVF = clampVFByMaxTripCount(MaxVF, MaxTripCount, FoldTailByMasking);
3808
3809 if (MaxVectorElementCount != MaxVF) {
3810 // Invalidate any widening decisions we might have made, in case the loop
3811 // requires prediction (decided later), but we have already made some
3812 // load/store widening decisions.
3813 invalidateCostModelingDecisions();
3814 }
3815 }
3816 return MaxVF;
3817}
3818
3819bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3820 const VectorizationFactor &B,
3821 const unsigned MaxTripCount,
3822 bool HasTail,
3823 bool IsEpilogue) const {
3824 InstructionCost CostA = A.Cost;
3825 InstructionCost CostB = B.Cost;
3826
3827 // Improve estimate for the vector width if it is scalable.
3828 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3829 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3830 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3831 if (A.Width.isScalable())
3832 EstimatedWidthA *= *VScale;
3833 if (B.Width.isScalable())
3834 EstimatedWidthB *= *VScale;
3835 }
3836
3837 // When optimizing for size choose whichever is smallest, which will be the
3838 // one with the smallest cost for the whole loop. On a tie pick the larger
3839 // vector width, on the assumption that throughput will be greater.
3840 if (CM.CostKind == TTI::TCK_CodeSize)
3841 return CostA < CostB ||
3842 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3843
3844 // Assume vscale may be larger than 1 (or the value being tuned for),
3845 // so that scalable vectorization is slightly favorable over fixed-width
3846 // vectorization.
3847 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3848 A.Width.isScalable() && !B.Width.isScalable();
3849
3850 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3851 const InstructionCost &RHS) {
3852 return PreferScalable ? LHS <= RHS : LHS < RHS;
3853 };
3854
3855 // To avoid the need for FP division:
3856 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3857 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3858 if (!MaxTripCount)
3859 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3860
3861 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3862 InstructionCost VectorCost,
3863 InstructionCost ScalarCost) {
3864 // If the trip count is a known (possibly small) constant, the trip count
3865 // will be rounded up to an integer number of iterations under
3866 // FoldTailByMasking. The total cost in that case will be
3867 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3868 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3869 // some extra overheads, but for the purpose of comparing the costs of
3870 // different VFs we can use this to compare the total loop-body cost
3871 // expected after vectorization.
3872 if (HasTail)
3873 return VectorCost * (MaxTripCount / VF) +
3874 ScalarCost * (MaxTripCount % VF);
3875 return VectorCost * divideCeil(MaxTripCount, VF);
3876 };
3877
3878 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3879 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3880 return CmpFn(RTCostA, RTCostB);
3881}
3882
3883bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3884 const VectorizationFactor &B,
3885 bool HasTail,
3886 bool IsEpilogue) const {
3887 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3888 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3889 IsEpilogue);
3890}
3891
3894 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3895 SmallVector<RecipeVFPair> InvalidCosts;
3896 for (const auto &Plan : VPlans) {
3897 for (ElementCount VF : Plan->vectorFactors()) {
3898 // The VPlan-based cost model is designed for computing vector cost.
3899 // Querying VPlan-based cost model with a scarlar VF will cause some
3900 // errors because we expect the VF is vector for most of the widen
3901 // recipes.
3902 if (VF.isScalar())
3903 continue;
3904
3905 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
3906 *CM.PSE.getSE());
3907 precomputeCosts(*Plan, VF, CostCtx);
3908 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3910 for (auto &R : *VPBB) {
3911 if (!R.cost(VF, CostCtx).isValid())
3912 InvalidCosts.emplace_back(&R, VF);
3913 }
3914 }
3915 }
3916 }
3917 if (InvalidCosts.empty())
3918 return;
3919
3920 // Emit a report of VFs with invalid costs in the loop.
3921
3922 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3924 unsigned I = 0;
3925 for (auto &Pair : InvalidCosts)
3926 if (Numbering.try_emplace(Pair.first, I).second)
3927 ++I;
3928
3929 // Sort the list, first on recipe(number) then on VF.
3930 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3931 unsigned NA = Numbering[A.first];
3932 unsigned NB = Numbering[B.first];
3933 if (NA != NB)
3934 return NA < NB;
3935 return ElementCount::isKnownLT(A.second, B.second);
3936 });
3937
3938 // For a list of ordered recipe-VF pairs:
3939 // [(load, VF1), (load, VF2), (store, VF1)]
3940 // group the recipes together to emit separate remarks for:
3941 // load (VF1, VF2)
3942 // store (VF1)
3943 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3944 auto Subset = ArrayRef<RecipeVFPair>();
3945 do {
3946 if (Subset.empty())
3947 Subset = Tail.take_front(1);
3948
3949 VPRecipeBase *R = Subset.front().first;
3950
3951 unsigned Opcode =
3954 [](const auto *R) { return Instruction::PHI; })
3955 .Case<VPWidenSelectRecipe>(
3956 [](const auto *R) { return Instruction::Select; })
3957 .Case<VPWidenStoreRecipe>(
3958 [](const auto *R) { return Instruction::Store; })
3959 .Case<VPWidenLoadRecipe>(
3960 [](const auto *R) { return Instruction::Load; })
3961 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3962 [](const auto *R) { return Instruction::Call; })
3965 [](const auto *R) { return R->getOpcode(); })
3966 .Case<VPInterleaveRecipe>([](const VPInterleaveRecipe *R) {
3967 return R->getStoredValues().empty() ? Instruction::Load
3968 : Instruction::Store;
3969 });
3970
3971 // If the next recipe is different, or if there are no other pairs,
3972 // emit a remark for the collated subset. e.g.
3973 // [(load, VF1), (load, VF2))]
3974 // to emit:
3975 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3976 if (Subset == Tail || Tail[Subset.size()].first != R) {
3977 std::string OutString;
3978 raw_string_ostream OS(OutString);
3979 assert(!Subset.empty() && "Unexpected empty range");
3980 OS << "Recipe with invalid costs prevented vectorization at VF=(";
3981 for (const auto &Pair : Subset)
3982 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
3983 OS << "):";
3984 if (Opcode == Instruction::Call) {
3985 StringRef Name = "";
3986 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
3987 Name = Int->getIntrinsicName();
3988 } else {
3989 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
3990 Function *CalledFn =
3991 WidenCall ? WidenCall->getCalledScalarFunction()
3992 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
3993 ->getLiveInIRValue());
3994 Name = CalledFn->getName();
3995 }
3996 OS << " call to " << Name;
3997 } else
3998 OS << " " << Instruction::getOpcodeName(Opcode);
3999 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
4000 R->getDebugLoc());
4001 Tail = Tail.drop_front(Subset.size());
4002 Subset = {};
4003 } else
4004 // Grow the subset by one element
4005 Subset = Tail.take_front(Subset.size() + 1);
4006 } while (!Tail.empty());
4007}
4008
4009/// Check if any recipe of \p Plan will generate a vector value, which will be
4010/// assigned a vector register.
4012 const TargetTransformInfo &TTI) {
4013 assert(VF.isVector() && "Checking a scalar VF?");
4014 VPTypeAnalysis TypeInfo(Plan);
4015 DenseSet<VPRecipeBase *> EphemeralRecipes;
4016 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4017 // Set of already visited types.
4018 DenseSet<Type *> Visited;
4021 for (VPRecipeBase &R : *VPBB) {
4022 if (EphemeralRecipes.contains(&R))
4023 continue;
4024 // Continue early if the recipe is considered to not produce a vector
4025 // result. Note that this includes VPInstruction where some opcodes may
4026 // produce a vector, to preserve existing behavior as VPInstructions model
4027 // aspects not directly mapped to existing IR instructions.
4028 switch (R.getVPDefID()) {
4029 case VPDef::VPDerivedIVSC:
4030 case VPDef::VPScalarIVStepsSC:
4031 case VPDef::VPReplicateSC:
4032 case VPDef::VPInstructionSC:
4033 case VPDef::VPCanonicalIVPHISC:
4034 case VPDef::VPVectorPointerSC:
4035 case VPDef::VPVectorEndPointerSC:
4036 case VPDef::VPExpandSCEVSC:
4037 case VPDef::VPEVLBasedIVPHISC:
4038 case VPDef::VPPredInstPHISC:
4039 case VPDef::VPBranchOnMaskSC:
4040 continue;
4041 case VPDef::VPReductionSC:
4042 case VPDef::VPActiveLaneMaskPHISC:
4043 case VPDef::VPWidenCallSC:
4044 case VPDef::VPWidenCanonicalIVSC:
4045 case VPDef::VPWidenCastSC:
4046 case VPDef::VPWidenGEPSC:
4047 case VPDef::VPWidenIntrinsicSC:
4048 case VPDef::VPWidenSC:
4049 case VPDef::VPWidenSelectSC:
4050 case VPDef::VPBlendSC:
4051 case VPDef::VPFirstOrderRecurrencePHISC:
4052 case VPDef::VPHistogramSC:
4053 case VPDef::VPWidenPHISC:
4054 case VPDef::VPWidenIntOrFpInductionSC:
4055 case VPDef::VPWidenPointerInductionSC:
4056 case VPDef::VPReductionPHISC:
4057 case VPDef::VPInterleaveEVLSC:
4058 case VPDef::VPInterleaveSC:
4059 case VPDef::VPWidenLoadEVLSC:
4060 case VPDef::VPWidenLoadSC:
4061 case VPDef::VPWidenStoreEVLSC:
4062 case VPDef::VPWidenStoreSC:
4063 break;
4064 default:
4065 llvm_unreachable("unhandled recipe");
4066 }
4067
4068 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4069 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4070 if (!NumLegalParts)
4071 return false;
4072 if (VF.isScalable()) {
4073 // <vscale x 1 x iN> is assumed to be profitable over iN because
4074 // scalable registers are a distinct register class from scalar
4075 // ones. If we ever find a target which wants to lower scalable
4076 // vectors back to scalars, we'll need to update this code to
4077 // explicitly ask TTI about the register class uses for each part.
4078 return NumLegalParts <= VF.getKnownMinValue();
4079 }
4080 // Two or more elements that share a register - are vectorized.
4081 return NumLegalParts < VF.getFixedValue();
4082 };
4083
4084 // If no def nor is a store, e.g., branches, continue - no value to check.
4085 if (R.getNumDefinedValues() == 0 &&
4087 continue;
4088 // For multi-def recipes, currently only interleaved loads, suffice to
4089 // check first def only.
4090 // For stores check their stored value; for interleaved stores suffice
4091 // the check first stored value only. In all cases this is the second
4092 // operand.
4093 VPValue *ToCheck =
4094 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4095 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4096 if (!Visited.insert({ScalarTy}).second)
4097 continue;
4098 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4099 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4100 return true;
4101 }
4102 }
4103
4104 return false;
4105}
4106
4107static bool hasReplicatorRegion(VPlan &Plan) {
4109 Plan.getVectorLoopRegion()->getEntry())),
4110 [](auto *VPRB) { return VPRB->isReplicator(); });
4111}
4112
4113#ifndef NDEBUG
4114VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
4115 InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1));
4116 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
4117 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
4118 assert(
4119 any_of(VPlans,
4120 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }) &&
4121 "Expected Scalar VF to be a candidate");
4122
4123 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
4124 ExpectedCost);
4125 VectorizationFactor ChosenFactor = ScalarCost;
4126
4127 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
4128 if (ForceVectorization &&
4129 (VPlans.size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4130 // Ignore scalar width, because the user explicitly wants vectorization.
4131 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4132 // evaluation.
4133 ChosenFactor.Cost = InstructionCost::getMax();
4134 }
4135
4136 for (auto &P : VPlans) {
4137 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
4138 P->vectorFactors().end());
4139
4141 if (any_of(VFs, [this](ElementCount VF) {
4142 return CM.shouldConsiderRegPressureForVF(VF);
4143 }))
4144 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4145
4146 for (unsigned I = 0; I < VFs.size(); I++) {
4147 ElementCount VF = VFs[I];
4148 // The cost for scalar VF=1 is already calculated, so ignore it.
4149 if (VF.isScalar())
4150 continue;
4151
4152 /// If the register pressure needs to be considered for VF,
4153 /// don't consider the VF as valid if it exceeds the number
4154 /// of registers for the target.
4155 if (CM.shouldConsiderRegPressureForVF(VF) &&
4156 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs))
4157 continue;
4158
4159 InstructionCost C = CM.expectedCost(VF);
4160
4161 // Add on other costs that are modelled in VPlan, but not in the legacy
4162 // cost model.
4163 VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind,
4164 *CM.PSE.getSE());
4165 VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
4166 assert(VectorRegion && "Expected to have a vector region!");
4167 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4168 vp_depth_first_shallow(VectorRegion->getEntry()))) {
4169 for (VPRecipeBase &R : *VPBB) {
4170 auto *VPI = dyn_cast<VPInstruction>(&R);
4171 if (!VPI)
4172 continue;
4173 switch (VPI->getOpcode()) {
4174 // Selects are only modelled in the legacy cost model for safe
4175 // divisors.
4176 case Instruction::Select: {
4177 VPValue *VPV = VPI->getVPSingleValue();
4178 if (VPV->getNumUsers() == 1) {
4179 if (auto *WR = dyn_cast<VPWidenRecipe>(*VPV->user_begin())) {
4180 switch (WR->getOpcode()) {
4181 case Instruction::UDiv:
4182 case Instruction::SDiv:
4183 case Instruction::URem:
4184 case Instruction::SRem:
4185 continue;
4186 default:
4187 break;
4188 }
4189 }
4190 }
4191 C += VPI->cost(VF, CostCtx);
4192 break;
4193 }
4195 unsigned Multiplier =
4196 cast<ConstantInt>(VPI->getOperand(2)->getLiveInIRValue())
4197 ->getZExtValue();
4198 C += VPI->cost(VF * Multiplier, CostCtx);
4199 break;
4200 }
4202 C += VPI->cost(VF, CostCtx);
4203 break;
4204 default:
4205 break;
4206 }
4207 }
4208 }
4209
4210 VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
4211 unsigned Width =
4212 estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
4213 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
4214 << " costs: " << (Candidate.Cost / Width));
4215 if (VF.isScalable())
4216 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
4217 << CM.getVScaleForTuning().value_or(1) << ")");
4218 LLVM_DEBUG(dbgs() << ".\n");
4219
4220 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
4221 LLVM_DEBUG(
4222 dbgs()
4223 << "LV: Not considering vector loop of width " << VF
4224 << " because it will not generate any vector instructions.\n");
4225 continue;
4226 }
4227
4228 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
4229 LLVM_DEBUG(
4230 dbgs()
4231 << "LV: Not considering vector loop of width " << VF
4232 << " because it would cause replicated blocks to be generated,"
4233 << " which isn't allowed when optimizing for size.\n");
4234 continue;
4235 }
4236
4237 if (isMoreProfitable(Candidate, ChosenFactor, P->hasScalarTail()))
4238 ChosenFactor = Candidate;
4239 }
4240 }
4241
4242 if (!EnableCondStoresVectorization && CM.hasPredStores()) {
4244 "There are conditional stores.",
4245 "store that is conditionally executed prevents vectorization",
4246 "ConditionalStore", ORE, OrigLoop);
4247 ChosenFactor = ScalarCost;
4248 }
4249
4250 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
4251 !isMoreProfitable(ChosenFactor, ScalarCost,
4252 !CM.foldTailByMasking())) dbgs()
4253 << "LV: Vectorization seems to be not beneficial, "
4254 << "but was forced by a user.\n");
4255 return ChosenFactor;
4256}
4257#endif
4258
4259bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4260 ElementCount VF) const {
4261 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4262 // reductions need special handling and are currently unsupported.
4263 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4264 if (!Legal->isReductionVariable(&Phi))
4265 return Legal->isFixedOrderRecurrence(&Phi);
4266 RecurKind RK = Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
4267 return RK == RecurKind::FMinNum || RK == RecurKind::FMaxNum;
4268 }))
4269 return false;
4270
4271 // Phis with uses outside of the loop require special handling and are
4272 // currently unsupported.
4273 for (const auto &Entry : Legal->getInductionVars()) {
4274 // Look for uses of the value of the induction at the last iteration.
4275 Value *PostInc =
4276 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4277 for (User *U : PostInc->users())
4278 if (!OrigLoop->contains(cast<Instruction>(U)))
4279 return false;
4280 // Look for uses of penultimate value of the induction.
4281 for (User *U : Entry.first->users())
4282 if (!OrigLoop->contains(cast<Instruction>(U)))
4283 return false;
4284 }
4285
4286 // Epilogue vectorization code has not been auditted to ensure it handles
4287 // non-latch exits properly. It may be fine, but it needs auditted and
4288 // tested.
4289 // TODO: Add support for loops with an early exit.
4290 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4291 return false;
4292
4293 return true;
4294}
4295
4297 const ElementCount VF, const unsigned IC) const {
4298 // FIXME: We need a much better cost-model to take different parameters such
4299 // as register pressure, code size increase and cost of extra branches into
4300 // account. For now we apply a very crude heuristic and only consider loops
4301 // with vectorization factors larger than a certain value.
4302
4303 // Allow the target to opt out entirely.
4304 if (!TTI.preferEpilogueVectorization())
4305 return false;
4306
4307 // We also consider epilogue vectorization unprofitable for targets that don't
4308 // consider interleaving beneficial (eg. MVE).
4309 if (TTI.getMaxInterleaveFactor(VF) <= 1)
4310 return false;
4311
4312 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4314 : TTI.getEpilogueVectorizationMinVF();
4315 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4316}
4317
4319 const ElementCount MainLoopVF, unsigned IC) {
4322 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4323 return Result;
4324 }
4325
4326 if (!CM.isScalarEpilogueAllowed()) {
4327 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4328 "epilogue is allowed.\n");
4329 return Result;
4330 }
4331
4332 // Not really a cost consideration, but check for unsupported cases here to
4333 // simplify the logic.
4334 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4335 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4336 "is not a supported candidate.\n");
4337 return Result;
4338 }
4339
4341 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4343 if (hasPlanWithVF(ForcedEC))
4344 return {ForcedEC, 0, 0};
4345
4346 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4347 "viable.\n");
4348 return Result;
4349 }
4350
4351 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4352 LLVM_DEBUG(
4353 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4354 return Result;
4355 }
4356
4357 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4358 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4359 "this loop\n");
4360 return Result;
4361 }
4362
4363 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4364 // the main loop handles 8 lanes per iteration. We could still benefit from
4365 // vectorizing the epilogue loop with VF=4.
4366 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4367 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4368
4369 ScalarEvolution &SE = *PSE.getSE();
4370 Type *TCType = Legal->getWidestInductionType();
4371 const SCEV *RemainingIterations = nullptr;
4372 unsigned MaxTripCount = 0;
4373 const SCEV *TC =
4374 vputils::getSCEVExprForVPValue(getPlanFor(MainLoopVF).getTripCount(), SE);
4375 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4376 RemainingIterations =
4377 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4378
4379 // No iterations left to process in the epilogue.
4380 if (RemainingIterations->isZero())
4381 return Result;
4382
4383 if (MainLoopVF.isFixed()) {
4384 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4385 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4386 SE.getConstant(TCType, MaxTripCount))) {
4387 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4388 }
4389 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4390 << MaxTripCount << "\n");
4391 }
4392
4393 for (auto &NextVF : ProfitableVFs) {
4394 // Skip candidate VFs without a corresponding VPlan.
4395 if (!hasPlanWithVF(NextVF.Width))
4396 continue;
4397
4398 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4399 // vectors) or > the VF of the main loop (fixed vectors).
4400 if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
4401 ElementCount::isKnownGE(NextVF.Width, EstimatedRuntimeVF)) ||
4402 (NextVF.Width.isScalable() &&
4403 ElementCount::isKnownGE(NextVF.Width, MainLoopVF)) ||
4404 (!NextVF.Width.isScalable() && !MainLoopVF.isScalable() &&
4405 ElementCount::isKnownGT(NextVF.Width, MainLoopVF)))
4406 continue;
4407
4408 // If NextVF is greater than the number of remaining iterations, the
4409 // epilogue loop would be dead. Skip such factors.
4410 if (RemainingIterations && !NextVF.Width.isScalable()) {
4411 if (SE.isKnownPredicate(
4413 SE.getConstant(TCType, NextVF.Width.getFixedValue()),
4414 RemainingIterations))
4415 continue;
4416 }
4417
4418 if (Result.Width.isScalar() ||
4419 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4420 /*IsEpilogue*/ true))
4421 Result = NextVF;
4422 }
4423
4424 if (Result != VectorizationFactor::Disabled())
4425 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4426 << Result.Width << "\n");
4427 return Result;
4428}
4429
4430std::pair<unsigned, unsigned>
4432 unsigned MinWidth = -1U;
4433 unsigned MaxWidth = 8;
4434 const DataLayout &DL = TheFunction->getDataLayout();
4435 // For in-loop reductions, no element types are added to ElementTypesInLoop
4436 // if there are no loads/stores in the loop. In this case, check through the
4437 // reduction variables to determine the maximum width.
4438 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4439 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4440 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4441 // When finding the min width used by the recurrence we need to account
4442 // for casts on the input operands of the recurrence.
4443 MinWidth = std::min(
4444 MinWidth,
4445 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4447 MaxWidth = std::max(MaxWidth,
4449 }
4450 } else {
4451 for (Type *T : ElementTypesInLoop) {
4452 MinWidth = std::min<unsigned>(
4453 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4454 MaxWidth = std::max<unsigned>(
4455 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4456 }
4457 }
4458 return {MinWidth, MaxWidth};
4459}
4460
4462 ElementTypesInLoop.clear();
4463 // For each block.
4464 for (BasicBlock *BB : TheLoop->blocks()) {
4465 // For each instruction in the loop.
4466 for (Instruction &I : BB->instructionsWithoutDebug()) {
4467 Type *T = I.getType();
4468
4469 // Skip ignored values.
4470 if (ValuesToIgnore.count(&I))
4471 continue;
4472
4473 // Only examine Loads, Stores and PHINodes.
4474 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4475 continue;
4476
4477 // Examine PHI nodes that are reduction variables. Update the type to
4478 // account for the recurrence type.
4479 if (auto *PN = dyn_cast<PHINode>(&I)) {
4480 if (!Legal->isReductionVariable(PN))
4481 continue;
4482 const RecurrenceDescriptor &RdxDesc =
4483 Legal->getRecurrenceDescriptor(PN);
4485 TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4486 RdxDesc.getRecurrenceType()))
4487 continue;
4488 T = RdxDesc.getRecurrenceType();
4489 }
4490
4491 // Examine the stored values.
4492 if (auto *ST = dyn_cast<StoreInst>(&I))
4493 T = ST->getValueOperand()->getType();
4494
4495 assert(T->isSized() &&
4496 "Expected the load/store/recurrence type to be sized");
4497
4498 ElementTypesInLoop.insert(T);
4499 }
4500 }
4501}
4502
4503unsigned
4505 InstructionCost LoopCost) {
4506 // -- The interleave heuristics --
4507 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4508 // There are many micro-architectural considerations that we can't predict
4509 // at this level. For example, frontend pressure (on decode or fetch) due to
4510 // code size, or the number and capabilities of the execution ports.
4511 //
4512 // We use the following heuristics to select the interleave count:
4513 // 1. If the code has reductions, then we interleave to break the cross
4514 // iteration dependency.
4515 // 2. If the loop is really small, then we interleave to reduce the loop
4516 // overhead.
4517 // 3. We don't interleave if we think that we will spill registers to memory
4518 // due to the increased register pressure.
4519
4520 if (!CM.isScalarEpilogueAllowed())
4521 return 1;
4522
4525 LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
4526 "Unroll factor forced to be 1.\n");
4527 return 1;
4528 }
4529
4530 // We used the distance for the interleave count.
4531 if (!Legal->isSafeForAnyVectorWidth())
4532 return 1;
4533
4534 // We don't attempt to perform interleaving for loops with uncountable early
4535 // exits because the VPInstruction::AnyOf code cannot currently handle
4536 // multiple parts.
4537 if (Plan.hasEarlyExit())
4538 return 1;
4539
4540 const bool HasReductions =
4543
4544 // If we did not calculate the cost for VF (because the user selected the VF)
4545 // then we calculate the cost of VF here.
4546 if (LoopCost == 0) {
4547 if (VF.isScalar())
4548 LoopCost = CM.expectedCost(VF);
4549 else
4550 LoopCost = cost(Plan, VF);
4551 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4552
4553 // Loop body is free and there is no need for interleaving.
4554 if (LoopCost == 0)
4555 return 1;
4556 }
4557
4558 VPRegisterUsage R =
4559 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4560 // We divide by these constants so assume that we have at least one
4561 // instruction that uses at least one register.
4562 for (auto &Pair : R.MaxLocalUsers) {
4563 Pair.second = std::max(Pair.second, 1U);
4564 }
4565
4566 // We calculate the interleave count using the following formula.
4567 // Subtract the number of loop invariants from the number of available
4568 // registers. These registers are used by all of the interleaved instances.
4569 // Next, divide the remaining registers by the number of registers that is
4570 // required by the loop, in order to estimate how many parallel instances
4571 // fit without causing spills. All of this is rounded down if necessary to be
4572 // a power of two. We want power of two interleave count to simplify any
4573 // addressing operations or alignment considerations.
4574 // We also want power of two interleave counts to ensure that the induction
4575 // variable of the vector loop wraps to zero, when tail is folded by masking;
4576 // this currently happens when OptForSize, in which case IC is set to 1 above.
4577 unsigned IC = UINT_MAX;
4578
4579 for (const auto &Pair : R.MaxLocalUsers) {
4580 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4581 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4582 << " registers of "
4583 << TTI.getRegisterClassName(Pair.first)
4584 << " register class\n");
4585 if (VF.isScalar()) {
4586 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4587 TargetNumRegisters = ForceTargetNumScalarRegs;
4588 } else {
4589 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4590 TargetNumRegisters = ForceTargetNumVectorRegs;
4591 }
4592 unsigned MaxLocalUsers = Pair.second;
4593 unsigned LoopInvariantRegs = 0;
4594 if (R.LoopInvariantRegs.contains(Pair.first))
4595 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4596
4597 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4598 MaxLocalUsers);
4599 // Don't count the induction variable as interleaved.
4601 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4602 std::max(1U, (MaxLocalUsers - 1)));
4603 }
4604
4605 IC = std::min(IC, TmpIC);
4606 }
4607
4608 // Clamp the interleave ranges to reasonable counts.
4609 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4610
4611 // Check if the user has overridden the max.
4612 if (VF.isScalar()) {
4613 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4614 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4615 } else {
4616 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4617 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4618 }
4619
4620 // Try to get the exact trip count, or an estimate based on profiling data or
4621 // ConstantMax from PSE, failing that.
4622 auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop);
4623
4624 // For fixed length VFs treat a scalable trip count as unknown.
4625 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4626 // Re-evaluate trip counts and VFs to be in the same numerical space.
4627 unsigned AvailableTC =
4628 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4629 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4630
4631 // At least one iteration must be scalar when this constraint holds. So the
4632 // maximum available iterations for interleaving is one less.
4633 if (CM.requiresScalarEpilogue(VF.isVector()))
4634 --AvailableTC;
4635
4636 unsigned InterleaveCountLB = bit_floor(std::max(
4637 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4638
4639 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4640 // If the best known trip count is exact, we select between two
4641 // prospective ICs, where
4642 //
4643 // 1) the aggressive IC is capped by the trip count divided by VF
4644 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4645 //
4646 // The final IC is selected in a way that the epilogue loop trip count is
4647 // minimized while maximizing the IC itself, so that we either run the
4648 // vector loop at least once if it generates a small epilogue loop, or
4649 // else we run the vector loop at least twice.
4650
4651 unsigned InterleaveCountUB = bit_floor(std::max(
4652 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4653 MaxInterleaveCount = InterleaveCountLB;
4654
4655 if (InterleaveCountUB != InterleaveCountLB) {
4656 unsigned TailTripCountUB =
4657 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4658 unsigned TailTripCountLB =
4659 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4660 // If both produce same scalar tail, maximize the IC to do the same work
4661 // in fewer vector loop iterations
4662 if (TailTripCountUB == TailTripCountLB)
4663 MaxInterleaveCount = InterleaveCountUB;
4664 }
4665 } else {
4666 // If trip count is an estimated compile time constant, limit the
4667 // IC to be capped by the trip count divided by VF * 2, such that the
4668 // vector loop runs at least twice to make interleaving seem profitable
4669 // when there is an epilogue loop present. Since exact Trip count is not
4670 // known we choose to be conservative in our IC estimate.
4671 MaxInterleaveCount = InterleaveCountLB;
4672 }
4673 }
4674
4675 assert(MaxInterleaveCount > 0 &&
4676 "Maximum interleave count must be greater than 0");
4677
4678 // Clamp the calculated IC to be between the 1 and the max interleave count
4679 // that the target and trip count allows.
4680 if (IC > MaxInterleaveCount)
4681 IC = MaxInterleaveCount;
4682 else
4683 // Make sure IC is greater than 0.
4684 IC = std::max(1u, IC);
4685
4686 assert(IC > 0 && "Interleave count must be greater than 0.");
4687
4688 // Interleave if we vectorized this loop and there is a reduction that could
4689 // benefit from interleaving.
4690 if (VF.isVector() && HasReductions) {
4691 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4692 return IC;
4693 }
4694
4695 // For any scalar loop that either requires runtime checks or predication we
4696 // are better off leaving this to the unroller. Note that if we've already
4697 // vectorized the loop we will have done the runtime check and so interleaving
4698 // won't require further checks.
4699 bool ScalarInterleavingRequiresPredication =
4700 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4701 return Legal->blockNeedsPredication(BB);
4702 }));
4703 bool ScalarInterleavingRequiresRuntimePointerCheck =
4704 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4705
4706 // We want to interleave small loops in order to reduce the loop overhead and
4707 // potentially expose ILP opportunities.
4708 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4709 << "LV: IC is " << IC << '\n'
4710 << "LV: VF is " << VF << '\n');
4711 const bool AggressivelyInterleaveReductions =
4712 TTI.enableAggressiveInterleaving(HasReductions);
4713 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4714 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4715 // We assume that the cost overhead is 1 and we use the cost model
4716 // to estimate the cost of the loop and interleave until the cost of the
4717 // loop overhead is about 5% of the cost of the loop.
4718 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4719 SmallLoopCost / LoopCost.getValue()));
4720
4721 // Interleave until store/load ports (estimated by max interleave count) are
4722 // saturated.
4723 unsigned NumStores = 0;
4724 unsigned NumLoads = 0;
4727 for (VPRecipeBase &R : *VPBB) {
4729 NumLoads++;
4730 continue;
4731 }
4733 NumStores++;
4734 continue;
4735 }
4736
4737 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4738 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4739 NumStores += StoreOps;
4740 else
4741 NumLoads += InterleaveR->getNumDefinedValues();
4742 continue;
4743 }
4744 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4745 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4746 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4747 continue;
4748 }
4749 if (isa<VPHistogramRecipe>(&R)) {
4750 NumLoads++;
4751 NumStores++;
4752 continue;
4753 }
4754 }
4755 }
4756 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4757 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
4758
4759 // There is little point in interleaving for reductions containing selects
4760 // and compares when VF=1 since it may just create more overhead than it's
4761 // worth for loops with small trip counts. This is because we still have to
4762 // do the final reduction after the loop.
4763 bool HasSelectCmpReductions =
4764 HasReductions &&
4766 [](VPRecipeBase &R) {
4767 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4768 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4769 RedR->getRecurrenceKind()) ||
4770 RecurrenceDescriptor::isFindIVRecurrenceKind(
4771 RedR->getRecurrenceKind()));
4772 });
4773 if (HasSelectCmpReductions) {
4774 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4775 return 1;
4776 }
4777
4778 // If we have a scalar reduction (vector reductions are already dealt with
4779 // by this point), we can increase the critical path length if the loop
4780 // we're interleaving is inside another loop. For tree-wise reductions
4781 // set the limit to 2, and for ordered reductions it's best to disable
4782 // interleaving entirely.
4783 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4784 bool HasOrderedReductions =
4786 [](VPRecipeBase &R) {
4787 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4788
4789 return RedR && RedR->isOrdered();
4790 });
4791 if (HasOrderedReductions) {
4792 LLVM_DEBUG(
4793 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4794 return 1;
4795 }
4796
4797 unsigned F = MaxNestedScalarReductionIC;
4798 SmallIC = std::min(SmallIC, F);
4799 StoresIC = std::min(StoresIC, F);
4800 LoadsIC = std::min(LoadsIC, F);
4801 }
4802
4804 std::max(StoresIC, LoadsIC) > SmallIC) {
4805 LLVM_DEBUG(
4806 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4807 return std::max(StoresIC, LoadsIC);
4808 }
4809
4810 // If there are scalar reductions and TTI has enabled aggressive
4811 // interleaving for reductions, we will interleave to expose ILP.
4812 if (VF.isScalar() && AggressivelyInterleaveReductions) {
4813 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4814 // Interleave no less than SmallIC but not as aggressive as the normal IC
4815 // to satisfy the rare situation when resources are too limited.
4816 return std::max(IC / 2, SmallIC);
4817 }
4818
4819 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4820 return SmallIC;
4821 }
4822
4823 // Interleave if this is a large loop (small loops are already dealt with by
4824 // this point) that could benefit from interleaving.
4825 if (AggressivelyInterleaveReductions) {
4826 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4827 return IC;
4828 }
4829
4830 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4831 return 1;
4832}
4833
4834bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4835 ElementCount VF) {
4836 // TODO: Cost model for emulated masked load/store is completely
4837 // broken. This hack guides the cost model to use an artificially
4838 // high enough value to practically disable vectorization with such
4839 // operations, except where previously deployed legality hack allowed
4840 // using very low cost values. This is to avoid regressions coming simply
4841 // from moving "masked load/store" check from legality to cost model.
4842 // Masked Load/Gather emulation was previously never allowed.
4843 // Limited number of Masked Store/Scatter emulation was allowed.
4844 assert((isPredicatedInst(I)) &&
4845 "Expecting a scalar emulated instruction");
4846 return isa<LoadInst>(I) ||
4847 (isa<StoreInst>(I) &&
4848 NumPredStores > NumberOfStoresToPredicate);
4849}
4850
4852 assert(VF.isVector() && "Expected VF >= 2");
4853
4854 // If we've already collected the instructions to scalarize or the predicated
4855 // BBs after vectorization, there's nothing to do. Collection may already have
4856 // occurred if we have a user-selected VF and are now computing the expected
4857 // cost for interleaving.
4858 if (InstsToScalarize.contains(VF) ||
4859 PredicatedBBsAfterVectorization.contains(VF))
4860 return;
4861
4862 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
4863 // not profitable to scalarize any instructions, the presence of VF in the
4864 // map will indicate that we've analyzed it already.
4865 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4866
4867 // Find all the instructions that are scalar with predication in the loop and
4868 // determine if it would be better to not if-convert the blocks they are in.
4869 // If so, we also record the instructions to scalarize.
4870 for (BasicBlock *BB : TheLoop->blocks()) {
4872 continue;
4873 for (Instruction &I : *BB)
4874 if (isScalarWithPredication(&I, VF)) {
4875 ScalarCostsTy ScalarCosts;
4876 // Do not apply discount logic for:
4877 // 1. Scalars after vectorization, as there will only be a single copy
4878 // of the instruction.
4879 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4880 // 3. Emulated masked memrefs, if a hacked cost is needed.
4881 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4882 !useEmulatedMaskMemRefHack(&I, VF) &&
4883 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4884 for (const auto &[I, IC] : ScalarCosts)
4885 ScalarCostsVF.insert({I, IC});
4886 // Check if we decided to scalarize a call. If so, update the widening
4887 // decision of the call to CM_Scalarize with the computed scalar cost.
4888 for (const auto &[I, Cost] : ScalarCosts) {
4889 auto *CI = dyn_cast<CallInst>(I);
4890 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4891 continue;
4892 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4893 CallWideningDecisions[{CI, VF}].Cost = Cost;
4894 }
4895 }
4896 // Remember that BB will remain after vectorization.
4897 PredicatedBBsAfterVectorization[VF].insert(BB);
4898 for (auto *Pred : predecessors(BB)) {
4899 if (Pred->getSingleSuccessor() == BB)
4900 PredicatedBBsAfterVectorization[VF].insert(Pred);
4901 }
4902 }
4903 }
4904}
4905
4906InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4907 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4908 assert(!isUniformAfterVectorization(PredInst, VF) &&
4909 "Instruction marked uniform-after-vectorization will be predicated");
4910
4911 // Initialize the discount to zero, meaning that the scalar version and the
4912 // vector version cost the same.
4913 InstructionCost Discount = 0;
4914
4915 // Holds instructions to analyze. The instructions we visit are mapped in
4916 // ScalarCosts. Those instructions are the ones that would be scalarized if
4917 // we find that the scalar version costs less.
4919
4920 // Returns true if the given instruction can be scalarized.
4921 auto CanBeScalarized = [&](Instruction *I) -> bool {
4922 // We only attempt to scalarize instructions forming a single-use chain
4923 // from the original predicated block that would otherwise be vectorized.
4924 // Although not strictly necessary, we give up on instructions we know will
4925 // already be scalar to avoid traversing chains that are unlikely to be
4926 // beneficial.
4927 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4928 isScalarAfterVectorization(I, VF))
4929 return false;
4930
4931 // If the instruction is scalar with predication, it will be analyzed
4932 // separately. We ignore it within the context of PredInst.
4933 if (isScalarWithPredication(I, VF))
4934 return false;
4935
4936 // If any of the instruction's operands are uniform after vectorization,
4937 // the instruction cannot be scalarized. This prevents, for example, a
4938 // masked load from being scalarized.
4939 //
4940 // We assume we will only emit a value for lane zero of an instruction
4941 // marked uniform after vectorization, rather than VF identical values.
4942 // Thus, if we scalarize an instruction that uses a uniform, we would
4943 // create uses of values corresponding to the lanes we aren't emitting code
4944 // for. This behavior can be changed by allowing getScalarValue to clone
4945 // the lane zero values for uniforms rather than asserting.
4946 for (Use &U : I->operands())
4947 if (auto *J = dyn_cast<Instruction>(U.get()))
4948 if (isUniformAfterVectorization(J, VF))
4949 return false;
4950
4951 // Otherwise, we can scalarize the instruction.
4952 return true;
4953 };
4954
4955 // Compute the expected cost discount from scalarizing the entire expression
4956 // feeding the predicated instruction. We currently only consider expressions
4957 // that are single-use instruction chains.
4958 Worklist.push_back(PredInst);
4959 while (!Worklist.empty()) {
4960 Instruction *I = Worklist.pop_back_val();
4961
4962 // If we've already analyzed the instruction, there's nothing to do.
4963 if (ScalarCosts.contains(I))
4964 continue;
4965
4966 // Cannot scalarize fixed-order recurrence phis at the moment.
4967 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4968 continue;
4969
4970 // Compute the cost of the vector instruction. Note that this cost already
4971 // includes the scalarization overhead of the predicated instruction.
4972 InstructionCost VectorCost = getInstructionCost(I, VF);
4973
4974 // Compute the cost of the scalarized instruction. This cost is the cost of
4975 // the instruction as if it wasn't if-converted and instead remained in the
4976 // predicated block. We will scale this cost by block probability after
4977 // computing the scalarization overhead.
4978 InstructionCost ScalarCost =
4979 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
4980
4981 // Compute the scalarization overhead of needed insertelement instructions
4982 // and phi nodes.
4983 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
4984 Type *WideTy = toVectorizedTy(I->getType(), VF);
4985 for (Type *VectorTy : getContainedTypes(WideTy)) {
4986 ScalarCost += TTI.getScalarizationOverhead(
4988 /*Insert=*/true,
4989 /*Extract=*/false, CostKind);
4990 }
4991 ScalarCost +=
4992 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
4993 }
4994
4995 // Compute the scalarization overhead of needed extractelement
4996 // instructions. For each of the instruction's operands, if the operand can
4997 // be scalarized, add it to the worklist; otherwise, account for the
4998 // overhead.
4999 for (Use &U : I->operands())
5000 if (auto *J = dyn_cast<Instruction>(U.get())) {
5001 assert(canVectorizeTy(J->getType()) &&
5002 "Instruction has non-scalar type");
5003 if (CanBeScalarized(J))
5004 Worklist.push_back(J);
5005 else if (needsExtract(J, VF)) {
5006 Type *WideTy = toVectorizedTy(J->getType(), VF);
5007 for (Type *VectorTy : getContainedTypes(WideTy)) {
5008 ScalarCost += TTI.getScalarizationOverhead(
5009 cast<VectorType>(VectorTy),
5010 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
5011 /*Extract*/ true, CostKind);
5012 }
5013 }
5014 }
5015
5016 // Scale the total scalar cost by block probability.
5017 ScalarCost /= getPredBlockCostDivisor(CostKind);
5018
5019 // Compute the discount. A non-negative discount means the vector version
5020 // of the instruction costs more, and scalarizing would be beneficial.
5021 Discount += VectorCost - ScalarCost;
5022 ScalarCosts[I] = ScalarCost;
5023 }
5024
5025 return Discount;
5026}
5027
5030
5031 // If the vector loop gets executed exactly once with the given VF, ignore the
5032 // costs of comparison and induction instructions, as they'll get simplified
5033 // away.
5034 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5035 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5036 if (TC == VF && !foldTailByMasking())
5038 ValuesToIgnoreForVF);
5039
5040 // For each block.
5041 for (BasicBlock *BB : TheLoop->blocks()) {
5042 InstructionCost BlockCost;
5043
5044 // For each instruction in the old loop.
5045 for (Instruction &I : BB->instructionsWithoutDebug()) {
5046 // Skip ignored values.
5047 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5048 (VF.isVector() && VecValuesToIgnore.count(&I)))
5049 continue;
5050
5052
5053 // Check if we should override the cost.
5054 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
5056
5057 BlockCost += C;
5058 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5059 << VF << " For instruction: " << I << '\n');
5060 }
5061
5062 // If we are vectorizing a predicated block, it will have been
5063 // if-converted. This means that the block's instructions (aside from
5064 // stores and instructions that may divide by zero) will now be
5065 // unconditionally executed. For the scalar case, we may not always execute
5066 // the predicated block, if it is an if-else block. Thus, scale the block's
5067 // cost by the probability of executing it. blockNeedsPredication from
5068 // Legal is used so as to not include all blocks in tail folded loops.
5069 if (VF.isScalar() && Legal->blockNeedsPredication(BB))
5070 BlockCost /= getPredBlockCostDivisor(CostKind);
5071
5072 Cost += BlockCost;
5073 }
5074
5075 return Cost;
5076}
5077
5078/// Gets Address Access SCEV after verifying that the access pattern
5079/// is loop invariant except the induction variable dependence.
5080///
5081/// This SCEV can be sent to the Target in order to estimate the address
5082/// calculation cost.
5084 Value *Ptr,
5087 const Loop *TheLoop) {
5088
5089 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5090 if (!Gep)
5091 return nullptr;
5092
5093 // We are looking for a gep with all loop invariant indices except for one
5094 // which should be an induction variable.
5095 auto *SE = PSE.getSE();
5096 unsigned NumOperands = Gep->getNumOperands();
5097 for (unsigned Idx = 1; Idx < NumOperands; ++Idx) {
5098 Value *Opd = Gep->getOperand(Idx);
5099 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5100 !Legal->isInductionVariable(Opd))
5101 return nullptr;
5102 }
5103
5104 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
5105 return PSE.getSCEV(Ptr);
5106}
5107
5109LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5110 ElementCount VF) {
5111 assert(VF.isVector() &&
5112 "Scalarization cost of instruction implies vectorization.");
5113 if (VF.isScalable())
5114 return InstructionCost::getInvalid();
5115
5116 Type *ValTy = getLoadStoreType(I);
5117 auto *SE = PSE.getSE();
5118
5119 unsigned AS = getLoadStoreAddressSpace(I);
5121 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5122 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5123 // that it is being called from this specific place.
5124
5125 // Figure out whether the access is strided and get the stride value
5126 // if it's known in compile time
5127 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5128
5129 // Get the cost of the scalar memory instruction and address computation.
5131 PtrTy, SE, PtrSCEV, CostKind);
5132
5133 // Don't pass *I here, since it is scalar but will actually be part of a
5134 // vectorized loop where the user of it is a vectorized instruction.
5135 const Align Alignment = getLoadStoreAlignment(I);
5136 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5137 Cost += VF.getFixedValue() *
5138 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5139 AS, CostKind, OpInfo);
5140
5141 // Get the overhead of the extractelement and insertelement instructions
5142 // we might create due to scalarization.
5144
5145 // If we have a predicated load/store, it will need extra i1 extracts and
5146 // conditional branches, but may not be executed for each vector lane. Scale
5147 // the cost by the probability of executing the predicated block.
5148 if (isPredicatedInst(I)) {
5150
5151 // Add the cost of an i1 extract and a branch
5152 auto *VecI1Ty =
5153 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
5155 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5156 /*Insert=*/false, /*Extract=*/true, CostKind);
5157 Cost += TTI.getCFInstrCost(Instruction::Br, CostKind);
5158
5159 if (useEmulatedMaskMemRefHack(I, VF))
5160 // Artificially setting to a high enough value to practically disable
5161 // vectorization with such operations.
5162 Cost = 3000000;
5163 }
5164
5165 return Cost;
5166}
5167
5169LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5170 ElementCount VF) {
5171 Type *ValTy = getLoadStoreType(I);
5172 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5174 unsigned AS = getLoadStoreAddressSpace(I);
5175 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5176
5177 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5178 "Stride should be 1 or -1 for consecutive memory access");
5179 const Align Alignment = getLoadStoreAlignment(I);
5181 if (Legal->isMaskRequired(I)) {
5182 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5183 CostKind);
5184 } else {
5185 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5186 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5187 CostKind, OpInfo, I);
5188 }
5189
5190 bool Reverse = ConsecutiveStride < 0;
5191 if (Reverse)
5193 VectorTy, {}, CostKind, 0);
5194 return Cost;
5195}
5196
5198LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5199 ElementCount VF) {
5200 assert(Legal->isUniformMemOp(*I, VF));
5201
5202 Type *ValTy = getLoadStoreType(I);
5204 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5205 const Align Alignment = getLoadStoreAlignment(I);
5206 unsigned AS = getLoadStoreAddressSpace(I);
5207 if (isa<LoadInst>(I)) {
5208 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5209 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5210 CostKind) +
5212 VectorTy, {}, CostKind);
5213 }
5214 StoreInst *SI = cast<StoreInst>(I);
5215
5216 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5217 // TODO: We have existing tests that request the cost of extracting element
5218 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5219 // the actual generated code, which involves extracting the last element of
5220 // a scalable vector where the lane to extract is unknown at compile time.
5222 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5223 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5224 if (!IsLoopInvariantStoreValue)
5225 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5226 VectorTy, CostKind, 0);
5227 return Cost;
5228}
5229
5231LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5232 ElementCount VF) {
5233 Type *ValTy = getLoadStoreType(I);
5234 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5235 const Align Alignment = getLoadStoreAlignment(I);
5237 Type *PtrTy = Ptr->getType();
5238
5239 if (!Legal->isUniform(Ptr, VF))
5240 PtrTy = toVectorTy(PtrTy, VF);
5241
5242 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5243 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5244 Legal->isMaskRequired(I), Alignment,
5245 CostKind, I);
5246}
5247
5249LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5250 ElementCount VF) {
5251 const auto *Group = getInterleavedAccessGroup(I);
5252 assert(Group && "Fail to get an interleaved access group.");
5253
5254 Instruction *InsertPos = Group->getInsertPos();
5255 Type *ValTy = getLoadStoreType(InsertPos);
5256 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5257 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5258
5259 unsigned InterleaveFactor = Group->getFactor();
5260 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5261
5262 // Holds the indices of existing members in the interleaved group.
5263 SmallVector<unsigned, 4> Indices;
5264 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5265 if (Group->getMember(IF))
5266 Indices.push_back(IF);
5267
5268 // Calculate the cost of the whole interleaved group.
5269 bool UseMaskForGaps =
5270 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5271 (isa<StoreInst>(I) && !Group->isFull());
5273 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5274 Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
5275 UseMaskForGaps);
5276
5277 if (Group->isReverse()) {
5278 // TODO: Add support for reversed masked interleaved access.
5279 assert(!Legal->isMaskRequired(I) &&
5280 "Reverse masked interleaved access not supported.");
5281 Cost += Group->getNumMembers() *
5283 VectorTy, {}, CostKind, 0);
5284 }
5285 return Cost;
5286}
5287
5288std::optional<InstructionCost>
5290 ElementCount VF,
5291 Type *Ty) const {
5292 using namespace llvm::PatternMatch;
5293 // Early exit for no inloop reductions
5294 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5295 return std::nullopt;
5296 auto *VectorTy = cast<VectorType>(Ty);
5297
5298 // We are looking for a pattern of, and finding the minimal acceptable cost:
5299 // reduce(mul(ext(A), ext(B))) or
5300 // reduce(mul(A, B)) or
5301 // reduce(ext(A)) or
5302 // reduce(A).
5303 // The basic idea is that we walk down the tree to do that, finding the root
5304 // reduction instruction in InLoopReductionImmediateChains. From there we find
5305 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5306 // of the components. If the reduction cost is lower then we return it for the
5307 // reduction instruction and 0 for the other instructions in the pattern. If
5308 // it is not we return an invalid cost specifying the orignal cost method
5309 // should be used.
5310 Instruction *RetI = I;
5311 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5312 if (!RetI->hasOneUser())
5313 return std::nullopt;
5314 RetI = RetI->user_back();
5315 }
5316
5317 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5318 RetI->user_back()->getOpcode() == Instruction::Add) {
5319 RetI = RetI->user_back();
5320 }
5321
5322 // Test if the found instruction is a reduction, and if not return an invalid
5323 // cost specifying the parent to use the original cost modelling.
5324 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5325 if (!LastChain)
5326 return std::nullopt;
5327
5328 // Find the reduction this chain is a part of and calculate the basic cost of
5329 // the reduction on its own.
5330 Instruction *ReductionPhi = LastChain;
5331 while (!isa<PHINode>(ReductionPhi))
5332 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5333
5334 const RecurrenceDescriptor &RdxDesc =
5335 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5336
5337 InstructionCost BaseCost;
5338 RecurKind RK = RdxDesc.getRecurrenceKind();
5341 BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5342 RdxDesc.getFastMathFlags(), CostKind);
5343 } else {
5344 BaseCost = TTI.getArithmeticReductionCost(
5345 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5346 }
5347
5348 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5349 // normal fmul instruction to the cost of the fadd reduction.
5350 if (RK == RecurKind::FMulAdd)
5351 BaseCost +=
5352 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5353
5354 // If we're using ordered reductions then we can just return the base cost
5355 // here, since getArithmeticReductionCost calculates the full ordered
5356 // reduction cost when FP reassociation is not allowed.
5357 if (useOrderedReductions(RdxDesc))
5358 return BaseCost;
5359
5360 // Get the operand that was not the reduction chain and match it to one of the
5361 // patterns, returning the better cost if it is found.
5362 Instruction *RedOp = RetI->getOperand(1) == LastChain
5365
5366 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5367
5368 Instruction *Op0, *Op1;
5369 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5370 match(RedOp,
5372 match(Op0, m_ZExtOrSExt(m_Value())) &&
5373 Op0->getOpcode() == Op1->getOpcode() &&
5374 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5375 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5376 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5377
5378 // Matched reduce.add(ext(mul(ext(A), ext(B)))
5379 // Note that the extend opcodes need to all match, or if A==B they will have
5380 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5381 // which is equally fine.
5382 bool IsUnsigned = isa<ZExtInst>(Op0);
5383 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5384 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5385
5386 InstructionCost ExtCost =
5387 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5389 InstructionCost MulCost =
5390 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5391 InstructionCost Ext2Cost =
5392 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5394
5395 InstructionCost RedCost = TTI.getMulAccReductionCost(
5396 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5397 CostKind);
5398
5399 if (RedCost.isValid() &&
5400 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5401 return I == RetI ? RedCost : 0;
5402 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5403 !TheLoop->isLoopInvariant(RedOp)) {
5404 // Matched reduce(ext(A))
5405 bool IsUnsigned = isa<ZExtInst>(RedOp);
5406 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5407 InstructionCost RedCost = TTI.getExtendedReductionCost(
5408 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5409 RdxDesc.getFastMathFlags(), CostKind);
5410
5411 InstructionCost ExtCost =
5412 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5414 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5415 return I == RetI ? RedCost : 0;
5416 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5417 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5418 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5419 Op0->getOpcode() == Op1->getOpcode() &&
5420 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5421 bool IsUnsigned = isa<ZExtInst>(Op0);
5422 Type *Op0Ty = Op0->getOperand(0)->getType();
5423 Type *Op1Ty = Op1->getOperand(0)->getType();
5424 Type *LargestOpTy =
5425 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5426 : Op0Ty;
5427 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5428
5429 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5430 // different sizes. We take the largest type as the ext to reduce, and add
5431 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
5432 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5433 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5435 InstructionCost ExtCost1 = TTI.getCastInstrCost(
5436 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5438 InstructionCost MulCost =
5439 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5440
5441 InstructionCost RedCost = TTI.getMulAccReductionCost(
5442 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5443 CostKind);
5444 InstructionCost ExtraExtCost = 0;
5445 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5446 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5447 ExtraExtCost = TTI.getCastInstrCost(
5448 ExtraExtOp->getOpcode(), ExtType,
5449 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5451 }
5452
5453 if (RedCost.isValid() &&
5454 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5455 return I == RetI ? RedCost : 0;
5456 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5457 // Matched reduce.add(mul())
5458 InstructionCost MulCost =
5459 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5460
5461 InstructionCost RedCost = TTI.getMulAccReductionCost(
5462 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5463 CostKind);
5464
5465 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5466 return I == RetI ? RedCost : 0;
5467 }
5468 }
5469
5470 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5471}
5472
5474LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5475 ElementCount VF) {
5476 // Calculate scalar cost only. Vectorization cost should be ready at this
5477 // moment.
5478 if (VF.isScalar()) {
5479 Type *ValTy = getLoadStoreType(I);
5481 const Align Alignment = getLoadStoreAlignment(I);
5482 unsigned AS = getLoadStoreAddressSpace(I);
5483
5484 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5485 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5486 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5487 OpInfo, I);
5488 }
5489 return getWideningCost(I, VF);
5490}
5491
5493LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5494 ElementCount VF) const {
5495
5496 // There is no mechanism yet to create a scalable scalarization loop,
5497 // so this is currently Invalid.
5498 if (VF.isScalable())
5499 return InstructionCost::getInvalid();
5500
5501 if (VF.isScalar())
5502 return 0;
5503
5505 Type *RetTy = toVectorizedTy(I->getType(), VF);
5506 if (!RetTy->isVoidTy() &&
5508
5509 for (Type *VectorTy : getContainedTypes(RetTy)) {
5512 /*Insert=*/true,
5513 /*Extract=*/false, CostKind);
5514 }
5515 }
5516
5517 // Some targets keep addresses scalar.
5519 return Cost;
5520
5521 // Some targets support efficient element stores.
5523 return Cost;
5524
5525 // Collect operands to consider.
5526 CallInst *CI = dyn_cast<CallInst>(I);
5527 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5528
5529 // Skip operands that do not require extraction/scalarization and do not incur
5530 // any overhead.
5532 for (auto *V : filterExtractingOperands(Ops, VF))
5533 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5535}
5536
5538 if (VF.isScalar())
5539 return;
5540 NumPredStores = 0;
5541 for (BasicBlock *BB : TheLoop->blocks()) {
5542 // For each instruction in the old loop.
5543 for (Instruction &I : *BB) {
5545 if (!Ptr)
5546 continue;
5547
5548 // TODO: We should generate better code and update the cost model for
5549 // predicated uniform stores. Today they are treated as any other
5550 // predicated store (see added test cases in
5551 // invariant-store-vectorization.ll).
5553 NumPredStores++;
5554
5555 if (Legal->isUniformMemOp(I, VF)) {
5556 auto IsLegalToScalarize = [&]() {
5557 if (!VF.isScalable())
5558 // Scalarization of fixed length vectors "just works".
5559 return true;
5560
5561 // We have dedicated lowering for unpredicated uniform loads and
5562 // stores. Note that even with tail folding we know that at least
5563 // one lane is active (i.e. generalized predication is not possible
5564 // here), and the logic below depends on this fact.
5565 if (!foldTailByMasking())
5566 return true;
5567
5568 // For scalable vectors, a uniform memop load is always
5569 // uniform-by-parts and we know how to scalarize that.
5570 if (isa<LoadInst>(I))
5571 return true;
5572
5573 // A uniform store isn't neccessarily uniform-by-part
5574 // and we can't assume scalarization.
5575 auto &SI = cast<StoreInst>(I);
5576 return TheLoop->isLoopInvariant(SI.getValueOperand());
5577 };
5578
5579 const InstructionCost GatherScatterCost =
5581 getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5582
5583 // Load: Scalar load + broadcast
5584 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5585 // FIXME: This cost is a significant under-estimate for tail folded
5586 // memory ops.
5587 const InstructionCost ScalarizationCost =
5588 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5590
5591 // Choose better solution for the current VF, Note that Invalid
5592 // costs compare as maximumal large. If both are invalid, we get
5593 // scalable invalid which signals a failure and a vectorization abort.
5594 if (GatherScatterCost < ScalarizationCost)
5595 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5596 else
5597 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5598 continue;
5599 }
5600
5601 // We assume that widening is the best solution when possible.
5602 if (memoryInstructionCanBeWidened(&I, VF)) {
5603 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5604 int ConsecutiveStride = Legal->isConsecutivePtr(
5606 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5607 "Expected consecutive stride.");
5608 InstWidening Decision =
5609 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5610 setWideningDecision(&I, VF, Decision, Cost);
5611 continue;
5612 }
5613
5614 // Choose between Interleaving, Gather/Scatter or Scalarization.
5616 unsigned NumAccesses = 1;
5617 if (isAccessInterleaved(&I)) {
5618 const auto *Group = getInterleavedAccessGroup(&I);
5619 assert(Group && "Fail to get an interleaved access group.");
5620
5621 // Make one decision for the whole group.
5622 if (getWideningDecision(&I, VF) != CM_Unknown)
5623 continue;
5624
5625 NumAccesses = Group->getNumMembers();
5627 InterleaveCost = getInterleaveGroupCost(&I, VF);
5628 }
5629
5630 InstructionCost GatherScatterCost =
5632 ? getGatherScatterCost(&I, VF) * NumAccesses
5634
5635 InstructionCost ScalarizationCost =
5636 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5637
5638 // Choose better solution for the current VF,
5639 // write down this decision and use it during vectorization.
5641 InstWidening Decision;
5642 if (InterleaveCost <= GatherScatterCost &&
5643 InterleaveCost < ScalarizationCost) {
5644 Decision = CM_Interleave;
5645 Cost = InterleaveCost;
5646 } else if (GatherScatterCost < ScalarizationCost) {
5647 Decision = CM_GatherScatter;
5648 Cost = GatherScatterCost;
5649 } else {
5650 Decision = CM_Scalarize;
5651 Cost = ScalarizationCost;
5652 }
5653 // If the instructions belongs to an interleave group, the whole group
5654 // receives the same decision. The whole group receives the cost, but
5655 // the cost will actually be assigned to one instruction.
5656 if (const auto *Group = getInterleavedAccessGroup(&I)) {
5657 if (Decision == CM_Scalarize) {
5658 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5659 if (auto *I = Group->getMember(Idx)) {
5660 setWideningDecision(I, VF, Decision,
5661 getMemInstScalarizationCost(I, VF));
5662 }
5663 }
5664 } else {
5665 setWideningDecision(Group, VF, Decision, Cost);
5666 }
5667 } else
5668 setWideningDecision(&I, VF, Decision, Cost);
5669 }
5670 }
5671
5672 // Make sure that any load of address and any other address computation
5673 // remains scalar unless there is gather/scatter support. This avoids
5674 // inevitable extracts into address registers, and also has the benefit of
5675 // activating LSR more, since that pass can't optimize vectorized
5676 // addresses.
5677 if (TTI.prefersVectorizedAddressing())
5678 return;
5679
5680 // Start with all scalar pointer uses.
5682 for (BasicBlock *BB : TheLoop->blocks())
5683 for (Instruction &I : *BB) {
5684 Instruction *PtrDef =
5686 if (PtrDef && TheLoop->contains(PtrDef) &&
5688 AddrDefs.insert(PtrDef);
5689 }
5690
5691 // Add all instructions used to generate the addresses.
5693 append_range(Worklist, AddrDefs);
5694 while (!Worklist.empty()) {
5695 Instruction *I = Worklist.pop_back_val();
5696 for (auto &Op : I->operands())
5697 if (auto *InstOp = dyn_cast<Instruction>(Op))
5698 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
5699 AddrDefs.insert(InstOp).second)
5700 Worklist.push_back(InstOp);
5701 }
5702
5703 for (auto *I : AddrDefs) {
5704 if (isa<LoadInst>(I)) {
5705 // Setting the desired widening decision should ideally be handled in
5706 // by cost functions, but since this involves the task of finding out
5707 // if the loaded register is involved in an address computation, it is
5708 // instead changed here when we know this is the case.
5709 InstWidening Decision = getWideningDecision(I, VF);
5710 if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5711 (!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
5712 Decision == CM_Scalarize))
5713 // Scalarize a widened load of address or update the cost of a scalar
5714 // load of an address.
5716 I, VF, CM_Scalarize,
5717 (VF.getKnownMinValue() *
5718 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5719 else if (const auto *Group = getInterleavedAccessGroup(I)) {
5720 // Scalarize an interleave group of address loads.
5721 for (unsigned I = 0; I < Group->getFactor(); ++I) {
5722 if (Instruction *Member = Group->getMember(I))
5724 Member, VF, CM_Scalarize,
5725 (VF.getKnownMinValue() *
5726 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
5727 }
5728 }
5729 } else {
5730 // Cannot scalarize fixed-order recurrence phis at the moment.
5731 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5732 continue;
5733
5734 // Make sure I gets scalarized and a cost estimate without
5735 // scalarization overhead.
5736 ForcedScalars[VF].insert(I);
5737 }
5738 }
5739}
5740
5742 assert(!VF.isScalar() &&
5743 "Trying to set a vectorization decision for a scalar VF");
5744
5745 auto ForcedScalar = ForcedScalars.find(VF);
5746 for (BasicBlock *BB : TheLoop->blocks()) {
5747 // For each instruction in the old loop.
5748 for (Instruction &I : *BB) {
5750
5751 if (!CI)
5752 continue;
5753
5757 Function *ScalarFunc = CI->getCalledFunction();
5758 Type *ScalarRetTy = CI->getType();
5759 SmallVector<Type *, 4> Tys, ScalarTys;
5760 for (auto &ArgOp : CI->args())
5761 ScalarTys.push_back(ArgOp->getType());
5762
5763 // Estimate cost of scalarized vector call. The source operands are
5764 // assumed to be vectors, so we need to extract individual elements from
5765 // there, execute VF scalar calls, and then gather the result into the
5766 // vector return value.
5767 if (VF.isFixed()) {
5768 InstructionCost ScalarCallCost =
5769 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5770
5771 // Compute costs of unpacking argument values for the scalar calls and
5772 // packing the return values to a vector.
5773 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5774 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5775 } else {
5776 // There is no point attempting to calculate the scalar cost for a
5777 // scalable VF as we know it will be Invalid.
5779 "Unexpected valid cost for scalarizing scalable vectors");
5780 ScalarCost = InstructionCost::getInvalid();
5781 }
5782
5783 // Honor ForcedScalars and UniformAfterVectorization decisions.
5784 // TODO: For calls, it might still be more profitable to widen. Use
5785 // VPlan-based cost model to compare different options.
5786 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5787 ForcedScalar->second.contains(CI)) ||
5788 isUniformAfterVectorization(CI, VF))) {
5789 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5790 Intrinsic::not_intrinsic, std::nullopt,
5791 ScalarCost);
5792 continue;
5793 }
5794
5795 bool MaskRequired = Legal->isMaskRequired(CI);
5796 // Compute corresponding vector type for return value and arguments.
5797 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5798 for (Type *ScalarTy : ScalarTys)
5799 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5800
5801 // An in-loop reduction using an fmuladd intrinsic is a special case;
5802 // we don't want the normal cost for that intrinsic.
5804 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5807 std::nullopt, *RedCost);
5808 continue;
5809 }
5810
5811 // Find the cost of vectorizing the call, if we can find a suitable
5812 // vector variant of the function.
5813 VFInfo FuncInfo;
5814 Function *VecFunc = nullptr;
5815 // Search through any available variants for one we can use at this VF.
5816 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5817 // Must match requested VF.
5818 if (Info.Shape.VF != VF)
5819 continue;
5820
5821 // Must take a mask argument if one is required
5822 if (MaskRequired && !Info.isMasked())
5823 continue;
5824
5825 // Check that all parameter kinds are supported
5826 bool ParamsOk = true;
5827 for (VFParameter Param : Info.Shape.Parameters) {
5828 switch (Param.ParamKind) {
5830 break;
5832 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5833 // Make sure the scalar parameter in the loop is invariant.
5834 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5835 TheLoop))
5836 ParamsOk = false;
5837 break;
5838 }
5840 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5841 // Find the stride for the scalar parameter in this loop and see if
5842 // it matches the stride for the variant.
5843 // TODO: do we need to figure out the cost of an extract to get the
5844 // first lane? Or do we hope that it will be folded away?
5845 ScalarEvolution *SE = PSE.getSE();
5846 if (!match(SE->getSCEV(ScalarParam),
5848 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5850 ParamsOk = false;
5851 break;
5852 }
5854 break;
5855 default:
5856 ParamsOk = false;
5857 break;
5858 }
5859 }
5860
5861 if (!ParamsOk)
5862 continue;
5863
5864 // Found a suitable candidate, stop here.
5865 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5866 FuncInfo = Info;
5867 break;
5868 }
5869
5870 if (TLI && VecFunc && !CI->isNoBuiltin())
5871 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
5872
5873 // Find the cost of an intrinsic; some targets may have instructions that
5874 // perform the operation without needing an actual call.
5876 if (IID != Intrinsic::not_intrinsic)
5878
5879 InstructionCost Cost = ScalarCost;
5880 InstWidening Decision = CM_Scalarize;
5881
5882 if (VectorCost <= Cost) {
5883 Cost = VectorCost;
5884 Decision = CM_VectorCall;
5885 }
5886
5887 if (IntrinsicCost <= Cost) {
5889 Decision = CM_IntrinsicCall;
5890 }
5891
5892 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5894 }
5895 }
5896}
5897
5899 if (!Legal->isInvariant(Op))
5900 return false;
5901 // Consider Op invariant, if it or its operands aren't predicated
5902 // instruction in the loop. In that case, it is not trivially hoistable.
5903 auto *OpI = dyn_cast<Instruction>(Op);
5904 return !OpI || !TheLoop->contains(OpI) ||
5905 (!isPredicatedInst(OpI) &&
5906 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5907 all_of(OpI->operands(),
5908 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5909}
5910
5913 ElementCount VF) {
5914 // If we know that this instruction will remain uniform, check the cost of
5915 // the scalar version.
5917 VF = ElementCount::getFixed(1);
5918
5919 if (VF.isVector() && isProfitableToScalarize(I, VF))
5920 return InstsToScalarize[VF][I];
5921
5922 // Forced scalars do not have any scalarization overhead.
5923 auto ForcedScalar = ForcedScalars.find(VF);
5924 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5925 auto InstSet = ForcedScalar->second;
5926 if (InstSet.count(I))
5928 VF.getKnownMinValue();
5929 }
5930
5931 Type *RetTy = I->getType();
5933 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5934 auto *SE = PSE.getSE();
5935
5936 Type *VectorTy;
5937 if (isScalarAfterVectorization(I, VF)) {
5938 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5939 [this](Instruction *I, ElementCount VF) -> bool {
5940 if (VF.isScalar())
5941 return true;
5942
5943 auto Scalarized = InstsToScalarize.find(VF);
5944 assert(Scalarized != InstsToScalarize.end() &&
5945 "VF not yet analyzed for scalarization profitability");
5946 return !Scalarized->second.count(I) &&
5947 llvm::all_of(I->users(), [&](User *U) {
5948 auto *UI = cast<Instruction>(U);
5949 return !Scalarized->second.count(UI);
5950 });
5951 };
5952
5953 // With the exception of GEPs and PHIs, after scalarization there should
5954 // only be one copy of the instruction generated in the loop. This is
5955 // because the VF is either 1, or any instructions that need scalarizing
5956 // have already been dealt with by the time we get here. As a result,
5957 // it means we don't have to multiply the instruction cost by VF.
5958 assert(I->getOpcode() == Instruction::GetElementPtr ||
5959 I->getOpcode() == Instruction::PHI ||
5960 (I->getOpcode() == Instruction::BitCast &&
5961 I->getType()->isPointerTy()) ||
5962 HasSingleCopyAfterVectorization(I, VF));
5963 VectorTy = RetTy;
5964 } else
5965 VectorTy = toVectorizedTy(RetTy, VF);
5966
5967 if (VF.isVector() && VectorTy->isVectorTy() &&
5968 !TTI.getNumberOfParts(VectorTy))
5970
5971 // TODO: We need to estimate the cost of intrinsic calls.
5972 switch (I->getOpcode()) {
5973 case Instruction::GetElementPtr:
5974 // We mark this instruction as zero-cost because the cost of GEPs in
5975 // vectorized code depends on whether the corresponding memory instruction
5976 // is scalarized or not. Therefore, we handle GEPs with the memory
5977 // instruction cost.
5978 return 0;
5979 case Instruction::Br: {
5980 // In cases of scalarized and predicated instructions, there will be VF
5981 // predicated blocks in the vectorized loop. Each branch around these
5982 // blocks requires also an extract of its vector compare i1 element.
5983 // Note that the conditional branch from the loop latch will be replaced by
5984 // a single branch controlling the loop, so there is no extra overhead from
5985 // scalarization.
5986 bool ScalarPredicatedBB = false;
5988 if (VF.isVector() && BI->isConditional() &&
5989 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
5990 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
5991 BI->getParent() != TheLoop->getLoopLatch())
5992 ScalarPredicatedBB = true;
5993
5994 if (ScalarPredicatedBB) {
5995 // Not possible to scalarize scalable vector with predicated instructions.
5996 if (VF.isScalable())
5998 // Return cost for branches around scalarized and predicated blocks.
5999 auto *VecI1Ty =
6001 return (
6002 TTI.getScalarizationOverhead(
6003 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6004 /*Insert*/ false, /*Extract*/ true, CostKind) +
6005 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
6006 }
6007
6008 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6009 // The back-edge branch will remain, as will all scalar branches.
6010 return TTI.getCFInstrCost(Instruction::Br, CostKind);
6011
6012 // This branch will be eliminated by if-conversion.
6013 return 0;
6014 // Note: We currently assume zero cost for an unconditional branch inside
6015 // a predicated block since it will become a fall-through, although we
6016 // may decide in the future to call TTI for all branches.
6017 }
6018 case Instruction::Switch: {
6019 if (VF.isScalar())
6020 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6021 auto *Switch = cast<SwitchInst>(I);
6022 return Switch->getNumCases() *
6023 TTI.getCmpSelInstrCost(
6024 Instruction::ICmp,
6025 toVectorTy(Switch->getCondition()->getType(), VF),
6026 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6028 }
6029 case Instruction::PHI: {
6030 auto *Phi = cast<PHINode>(I);
6031
6032 // First-order recurrences are replaced by vector shuffles inside the loop.
6033 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6035 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6036 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6037 cast<VectorType>(VectorTy),
6038 cast<VectorType>(VectorTy), Mask, CostKind,
6039 VF.getKnownMinValue() - 1);
6040 }
6041
6042 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6043 // converted into select instructions. We require N - 1 selects per phi
6044 // node, where N is the number of incoming values.
6045 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6046 Type *ResultTy = Phi->getType();
6047
6048 // All instructions in an Any-of reduction chain are narrowed to bool.
6049 // Check if that is the case for this phi node.
6050 auto *HeaderUser = cast_if_present<PHINode>(
6051 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6052 auto *Phi = dyn_cast<PHINode>(U);
6053 if (Phi && Phi->getParent() == TheLoop->getHeader())
6054 return Phi;
6055 return nullptr;
6056 }));
6057 if (HeaderUser) {
6058 auto &ReductionVars = Legal->getReductionVars();
6059 auto Iter = ReductionVars.find(HeaderUser);
6060 if (Iter != ReductionVars.end() &&
6062 Iter->second.getRecurrenceKind()))
6063 ResultTy = Type::getInt1Ty(Phi->getContext());
6064 }
6065 return (Phi->getNumIncomingValues() - 1) *
6066 TTI.getCmpSelInstrCost(
6067 Instruction::Select, toVectorTy(ResultTy, VF),
6068 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6070 }
6071
6072 // When tail folding with EVL, if the phi is part of an out of loop
6073 // reduction then it will be transformed into a wide vp_merge.
6074 if (VF.isVector() && foldTailWithEVL() &&
6075 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6077 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6078 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6079 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6080 }
6081
6082 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6083 }
6084 case Instruction::UDiv:
6085 case Instruction::SDiv:
6086 case Instruction::URem:
6087 case Instruction::SRem:
6088 if (VF.isVector() && isPredicatedInst(I)) {
6089 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6090 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6091 ScalarCost : SafeDivisorCost;
6092 }
6093 // We've proven all lanes safe to speculate, fall through.
6094 [[fallthrough]];
6095 case Instruction::Add:
6096 case Instruction::Sub: {
6097 auto Info = Legal->getHistogramInfo(I);
6098 if (Info && VF.isVector()) {
6099 const HistogramInfo *HGram = Info.value();
6100 // Assume that a non-constant update value (or a constant != 1) requires
6101 // a multiply, and add that into the cost.
6103 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6104 if (!RHS || RHS->getZExtValue() != 1)
6105 MulCost =
6106 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6107
6108 // Find the cost of the histogram operation itself.
6109 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6110 Type *ScalarTy = I->getType();
6111 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6112 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6113 Type::getVoidTy(I->getContext()),
6114 {PtrTy, ScalarTy, MaskTy});
6115
6116 // Add the costs together with the add/sub operation.
6117 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6118 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6119 }
6120 [[fallthrough]];
6121 }
6122 case Instruction::FAdd:
6123 case Instruction::FSub:
6124 case Instruction::Mul:
6125 case Instruction::FMul:
6126 case Instruction::FDiv:
6127 case Instruction::FRem:
6128 case Instruction::Shl:
6129 case Instruction::LShr:
6130 case Instruction::AShr:
6131 case Instruction::And:
6132 case Instruction::Or:
6133 case Instruction::Xor: {
6134 // If we're speculating on the stride being 1, the multiplication may
6135 // fold away. We can generalize this for all operations using the notion
6136 // of neutral elements. (TODO)
6137 if (I->getOpcode() == Instruction::Mul &&
6138 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6139 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6140 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6141 PSE.getSCEV(I->getOperand(1))->isOne())))
6142 return 0;
6143
6144 // Detect reduction patterns
6145 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6146 return *RedCost;
6147
6148 // Certain instructions can be cheaper to vectorize if they have a constant
6149 // second vector operand. One example of this are shifts on x86.
6150 Value *Op2 = I->getOperand(1);
6151 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6152 PSE.getSE()->isSCEVable(Op2->getType()) &&
6153 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6154 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6155 }
6156 auto Op2Info = TTI.getOperandInfo(Op2);
6157 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6160
6161 SmallVector<const Value *, 4> Operands(I->operand_values());
6162 return TTI.getArithmeticInstrCost(
6163 I->getOpcode(), VectorTy, CostKind,
6164 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6165 Op2Info, Operands, I, TLI);
6166 }
6167 case Instruction::FNeg: {
6168 return TTI.getArithmeticInstrCost(
6169 I->getOpcode(), VectorTy, CostKind,
6170 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6171 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6172 I->getOperand(0), I);
6173 }
6174 case Instruction::Select: {
6176 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6177 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6178
6179 const Value *Op0, *Op1;
6180 using namespace llvm::PatternMatch;
6181 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6182 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6183 // select x, y, false --> x & y
6184 // select x, true, y --> x | y
6185 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6186 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6187 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6188 Op1->getType()->getScalarSizeInBits() == 1);
6189
6190 return TTI.getArithmeticInstrCost(
6191 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6192 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6193 }
6194
6195 Type *CondTy = SI->getCondition()->getType();
6196 if (!ScalarCond)
6197 CondTy = VectorType::get(CondTy, VF);
6198
6200 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6201 Pred = Cmp->getPredicate();
6202 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6203 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6204 {TTI::OK_AnyValue, TTI::OP_None}, I);
6205 }
6206 case Instruction::ICmp:
6207 case Instruction::FCmp: {
6208 Type *ValTy = I->getOperand(0)->getType();
6209
6211 [[maybe_unused]] Instruction *Op0AsInstruction =
6212 dyn_cast<Instruction>(I->getOperand(0));
6213 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6214 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6215 "if both the operand and the compare are marked for "
6216 "truncation, they must have the same bitwidth");
6217 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6218 }
6219
6220 VectorTy = toVectorTy(ValTy, VF);
6221 return TTI.getCmpSelInstrCost(
6222 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6223 cast<CmpInst>(I)->getPredicate(), CostKind,
6224 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6225 }
6226 case Instruction::Store:
6227 case Instruction::Load: {
6228 ElementCount Width = VF;
6229 if (Width.isVector()) {
6230 InstWidening Decision = getWideningDecision(I, Width);
6231 assert(Decision != CM_Unknown &&
6232 "CM decision should be taken at this point");
6235 if (Decision == CM_Scalarize)
6236 Width = ElementCount::getFixed(1);
6237 }
6238 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6239 return getMemoryInstructionCost(I, VF);
6240 }
6241 case Instruction::BitCast:
6242 if (I->getType()->isPointerTy())
6243 return 0;
6244 [[fallthrough]];
6245 case Instruction::ZExt:
6246 case Instruction::SExt:
6247 case Instruction::FPToUI:
6248 case Instruction::FPToSI:
6249 case Instruction::FPExt:
6250 case Instruction::PtrToInt:
6251 case Instruction::IntToPtr:
6252 case Instruction::SIToFP:
6253 case Instruction::UIToFP:
6254 case Instruction::Trunc:
6255 case Instruction::FPTrunc: {
6256 // Computes the CastContextHint from a Load/Store instruction.
6257 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6259 "Expected a load or a store!");
6260
6261 if (VF.isScalar() || !TheLoop->contains(I))
6263
6264 switch (getWideningDecision(I, VF)) {
6276 llvm_unreachable("Instr did not go through cost modelling?");
6279 llvm_unreachable_internal("Instr has invalid widening decision");
6280 }
6281
6282 llvm_unreachable("Unhandled case!");
6283 };
6284
6285 unsigned Opcode = I->getOpcode();
6287 // For Trunc, the context is the only user, which must be a StoreInst.
6288 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6289 if (I->hasOneUse())
6290 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6291 CCH = ComputeCCH(Store);
6292 }
6293 // For Z/Sext, the context is the operand, which must be a LoadInst.
6294 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6295 Opcode == Instruction::FPExt) {
6296 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6297 CCH = ComputeCCH(Load);
6298 }
6299
6300 // We optimize the truncation of induction variables having constant
6301 // integer steps. The cost of these truncations is the same as the scalar
6302 // operation.
6303 if (isOptimizableIVTruncate(I, VF)) {
6304 auto *Trunc = cast<TruncInst>(I);
6305 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6306 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6307 }
6308
6309 // Detect reduction patterns
6310 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6311 return *RedCost;
6312
6313 Type *SrcScalarTy = I->getOperand(0)->getType();
6314 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6315 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6316 SrcScalarTy =
6317 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6318 Type *SrcVecTy =
6319 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6320
6322 // If the result type is <= the source type, there will be no extend
6323 // after truncating the users to the minimal required bitwidth.
6324 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6325 (I->getOpcode() == Instruction::ZExt ||
6326 I->getOpcode() == Instruction::SExt))
6327 return 0;
6328 }
6329
6330 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6331 }
6332 case Instruction::Call:
6333 return getVectorCallCost(cast<CallInst>(I), VF);
6334 case Instruction::ExtractValue:
6335 return TTI.getInstructionCost(I, CostKind);
6336 case Instruction::Alloca:
6337 // We cannot easily widen alloca to a scalable alloca, as
6338 // the result would need to be a vector of pointers.
6339 if (VF.isScalable())
6341 [[fallthrough]];
6342 default:
6343 // This opcode is unknown. Assume that it is the same as 'mul'.
6344 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6345 } // end of switch.
6346}
6347
6349 // Ignore ephemeral values.
6351
6352 SmallVector<Value *, 4> DeadInterleavePointerOps;
6354
6355 // If a scalar epilogue is required, users outside the loop won't use
6356 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
6357 // that is the case.
6358 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6359 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6360 return RequiresScalarEpilogue &&
6361 !TheLoop->contains(cast<Instruction>(U)->getParent());
6362 };
6363
6365 DFS.perform(LI);
6366 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6367 for (Instruction &I : reverse(*BB)) {
6368 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6369 continue;
6370
6371 // Add instructions that would be trivially dead and are only used by
6372 // values already ignored to DeadOps to seed worklist.
6374 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6375 return VecValuesToIgnore.contains(U) ||
6376 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6377 }))
6378 DeadOps.push_back(&I);
6379
6380 // For interleave groups, we only create a pointer for the start of the
6381 // interleave group. Queue up addresses of group members except the insert
6382 // position for further processing.
6383 if (isAccessInterleaved(&I)) {
6384 auto *Group = getInterleavedAccessGroup(&I);
6385 if (Group->getInsertPos() == &I)
6386 continue;
6387 Value *PointerOp = getLoadStorePointerOperand(&I);
6388 DeadInterleavePointerOps.push_back(PointerOp);
6389 }
6390
6391 // Queue branches for analysis. They are dead, if their successors only
6392 // contain dead instructions.
6393 if (auto *Br = dyn_cast<BranchInst>(&I)) {
6394 if (Br->isConditional())
6395 DeadOps.push_back(&I);
6396 }
6397 }
6398
6399 // Mark ops feeding interleave group members as free, if they are only used
6400 // by other dead computations.
6401 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6402 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6403 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6404 Instruction *UI = cast<Instruction>(U);
6405 return !VecValuesToIgnore.contains(U) &&
6406 (!isAccessInterleaved(UI) ||
6407 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6408 }))
6409 continue;
6410 VecValuesToIgnore.insert(Op);
6411 append_range(DeadInterleavePointerOps, Op->operands());
6412 }
6413
6414 // Mark ops that would be trivially dead and are only used by ignored
6415 // instructions as free.
6416 BasicBlock *Header = TheLoop->getHeader();
6417
6418 // Returns true if the block contains only dead instructions. Such blocks will
6419 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6420 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6421 auto IsEmptyBlock = [this](BasicBlock *BB) {
6422 return all_of(*BB, [this](Instruction &I) {
6423 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6424 (isa<BranchInst>(&I) && !cast<BranchInst>(&I)->isConditional());
6425 });
6426 };
6427 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6428 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6429
6430 // Check if the branch should be considered dead.
6431 if (auto *Br = dyn_cast_or_null<BranchInst>(Op)) {
6432 BasicBlock *ThenBB = Br->getSuccessor(0);
6433 BasicBlock *ElseBB = Br->getSuccessor(1);
6434 // Don't considers branches leaving the loop for simplification.
6435 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6436 continue;
6437 bool ThenEmpty = IsEmptyBlock(ThenBB);
6438 bool ElseEmpty = IsEmptyBlock(ElseBB);
6439 if ((ThenEmpty && ElseEmpty) ||
6440 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6441 ElseBB->phis().empty()) ||
6442 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6443 ThenBB->phis().empty())) {
6444 VecValuesToIgnore.insert(Br);
6445 DeadOps.push_back(Br->getCondition());
6446 }
6447 continue;
6448 }
6449
6450 // Skip any op that shouldn't be considered dead.
6451 if (!Op || !TheLoop->contains(Op) ||
6452 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6454 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6455 return !VecValuesToIgnore.contains(U) &&
6456 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6457 }))
6458 continue;
6459
6460 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6461 // which applies for both scalar and vector versions. Otherwise it is only
6462 // dead in vector versions, so only add it to VecValuesToIgnore.
6463 if (all_of(Op->users(),
6464 [this](User *U) { return ValuesToIgnore.contains(U); }))
6465 ValuesToIgnore.insert(Op);
6466
6467 VecValuesToIgnore.insert(Op);
6468 append_range(DeadOps, Op->operands());
6469 }
6470
6471 // Ignore type-promoting instructions we identified during reduction
6472 // detection.
6473 for (const auto &Reduction : Legal->getReductionVars()) {
6474 const RecurrenceDescriptor &RedDes = Reduction.second;
6475 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6476 VecValuesToIgnore.insert_range(Casts);
6477 }
6478 // Ignore type-casting instructions we identified during induction
6479 // detection.
6480 for (const auto &Induction : Legal->getInductionVars()) {
6481 const InductionDescriptor &IndDes = Induction.second;
6482 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6483 VecValuesToIgnore.insert_range(Casts);
6484 }
6485}
6486
6488 // Avoid duplicating work finding in-loop reductions.
6489 if (!InLoopReductions.empty())
6490 return;
6491
6492 for (const auto &Reduction : Legal->getReductionVars()) {
6493 PHINode *Phi = Reduction.first;
6494 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6495
6496 // We don't collect reductions that are type promoted (yet).
6497 if (RdxDesc.getRecurrenceType() != Phi->getType())
6498 continue;
6499
6500 // If the target would prefer this reduction to happen "in-loop", then we
6501 // want to record it as such.
6502 RecurKind Kind = RdxDesc.getRecurrenceKind();
6503 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6504 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6505 continue;
6506
6507 // Check that we can correctly put the reductions into the loop, by
6508 // finding the chain of operations that leads from the phi to the loop
6509 // exit value.
6510 SmallVector<Instruction *, 4> ReductionOperations =
6511 RdxDesc.getReductionOpChain(Phi, TheLoop);
6512 bool InLoop = !ReductionOperations.empty();
6513
6514 if (InLoop) {
6515 InLoopReductions.insert(Phi);
6516 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6517 Instruction *LastChain = Phi;
6518 for (auto *I : ReductionOperations) {
6519 InLoopReductionImmediateChains[I] = LastChain;
6520 LastChain = I;
6521 }
6522 }
6523 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6524 << " reduction for phi: " << *Phi << "\n");
6525 }
6526}
6527
6528// This function will select a scalable VF if the target supports scalable
6529// vectors and a fixed one otherwise.
6530// TODO: we could return a pair of values that specify the max VF and
6531// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6532// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6533// doesn't have a cost model that can choose which plan to execute if
6534// more than one is generated.
6537 unsigned WidestType;
6538 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6539
6541 TTI.enableScalableVectorization()
6544
6545 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6546 unsigned N = RegSize.getKnownMinValue() / WidestType;
6547 return ElementCount::get(N, RegSize.isScalable());
6548}
6549
6552 ElementCount VF = UserVF;
6553 // Outer loop handling: They may require CFG and instruction level
6554 // transformations before even evaluating whether vectorization is profitable.
6555 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6556 // the vectorization pipeline.
6557 if (!OrigLoop->isInnermost()) {
6558 // If the user doesn't provide a vectorization factor, determine a
6559 // reasonable one.
6560 if (UserVF.isZero()) {
6561 VF = determineVPlanVF(TTI, CM);
6562 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6563
6564 // Make sure we have a VF > 1 for stress testing.
6565 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6566 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6567 << "overriding computed VF.\n");
6568 VF = ElementCount::getFixed(4);
6569 }
6570 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6572 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6573 << "not supported by the target.\n");
6575 "Scalable vectorization requested but not supported by the target",
6576 "the scalable user-specified vectorization width for outer-loop "
6577 "vectorization cannot be used because the target does not support "
6578 "scalable vectors.",
6579 "ScalableVFUnfeasible", ORE, OrigLoop);
6581 }
6582 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6584 "VF needs to be a power of two");
6585 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6586 << "VF " << VF << " to build VPlans.\n");
6587 buildVPlans(VF, VF);
6588
6589 if (VPlans.empty())
6591
6592 // For VPlan build stress testing, we bail out after VPlan construction.
6595
6596 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6597 }
6598
6599 LLVM_DEBUG(
6600 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6601 "VPlan-native path.\n");
6603}
6604
6605void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6606 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6607 CM.collectValuesToIgnore();
6608 CM.collectElementTypesForWidening();
6609
6610 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6611 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved.
6612 return;
6613
6614 // Invalidate interleave groups if all blocks of loop will be predicated.
6615 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6617 LLVM_DEBUG(
6618 dbgs()
6619 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6620 "which requires masked-interleaved support.\n");
6621 if (CM.InterleaveInfo.invalidateGroups())
6622 // Invalidating interleave groups also requires invalidating all decisions
6623 // based on them, which includes widening decisions and uniform and scalar
6624 // values.
6625 CM.invalidateCostModelingDecisions();
6626 }
6627
6628 if (CM.foldTailByMasking())
6629 Legal->prepareToFoldTailByMasking();
6630
6631 ElementCount MaxUserVF =
6632 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6633 if (UserVF) {
6634 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6636 "UserVF ignored because it may be larger than the maximal safe VF",
6637 "InvalidUserVF", ORE, OrigLoop);
6638 } else {
6640 "VF needs to be a power of two");
6641 // Collect the instructions (and their associated costs) that will be more
6642 // profitable to scalarize.
6643 CM.collectInLoopReductions();
6644 if (CM.selectUserVectorizationFactor(UserVF)) {
6645 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6646 buildVPlansWithVPRecipes(UserVF, UserVF);
6648 return;
6649 }
6650 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6651 "InvalidCost", ORE, OrigLoop);
6652 }
6653 }
6654
6655 // Collect the Vectorization Factor Candidates.
6656 SmallVector<ElementCount> VFCandidates;
6657 for (auto VF = ElementCount::getFixed(1);
6658 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6659 VFCandidates.push_back(VF);
6660 for (auto VF = ElementCount::getScalable(1);
6661 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6662 VFCandidates.push_back(VF);
6663
6664 CM.collectInLoopReductions();
6665 for (const auto &VF : VFCandidates) {
6666 // Collect Uniform and Scalar instructions after vectorization with VF.
6667 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6668 }
6669
6670 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6671 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6672
6674}
6675
6677 ElementCount VF) const {
6678 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6679 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6681 return Cost;
6682}
6683
6685 ElementCount VF) const {
6686 return CM.isUniformAfterVectorization(I, VF);
6687}
6688
6689bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6690 return CM.ValuesToIgnore.contains(UI) ||
6691 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6692 SkipCostComputation.contains(UI);
6693}
6694
6696LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6697 VPCostContext &CostCtx) const {
6699 // Cost modeling for inductions is inaccurate in the legacy cost model
6700 // compared to the recipes that are generated. To match here initially during
6701 // VPlan cost model bring up directly use the induction costs from the legacy
6702 // cost model. Note that we do this as pre-processing; the VPlan may not have
6703 // any recipes associated with the original induction increment instruction
6704 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6705 // the cost of induction phis and increments (both that are represented by
6706 // recipes and those that are not), to avoid distinguishing between them here,
6707 // and skip all recipes that represent induction phis and increments (the
6708 // former case) later on, if they exist, to avoid counting them twice.
6709 // Similarly we pre-compute the cost of any optimized truncates.
6710 // TODO: Switch to more accurate costing based on VPlan.
6711 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6713 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6714 SmallVector<Instruction *> IVInsts = {IVInc};
6715 for (unsigned I = 0; I != IVInsts.size(); I++) {
6716 for (Value *Op : IVInsts[I]->operands()) {
6717 auto *OpI = dyn_cast<Instruction>(Op);
6718 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6719 continue;
6720 IVInsts.push_back(OpI);
6721 }
6722 }
6723 IVInsts.push_back(IV);
6724 for (User *U : IV->users()) {
6725 auto *CI = cast<Instruction>(U);
6726 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6727 continue;
6728 IVInsts.push_back(CI);
6729 }
6730
6731 // If the vector loop gets executed exactly once with the given VF, ignore
6732 // the costs of comparison and induction instructions, as they'll get
6733 // simplified away.
6734 // TODO: Remove this code after stepping away from the legacy cost model and
6735 // adding code to simplify VPlans before calculating their costs.
6736 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6737 if (TC == VF && !CM.foldTailByMasking())
6738 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6739 CostCtx.SkipCostComputation);
6740
6741 for (Instruction *IVInst : IVInsts) {
6742 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6743 continue;
6744 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6745 LLVM_DEBUG({
6746 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6747 << ": induction instruction " << *IVInst << "\n";
6748 });
6749 Cost += InductionCost;
6750 CostCtx.SkipCostComputation.insert(IVInst);
6751 }
6752 }
6753
6754 /// Compute the cost of all exiting conditions of the loop using the legacy
6755 /// cost model. This is to match the legacy behavior, which adds the cost of
6756 /// all exit conditions. Note that this over-estimates the cost, as there will
6757 /// be a single condition to control the vector loop.
6759 CM.TheLoop->getExitingBlocks(Exiting);
6760 SetVector<Instruction *> ExitInstrs;
6761 // Collect all exit conditions.
6762 for (BasicBlock *EB : Exiting) {
6763 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
6764 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6765 continue;
6766 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6767 ExitInstrs.insert(CondI);
6768 }
6769 }
6770 // Compute the cost of all instructions only feeding the exit conditions.
6771 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6772 Instruction *CondI = ExitInstrs[I];
6773 if (!OrigLoop->contains(CondI) ||
6774 !CostCtx.SkipCostComputation.insert(CondI).second)
6775 continue;
6776 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6777 LLVM_DEBUG({
6778 dbgs() << "Cost of " << CondICost << " for VF " << VF
6779 << ": exit condition instruction " << *CondI << "\n";
6780 });
6781 Cost += CondICost;
6782 for (Value *Op : CondI->operands()) {
6783 auto *OpI = dyn_cast<Instruction>(Op);
6784 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6785 any_of(OpI->users(), [&ExitInstrs, this](User *U) {
6786 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
6787 !ExitInstrs.contains(cast<Instruction>(U));
6788 }))
6789 continue;
6790 ExitInstrs.insert(OpI);
6791 }
6792 }
6793
6794 // Pre-compute the costs for branches except for the backedge, as the number
6795 // of replicate regions in a VPlan may not directly match the number of
6796 // branches, which would lead to different decisions.
6797 // TODO: Compute cost of branches for each replicate region in the VPlan,
6798 // which is more accurate than the legacy cost model.
6799 for (BasicBlock *BB : OrigLoop->blocks()) {
6800 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6801 continue;
6802 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6803 if (BB == OrigLoop->getLoopLatch())
6804 continue;
6805 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6806 Cost += BranchCost;
6807 }
6808
6809 // Pre-compute costs for instructions that are forced-scalar or profitable to
6810 // scalarize. Their costs will be computed separately in the legacy cost
6811 // model.
6812 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
6813 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
6814 continue;
6815 CostCtx.SkipCostComputation.insert(ForcedScalar);
6816 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
6817 LLVM_DEBUG({
6818 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
6819 << ": forced scalar " << *ForcedScalar << "\n";
6820 });
6821 Cost += ForcedCost;
6822 }
6823 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6824 if (CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6825 continue;
6826 CostCtx.SkipCostComputation.insert(Scalarized);
6827 LLVM_DEBUG({
6828 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6829 << ": profitable to scalarize " << *Scalarized << "\n";
6830 });
6831 Cost += ScalarCost;
6832 }
6833
6834 return Cost;
6835}
6836
6837InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
6838 ElementCount VF) const {
6839 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE());
6840 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6841
6842 // Now compute and add the VPlan-based cost.
6843 Cost += Plan.cost(VF, CostCtx);
6844#ifndef NDEBUG
6845 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
6846 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6847 << " (Estimated cost per lane: ");
6848 if (Cost.isValid()) {
6849 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6850 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6851 } else /* No point dividing an invalid cost - it will still be invalid */
6852 LLVM_DEBUG(dbgs() << "Invalid");
6853 LLVM_DEBUG(dbgs() << ")\n");
6854#endif
6855 return Cost;
6856}
6857
6858#ifndef NDEBUG
6859/// Return true if the original loop \ TheLoop contains any instructions that do
6860/// not have corresponding recipes in \p Plan and are not marked to be ignored
6861/// in \p CostCtx. This means the VPlan contains simplification that the legacy
6862/// cost-model did not account for.
6864 VPCostContext &CostCtx,
6865 Loop *TheLoop,
6866 ElementCount VF) {
6867 // First collect all instructions for the recipes in Plan.
6868 auto GetInstructionForCost = [](const VPRecipeBase *R) -> Instruction * {
6869 if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
6870 return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
6871 if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
6872 return &WidenMem->getIngredient();
6873 return nullptr;
6874 };
6875
6876 // Check if a select for a safe divisor was hoisted to the pre-header. If so,
6877 // the select doesn't need to be considered for the vector loop cost; go with
6878 // the more accurate VPlan-based cost model.
6879 for (VPRecipeBase &R : *Plan.getVectorPreheader()) {
6880 auto *VPI = dyn_cast<VPInstruction>(&R);
6881 if (!VPI || VPI->getOpcode() != Instruction::Select ||
6882 VPI->getNumUsers() != 1)
6883 continue;
6884
6885 if (auto *WR = dyn_cast<VPWidenRecipe>(*VPI->user_begin())) {
6886 switch (WR->getOpcode()) {
6887 case Instruction::UDiv:
6888 case Instruction::SDiv:
6889 case Instruction::URem:
6890 case Instruction::SRem:
6891 return true;
6892 default:
6893 break;
6894 }
6895 }
6896 }
6897
6898 DenseSet<Instruction *> SeenInstrs;
6899 auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry());
6901 for (VPRecipeBase &R : *VPBB) {
6902 if (auto *IR = dyn_cast<VPInterleaveRecipe>(&R)) {
6903 auto *IG = IR->getInterleaveGroup();
6904 unsigned NumMembers = IG->getNumMembers();
6905 for (unsigned I = 0; I != NumMembers; ++I) {
6906 if (Instruction *M = IG->getMember(I))
6907 SeenInstrs.insert(M);
6908 }
6909 continue;
6910 }
6911 // Unused FOR splices are removed by VPlan transforms, so the VPlan-based
6912 // cost model won't cost it whilst the legacy will.
6913 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) {
6914 using namespace VPlanPatternMatch;
6915 if (none_of(FOR->users(),
6916 match_fn(m_VPInstruction<
6918 return true;
6919 }
6920 // The VPlan-based cost model is more accurate for partial reduction and
6921 // comparing against the legacy cost isn't desirable.
6923 return true;
6924
6925 // The VPlan-based cost model can analyze if recipes are scalar
6926 // recursively, but the legacy cost model cannot.
6927 if (auto *WidenMemR = dyn_cast<VPWidenMemoryRecipe>(&R)) {
6928 auto *AddrI = dyn_cast<Instruction>(
6929 getLoadStorePointerOperand(&WidenMemR->getIngredient()));
6930 if (AddrI && vputils::isSingleScalar(WidenMemR->getAddr()) !=
6931 CostCtx.isLegacyUniformAfterVectorization(AddrI, VF))
6932 return true;
6933 }
6934
6935 /// If a VPlan transform folded a recipe to one producing a single-scalar,
6936 /// but the original instruction wasn't uniform-after-vectorization in the
6937 /// legacy cost model, the legacy cost overestimates the actual cost.
6938 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
6939 if (RepR->isSingleScalar() &&
6941 RepR->getUnderlyingInstr(), VF))
6942 return true;
6943 }
6944 if (Instruction *UI = GetInstructionForCost(&R)) {
6945 // If we adjusted the predicate of the recipe, the cost in the legacy
6946 // cost model may be different.
6947 using namespace VPlanPatternMatch;
6948 CmpPredicate Pred;
6949 if (match(&R, m_Cmp(Pred, m_VPValue(), m_VPValue())) &&
6950 cast<VPRecipeWithIRFlags>(R).getPredicate() !=
6951 cast<CmpInst>(UI)->getPredicate())
6952 return true;
6953 SeenInstrs.insert(UI);
6954 }
6955 }
6956 }
6957
6958 // Return true if the loop contains any instructions that are not also part of
6959 // the VPlan or are skipped for VPlan-based cost computations. This indicates
6960 // that the VPlan contains extra simplifications.
6961 return any_of(TheLoop->blocks(), [&SeenInstrs, &CostCtx,
6962 TheLoop](BasicBlock *BB) {
6963 return any_of(*BB, [&SeenInstrs, &CostCtx, TheLoop, BB](Instruction &I) {
6964 // Skip induction phis when checking for simplifications, as they may not
6965 // be lowered directly be lowered to a corresponding PHI recipe.
6966 if (isa<PHINode>(&I) && BB == TheLoop->getHeader() &&
6967 CostCtx.CM.Legal->isInductionPhi(cast<PHINode>(&I)))
6968 return false;
6969 return !SeenInstrs.contains(&I) && !CostCtx.skipCostComputation(&I, true);
6970 });
6971 });
6972}
6973#endif
6974
6976 if (VPlans.empty())
6978 // If there is a single VPlan with a single VF, return it directly.
6979 VPlan &FirstPlan = *VPlans[0];
6980 if (VPlans.size() == 1 && size(FirstPlan.vectorFactors()) == 1)
6981 return {*FirstPlan.vectorFactors().begin(), 0, 0};
6982
6983 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
6984 << (CM.CostKind == TTI::TCK_RecipThroughput
6985 ? "Reciprocal Throughput\n"
6986 : CM.CostKind == TTI::TCK_Latency
6987 ? "Instruction Latency\n"
6988 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
6989 : CM.CostKind == TTI::TCK_SizeAndLatency
6990 ? "Code Size and Latency\n"
6991 : "Unknown\n"));
6992
6994 assert(hasPlanWithVF(ScalarVF) &&
6995 "More than a single plan/VF w/o any plan having scalar VF");
6996
6997 // TODO: Compute scalar cost using VPlan-based cost model.
6998 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
6999 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
7000 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
7001 VectorizationFactor BestFactor = ScalarFactor;
7002
7003 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
7004 if (ForceVectorization) {
7005 // Ignore scalar width, because the user explicitly wants vectorization.
7006 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
7007 // evaluation.
7008 BestFactor.Cost = InstructionCost::getMax();
7009 }
7010
7011 for (auto &P : VPlans) {
7012 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
7013 P->vectorFactors().end());
7014
7016 if (any_of(VFs, [this](ElementCount VF) {
7017 return CM.shouldConsiderRegPressureForVF(VF);
7018 }))
7019 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7020
7021 for (unsigned I = 0; I < VFs.size(); I++) {
7022 ElementCount VF = VFs[I];
7023 if (VF.isScalar())
7024 continue;
7025 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
7026 LLVM_DEBUG(
7027 dbgs()
7028 << "LV: Not considering vector loop of width " << VF
7029 << " because it will not generate any vector instructions.\n");
7030 continue;
7031 }
7032 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
7033 LLVM_DEBUG(
7034 dbgs()
7035 << "LV: Not considering vector loop of width " << VF
7036 << " because it would cause replicated blocks to be generated,"
7037 << " which isn't allowed when optimizing for size.\n");
7038 continue;
7039 }
7040
7041 InstructionCost Cost = cost(*P, VF);
7042 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
7043
7044 if (CM.shouldConsiderRegPressureForVF(VF) &&
7045 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs)) {
7046 LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
7047 << VF << " because it uses too many registers\n");
7048 continue;
7049 }
7050
7051 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail()))
7052 BestFactor = CurrentFactor;
7053
7054 // If profitable add it to ProfitableVF list.
7055 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
7056 ProfitableVFs.push_back(CurrentFactor);
7057 }
7058 }
7059
7060#ifndef NDEBUG
7061 // Select the optimal vectorization factor according to the legacy cost-model.
7062 // This is now only used to verify the decisions by the new VPlan-based
7063 // cost-model and will be retired once the VPlan-based cost-model is
7064 // stabilized.
7065 VectorizationFactor LegacyVF = selectVectorizationFactor();
7066 VPlan &BestPlan = getPlanFor(BestFactor.Width);
7067
7068 // Pre-compute the cost and use it to check if BestPlan contains any
7069 // simplifications not accounted for in the legacy cost model. If that's the
7070 // case, don't trigger the assertion, as the extra simplifications may cause a
7071 // different VF to be picked by the VPlan-based cost model.
7072 VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind,
7073 *CM.PSE.getSE());
7074 precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
7075 // Verify that the VPlan-based and legacy cost models agree, except for VPlans
7076 // with early exits and plans with additional VPlan simplifications. The
7077 // legacy cost model doesn't properly model costs for such loops.
7078 assert((BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() ||
7080 CostCtx, OrigLoop,
7081 BestFactor.Width) ||
7083 getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) &&
7084 " VPlan cost model and legacy cost model disagreed");
7085 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7086 "when vectorizing, the scalar cost must be computed.");
7087#endif
7088
7089 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7090 return BestFactor;
7091}
7092
7094 using namespace VPlanPatternMatch;
7096 "RdxResult must be ComputeFindIVResult");
7097 VPValue *StartVPV = RdxResult->getOperand(1);
7098 match(StartVPV, m_Freeze(m_VPValue(StartVPV)));
7099 return StartVPV->getLiveInIRValue();
7100}
7101
7102// If \p EpiResumePhiR is resume VPPhi for a reduction when vectorizing the
7103// epilog loop, fix the reduction's scalar PHI node by adding the incoming value
7104// from the main vector loop.
7106 VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock) {
7107 // Get the VPInstruction computing the reduction result in the middle block.
7108 // The first operand may not be from the middle block if it is not connected
7109 // to the scalar preheader. In that case, there's nothing to fix.
7110 VPValue *Incoming = EpiResumePhiR->getOperand(0);
7113 auto *EpiRedResult = dyn_cast<VPInstruction>(Incoming);
7114 if (!EpiRedResult ||
7115 (EpiRedResult->getOpcode() != VPInstruction::ComputeAnyOfResult &&
7116 EpiRedResult->getOpcode() != VPInstruction::ComputeReductionResult &&
7117 EpiRedResult->getOpcode() != VPInstruction::ComputeFindIVResult))
7118 return;
7119
7120 auto *EpiRedHeaderPhi =
7121 cast<VPReductionPHIRecipe>(EpiRedResult->getOperand(0));
7122 RecurKind Kind = EpiRedHeaderPhi->getRecurrenceKind();
7123 Value *MainResumeValue;
7124 if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
7125 assert((VPI->getOpcode() == VPInstruction::Broadcast ||
7126 VPI->getOpcode() == VPInstruction::ReductionStartVector) &&
7127 "unexpected start recipe");
7128 MainResumeValue = VPI->getOperand(0)->getUnderlyingValue();
7129 } else
7130 MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
7132 [[maybe_unused]] Value *StartV =
7133 EpiRedResult->getOperand(1)->getLiveInIRValue();
7134 auto *Cmp = cast<ICmpInst>(MainResumeValue);
7135 assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
7136 "AnyOf expected to start with ICMP_NE");
7137 assert(Cmp->getOperand(1) == StartV &&
7138 "AnyOf expected to start by comparing main resume value to original "
7139 "start value");
7140 MainResumeValue = Cmp->getOperand(0);
7142 Value *StartV = getStartValueFromReductionResult(EpiRedResult);
7143 Value *SentinelV = EpiRedResult->getOperand(2)->getLiveInIRValue();
7144 using namespace llvm::PatternMatch;
7145 Value *Cmp, *OrigResumeV, *CmpOp;
7146 [[maybe_unused]] bool IsExpectedPattern =
7147 match(MainResumeValue,
7148 m_Select(m_OneUse(m_Value(Cmp)), m_Specific(SentinelV),
7149 m_Value(OrigResumeV))) &&
7151 m_Value(CmpOp))) &&
7152 ((CmpOp == StartV && isGuaranteedNotToBeUndefOrPoison(CmpOp))));
7153 assert(IsExpectedPattern && "Unexpected reduction resume pattern");
7154 MainResumeValue = OrigResumeV;
7155 }
7156 PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
7157
7158 // When fixing reductions in the epilogue loop we should already have
7159 // created a bc.merge.rdx Phi after the main vector body. Ensure that we carry
7160 // over the incoming values correctly.
7161 EpiResumePhi.setIncomingValueForBlock(
7162 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7163}
7164
7166 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7167 InnerLoopVectorizer &ILV, DominatorTree *DT, bool VectorizingEpilogue) {
7168 assert(BestVPlan.hasVF(BestVF) &&
7169 "Trying to execute plan with unsupported VF");
7170 assert(BestVPlan.hasUF(BestUF) &&
7171 "Trying to execute plan with unsupported UF");
7172 if (BestVPlan.hasEarlyExit())
7173 ++LoopsEarlyExitVectorized;
7174 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7175 // cost model is complete for better cost estimates.
7180 bool HasBranchWeights =
7181 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7182 if (HasBranchWeights) {
7183 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7185 BestVPlan, BestVF, VScale);
7186 }
7187
7188 // Checks are the same for all VPlans, added to BestVPlan only for
7189 // compactness.
7190 attachRuntimeChecks(BestVPlan, ILV.RTChecks, HasBranchWeights);
7191
7192 // Retrieving VectorPH now when it's easier while VPlan still has Regions.
7193 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7194
7195 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7198 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7199 BestVPlan.getScalarPreheader()) {
7200 // TODO: The vector loop would be dead, should not even try to vectorize.
7201 ORE->emit([&]() {
7202 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7203 OrigLoop->getStartLoc(),
7204 OrigLoop->getHeader())
7205 << "Created vector loop never executes due to insufficient trip "
7206 "count.";
7207 });
7209 }
7210
7212 BestVPlan, BestVF,
7213 TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector));
7215
7217 // Regions are dissolved after optimizing for VF and UF, which completely
7218 // removes unneeded loop regions first.
7220 // Canonicalize EVL loops after regions are dissolved.
7224 BestVPlan, VectorPH, CM.foldTailByMasking(),
7225 CM.requiresScalarEpilogue(BestVF.isVector()));
7226 VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF);
7227 VPlanTransforms::cse(BestVPlan);
7229
7230 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7231 // making any changes to the CFG.
7232 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7233 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7234 if (!ILV.getTripCount())
7235 ILV.setTripCount(BestVPlan.getTripCount()->getLiveInIRValue());
7236 else
7237 assert(VectorizingEpilogue && "should only re-use the existing trip "
7238 "count during epilogue vectorization");
7239
7240 // Perform the actual loop transformation.
7241 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7242 OrigLoop->getParentLoop(),
7243 Legal->getWidestInductionType());
7244
7245#ifdef EXPENSIVE_CHECKS
7246 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7247#endif
7248
7249 // 1. Set up the skeleton for vectorization, including vector pre-header and
7250 // middle block. The vector loop is created during VPlan execution.
7251 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7253 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7255
7256 assert(verifyVPlanIsValid(BestVPlan, true /*VerifyLate*/) &&
7257 "final VPlan is invalid");
7258
7259 // After vectorization, the exit blocks of the original loop will have
7260 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7261 // looked through single-entry phis.
7262 ScalarEvolution &SE = *PSE.getSE();
7263 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7264 if (!Exit->hasPredecessors())
7265 continue;
7266 for (VPRecipeBase &PhiR : Exit->phis())
7268 OrigLoop, cast<PHINode>(&cast<VPIRPhi>(PhiR).getInstruction()));
7269 }
7270 // Forget the original loop and block dispositions.
7271 SE.forgetLoop(OrigLoop);
7273
7275
7276 //===------------------------------------------------===//
7277 //
7278 // Notice: any optimization or new instruction that go
7279 // into the code below should also be implemented in
7280 // the cost-model.
7281 //
7282 //===------------------------------------------------===//
7283
7284 // Retrieve loop information before executing the plan, which may remove the
7285 // original loop, if it becomes unreachable.
7286 MDNode *LID = OrigLoop->getLoopID();
7287 unsigned OrigLoopInvocationWeight = 0;
7288 std::optional<unsigned> OrigAverageTripCount =
7289 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7290
7291 BestVPlan.execute(&State);
7292
7293 // 2.6. Maintain Loop Hints
7294 // Keep all loop hints from the original loop on the vector loop (we'll
7295 // replace the vectorizer-specific hints below).
7296 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7297 // Add metadata to disable runtime unrolling a scalar loop when there
7298 // are no runtime checks about strides and memory. A scalar loop that is
7299 // rarely used is not worth unrolling.
7300 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7302 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7303 : nullptr,
7304 HeaderVPBB, BestVPlan, VectorizingEpilogue, LID, OrigAverageTripCount,
7305 OrigLoopInvocationWeight,
7306 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7307 DisableRuntimeUnroll);
7308
7309 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7310 // predication, updating analyses.
7311 ILV.fixVectorizedLoop(State);
7312
7314
7315 return ExpandedSCEVs;
7316}
7317
7318//===--------------------------------------------------------------------===//
7319// EpilogueVectorizerMainLoop
7320//===--------------------------------------------------------------------===//
7321
7322/// This function is partially responsible for generating the control flow
7323/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7325 BasicBlock *ScalarPH = createScalarPreheader("");
7326 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7327
7328 // Generate the code to check the minimum iteration count of the vector
7329 // epilogue (see below).
7330 EPI.EpilogueIterationCountCheck =
7331 emitIterationCountCheck(VectorPH, ScalarPH, true);
7332 EPI.EpilogueIterationCountCheck->setName("iter.check");
7333
7334 VectorPH = cast<BranchInst>(EPI.EpilogueIterationCountCheck->getTerminator())
7335 ->getSuccessor(1);
7336 // Generate the iteration count check for the main loop, *after* the check
7337 // for the epilogue loop, so that the path-length is shorter for the case
7338 // that goes directly through the vector epilogue. The longer-path length for
7339 // the main loop is compensated for, by the gain from vectorizing the larger
7340 // trip count. Note: the branch will get updated later on when we vectorize
7341 // the epilogue.
7342 EPI.MainLoopIterationCountCheck =
7343 emitIterationCountCheck(VectorPH, ScalarPH, false);
7344
7345 return cast<BranchInst>(EPI.MainLoopIterationCountCheck->getTerminator())
7346 ->getSuccessor(1);
7347}
7348
7350 LLVM_DEBUG({
7351 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7352 << "Main Loop VF:" << EPI.MainLoopVF
7353 << ", Main Loop UF:" << EPI.MainLoopUF
7354 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7355 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7356 });
7357}
7358
7361 dbgs() << "intermediate fn:\n"
7362 << *OrigLoop->getHeader()->getParent() << "\n";
7363 });
7364}
7365
7367 BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue) {
7368 assert(Bypass && "Expected valid bypass basic block.");
7371 Value *CheckMinIters = createIterationCountCheck(
7372 VectorPH, ForEpilogue ? EPI.EpilogueVF : EPI.MainLoopVF,
7373 ForEpilogue ? EPI.EpilogueUF : EPI.MainLoopUF);
7374
7375 BasicBlock *const TCCheckBlock = VectorPH;
7376 if (!ForEpilogue)
7377 TCCheckBlock->setName("vector.main.loop.iter.check");
7378
7379 // Create new preheader for vector loop.
7380 VectorPH = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7381 static_cast<DominatorTree *>(nullptr), LI, nullptr,
7382 "vector.ph");
7383 if (ForEpilogue) {
7384 // Save the trip count so we don't have to regenerate it in the
7385 // vec.epilog.iter.check. This is safe to do because the trip count
7386 // generated here dominates the vector epilog iter check.
7387 EPI.TripCount = Count;
7388 } else {
7390 }
7391
7392 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7393 if (hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator()))
7394 setBranchWeights(BI, MinItersBypassWeights, /*IsExpected=*/false);
7395 ReplaceInstWithInst(TCCheckBlock->getTerminator(), &BI);
7396
7397 // When vectorizing the main loop, its trip-count check is placed in a new
7398 // block, whereas the overall trip-count check is placed in the VPlan entry
7399 // block. When vectorizing the epilogue loop, its trip-count check is placed
7400 // in the VPlan entry block.
7401 if (!ForEpilogue)
7402 introduceCheckBlockInVPlan(TCCheckBlock);
7403 return TCCheckBlock;
7404}
7405
7406//===--------------------------------------------------------------------===//
7407// EpilogueVectorizerEpilogueLoop
7408//===--------------------------------------------------------------------===//
7409
7410/// This function creates a new scalar preheader, using the previous one as
7411/// entry block to the epilogue VPlan. The minimum iteration check is being
7412/// represented in VPlan.
7414 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
7415 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
7416 OriginalScalarPH->setName("vec.epilog.iter.check");
7417 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
7418 VPBasicBlock *OldEntry = Plan.getEntry();
7419 for (auto &R : make_early_inc_range(*OldEntry)) {
7420 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable by
7421 // defining.
7422 if (isa<VPIRInstruction>(&R))
7423 continue;
7424 R.moveBefore(*NewEntry, NewEntry->end());
7425 }
7426
7427 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7428 Plan.setEntry(NewEntry);
7429 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7430
7431 return OriginalScalarPH;
7432}
7433
7435 LLVM_DEBUG({
7436 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7437 << "Epilogue Loop VF:" << EPI.EpilogueVF
7438 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7439 });
7440}
7441
7444 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7445 });
7446}
7447
7449VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
7450 VFRange &Range) {
7452 "Must be called with either a load or store");
7453
7454 auto WillWiden = [&](ElementCount VF) -> bool {
7456 CM.getWideningDecision(I, VF);
7458 "CM decision should be taken at this point.");
7460 return true;
7461 if (CM.isScalarAfterVectorization(I, VF) ||
7462 CM.isProfitableToScalarize(I, VF))
7463 return false;
7465 };
7466
7468 return nullptr;
7469
7470 VPValue *Mask = nullptr;
7471 if (Legal->isMaskRequired(I))
7472 Mask = getBlockInMask(Builder.getInsertBlock());
7473
7474 // Determine if the pointer operand of the access is either consecutive or
7475 // reverse consecutive.
7477 CM.getWideningDecision(I, Range.Start);
7479 bool Consecutive =
7481
7483 if (Consecutive) {
7485 Ptr->getUnderlyingValue()->stripPointerCasts());
7486 VPSingleDefRecipe *VectorPtr;
7487 if (Reverse) {
7488 // When folding the tail, we may compute an address that we don't in the
7489 // original scalar loop and it may not be inbounds. Drop Inbounds in that
7490 // case.
7491 GEPNoWrapFlags Flags =
7492 (CM.foldTailByMasking() || !GEP || !GEP->isInBounds())
7494 : GEPNoWrapFlags::inBounds();
7495 VectorPtr =
7497 /*Stride*/ -1, Flags, I->getDebugLoc());
7498 } else {
7499 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7500 GEP ? GEP->getNoWrapFlags()
7502 I->getDebugLoc());
7503 }
7504 Builder.insert(VectorPtr);
7505 Ptr = VectorPtr;
7506 }
7507 if (LoadInst *Load = dyn_cast<LoadInst>(I))
7508 return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
7509 VPIRMetadata(*Load, LVer), I->getDebugLoc());
7510
7511 StoreInst *Store = cast<StoreInst>(I);
7512 return new VPWidenStoreRecipe(*Store, Ptr, Operands[0], Mask, Consecutive,
7513 Reverse, VPIRMetadata(*Store, LVer),
7514 I->getDebugLoc());
7515}
7516
7517/// Creates a VPWidenIntOrFpInductionRecpipe for \p Phi. If needed, it will also
7518/// insert a recipe to expand the step for the induction recipe.
7519static VPWidenIntOrFpInductionRecipe *
7521 VPValue *Start, const InductionDescriptor &IndDesc,
7522 VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop) {
7523 assert(IndDesc.getStartValue() ==
7524 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
7525 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
7526 "step must be loop invariant");
7527
7528 VPValue *Step =
7530 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
7531 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7532 IndDesc, TruncI,
7533 TruncI->getDebugLoc());
7534 }
7535 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
7536 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7537 IndDesc, Phi->getDebugLoc());
7538}
7539
7540VPHeaderPHIRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
7541 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) {
7542
7543 // Check if this is an integer or fp induction. If so, build the recipe that
7544 // produces its scalar and vector values.
7545 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
7546 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, Plan,
7547 *PSE.getSE(), *OrigLoop);
7548
7549 // Check if this is pointer induction. If so, build the recipe for it.
7550 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) {
7551 VPValue *Step = vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep());
7552 return new VPWidenPointerInductionRecipe(
7553 Phi, Operands[0], Step, &Plan.getVFxUF(), *II,
7555 [&](ElementCount VF) {
7556 return CM.isScalarAfterVectorization(Phi, VF);
7557 },
7558 Range),
7559 Phi->getDebugLoc());
7560 }
7561 return nullptr;
7562}
7563
7564VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
7565 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range) {
7566 // Optimize the special case where the source is a constant integer
7567 // induction variable. Notice that we can only optimize the 'trunc' case
7568 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7569 // (c) other casts depend on pointer size.
7570
7571 // Determine whether \p K is a truncation based on an induction variable that
7572 // can be optimized.
7573 auto IsOptimizableIVTruncate =
7574 [&](Instruction *K) -> std::function<bool(ElementCount)> {
7575 return [=](ElementCount VF) -> bool {
7576 return CM.isOptimizableIVTruncate(K, VF);
7577 };
7578 };
7579
7581 IsOptimizableIVTruncate(I), Range)) {
7582
7583 auto *Phi = cast<PHINode>(I->getOperand(0));
7584 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
7585 VPValue *Start = Plan.getOrAddLiveIn(II.getStartValue());
7586 return createWidenInductionRecipes(Phi, I, Start, II, Plan, *PSE.getSE(),
7587 *OrigLoop);
7588 }
7589 return nullptr;
7590}
7591
7592VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
7594 VFRange &Range) {
7596 [this, CI](ElementCount VF) {
7597 return CM.isScalarWithPredication(CI, VF);
7598 },
7599 Range);
7600
7601 if (IsPredicated)
7602 return nullptr;
7603
7605 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7606 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7607 ID == Intrinsic::pseudoprobe ||
7608 ID == Intrinsic::experimental_noalias_scope_decl))
7609 return nullptr;
7610
7612
7613 // Is it beneficial to perform intrinsic call compared to lib call?
7614 bool ShouldUseVectorIntrinsic =
7616 [&](ElementCount VF) -> bool {
7617 return CM.getCallWideningDecision(CI, VF).Kind ==
7619 },
7620 Range);
7621 if (ShouldUseVectorIntrinsic)
7622 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(),
7623 CI->getDebugLoc());
7624
7625 Function *Variant = nullptr;
7626 std::optional<unsigned> MaskPos;
7627 // Is better to call a vectorized version of the function than to to scalarize
7628 // the call?
7629 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7630 [&](ElementCount VF) -> bool {
7631 // The following case may be scalarized depending on the VF.
7632 // The flag shows whether we can use a usual Call for vectorized
7633 // version of the instruction.
7634
7635 // If we've found a variant at a previous VF, then stop looking. A
7636 // vectorized variant of a function expects input in a certain shape
7637 // -- basically the number of input registers, the number of lanes
7638 // per register, and whether there's a mask required.
7639 // We store a pointer to the variant in the VPWidenCallRecipe, so
7640 // once we have an appropriate variant it's only valid for that VF.
7641 // This will force a different vplan to be generated for each VF that
7642 // finds a valid variant.
7643 if (Variant)
7644 return false;
7645 LoopVectorizationCostModel::CallWideningDecision Decision =
7646 CM.getCallWideningDecision(CI, VF);
7648 Variant = Decision.Variant;
7649 MaskPos = Decision.MaskPos;
7650 return true;
7651 }
7652
7653 return false;
7654 },
7655 Range);
7656 if (ShouldUseVectorCall) {
7657 if (MaskPos.has_value()) {
7658 // We have 2 cases that would require a mask:
7659 // 1) The block needs to be predicated, either due to a conditional
7660 // in the scalar loop or use of an active lane mask with
7661 // tail-folding, and we use the appropriate mask for the block.
7662 // 2) No mask is required for the block, but the only available
7663 // vector variant at this VF requires a mask, so we synthesize an
7664 // all-true mask.
7665 VPValue *Mask = nullptr;
7666 if (Legal->isMaskRequired(CI))
7667 Mask = getBlockInMask(Builder.getInsertBlock());
7668 else
7669 Mask = Plan.getOrAddLiveIn(
7670 ConstantInt::getTrue(IntegerType::getInt1Ty(CI->getContext())));
7671
7672 Ops.insert(Ops.begin() + *MaskPos, Mask);
7673 }
7674
7675 Ops.push_back(Operands.back());
7676 return new VPWidenCallRecipe(CI, Variant, Ops, CI->getDebugLoc());
7677 }
7678
7679 return nullptr;
7680}
7681
7682bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7684 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
7685 // Instruction should be widened, unless it is scalar after vectorization,
7686 // scalarization is profitable or it is predicated.
7687 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7688 return CM.isScalarAfterVectorization(I, VF) ||
7689 CM.isProfitableToScalarize(I, VF) ||
7690 CM.isScalarWithPredication(I, VF);
7691 };
7693 Range);
7694}
7695
7696VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
7698 switch (I->getOpcode()) {
7699 default:
7700 return nullptr;
7701 case Instruction::SDiv:
7702 case Instruction::UDiv:
7703 case Instruction::SRem:
7704 case Instruction::URem: {
7705 // If not provably safe, use a select to form a safe divisor before widening the
7706 // div/rem operation itself. Otherwise fall through to general handling below.
7707 if (CM.isPredicatedInst(I)) {
7709 VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
7710 VPValue *One =
7711 Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
7712 auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
7713 Ops[1] = SafeRHS;
7714 return new VPWidenRecipe(*I, Ops);
7715 }
7716 [[fallthrough]];
7717 }
7718 case Instruction::Add:
7719 case Instruction::And:
7720 case Instruction::AShr:
7721 case Instruction::FAdd:
7722 case Instruction::FCmp:
7723 case Instruction::FDiv:
7724 case Instruction::FMul:
7725 case Instruction::FNeg:
7726 case Instruction::FRem:
7727 case Instruction::FSub:
7728 case Instruction::ICmp:
7729 case Instruction::LShr:
7730 case Instruction::Mul:
7731 case Instruction::Or:
7732 case Instruction::Select:
7733 case Instruction::Shl:
7734 case Instruction::Sub:
7735 case Instruction::Xor:
7736 case Instruction::Freeze: {
7738 if (Instruction::isBinaryOp(I->getOpcode())) {
7739 // The legacy cost model uses SCEV to check if some of the operands are
7740 // constants. To match the legacy cost model's behavior, use SCEV to try
7741 // to replace operands with constants.
7742 ScalarEvolution &SE = *PSE.getSE();
7743 auto GetConstantViaSCEV = [this, &SE](VPValue *Op) {
7744 if (!Op->isLiveIn())
7745 return Op;
7746 Value *V = Op->getUnderlyingValue();
7747 if (isa<Constant>(V) || !SE.isSCEVable(V->getType()))
7748 return Op;
7749 auto *C = dyn_cast<SCEVConstant>(SE.getSCEV(V));
7750 if (!C)
7751 return Op;
7752 return Plan.getOrAddLiveIn(C->getValue());
7753 };
7754 // For Mul, the legacy cost model checks both operands.
7755 if (I->getOpcode() == Instruction::Mul)
7756 NewOps[0] = GetConstantViaSCEV(NewOps[0]);
7757 // For other binops, the legacy cost model only checks the second operand.
7758 NewOps[1] = GetConstantViaSCEV(NewOps[1]);
7759 }
7760 return new VPWidenRecipe(*I, NewOps);
7761 }
7762 case Instruction::ExtractValue: {
7764 Type *I32Ty = IntegerType::getInt32Ty(I->getContext());
7765 auto *EVI = cast<ExtractValueInst>(I);
7766 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7767 unsigned Idx = EVI->getIndices()[0];
7768 NewOps.push_back(Plan.getOrAddLiveIn(ConstantInt::get(I32Ty, Idx, false)));
7769 return new VPWidenRecipe(*I, NewOps);
7770 }
7771 };
7772}
7773
7774VPHistogramRecipe *
7775VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
7777 // FIXME: Support other operations.
7778 unsigned Opcode = HI->Update->getOpcode();
7779 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7780 "Histogram update operation must be an Add or Sub");
7781
7783 // Bucket address.
7784 HGramOps.push_back(Operands[1]);
7785 // Increment value.
7786 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7787
7788 // In case of predicated execution (due to tail-folding, or conditional
7789 // execution, or both), pass the relevant mask.
7790 if (Legal->isMaskRequired(HI->Store))
7791 HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
7792
7793 return new VPHistogramRecipe(Opcode, HGramOps, HI->Store->getDebugLoc());
7794}
7795
7796VPReplicateRecipe *
7798 VFRange &Range) {
7800 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7801 Range);
7802
7803 bool IsPredicated = CM.isPredicatedInst(I);
7804
7805 // Even if the instruction is not marked as uniform, there are certain
7806 // intrinsic calls that can be effectively treated as such, so we check for
7807 // them here. Conservatively, we only do this for scalable vectors, since
7808 // for fixed-width VFs we can always fall back on full scalarization.
7809 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
7810 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
7811 case Intrinsic::assume:
7812 case Intrinsic::lifetime_start:
7813 case Intrinsic::lifetime_end:
7814 // For scalable vectors if one of the operands is variant then we still
7815 // want to mark as uniform, which will generate one instruction for just
7816 // the first lane of the vector. We can't scalarize the call in the same
7817 // way as for fixed-width vectors because we don't know how many lanes
7818 // there are.
7819 //
7820 // The reasons for doing it this way for scalable vectors are:
7821 // 1. For the assume intrinsic generating the instruction for the first
7822 // lane is still be better than not generating any at all. For
7823 // example, the input may be a splat across all lanes.
7824 // 2. For the lifetime start/end intrinsics the pointer operand only
7825 // does anything useful when the input comes from a stack object,
7826 // which suggests it should always be uniform. For non-stack objects
7827 // the effect is to poison the object, which still allows us to
7828 // remove the call.
7829 IsUniform = true;
7830 break;
7831 default:
7832 break;
7833 }
7834 }
7835 VPValue *BlockInMask = nullptr;
7836 if (!IsPredicated) {
7837 // Finalize the recipe for Instr, first if it is not predicated.
7838 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7839 } else {
7840 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7841 // Instructions marked for predication are replicated and a mask operand is
7842 // added initially. Masked replicate recipes will later be placed under an
7843 // if-then construct to prevent side-effects. Generate recipes to compute
7844 // the block mask for this region.
7845 BlockInMask = getBlockInMask(Builder.getInsertBlock());
7846 }
7847
7848 // Note that there is some custom logic to mark some intrinsics as uniform
7849 // manually above for scalable vectors, which this assert needs to account for
7850 // as well.
7851 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
7852 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
7853 "Should not predicate a uniform recipe");
7854 auto *Recipe = new VPReplicateRecipe(I, Operands, IsUniform, BlockInMask,
7855 VPIRMetadata(*I, LVer));
7856 return Recipe;
7857}
7858
7859/// Find all possible partial reductions in the loop and track all of those that
7860/// are valid so recipes can be formed later.
7862 // Find all possible partial reductions.
7864 PartialReductionChains;
7865 for (const auto &[Phi, RdxDesc] : Legal->getReductionVars()) {
7866 getScaledReductions(Phi, RdxDesc.getLoopExitInstr(), Range,
7867 PartialReductionChains);
7868 }
7869
7870 // A partial reduction is invalid if any of its extends are used by
7871 // something that isn't another partial reduction. This is because the
7872 // extends are intended to be lowered along with the reduction itself.
7873
7874 // Build up a set of partial reduction ops for efficient use checking.
7875 SmallPtrSet<User *, 4> PartialReductionOps;
7876 for (const auto &[PartialRdx, _] : PartialReductionChains)
7877 PartialReductionOps.insert(PartialRdx.ExtendUser);
7878
7879 auto ExtendIsOnlyUsedByPartialReductions =
7880 [&PartialReductionOps](Instruction *Extend) {
7881 return all_of(Extend->users(), [&](const User *U) {
7882 return PartialReductionOps.contains(U);
7883 });
7884 };
7885
7886 // Check if each use of a chain's two extends is a partial reduction
7887 // and only add those that don't have non-partial reduction users.
7888 for (auto Pair : PartialReductionChains) {
7889 PartialReductionChain Chain = Pair.first;
7890 if (ExtendIsOnlyUsedByPartialReductions(Chain.ExtendA) &&
7891 (!Chain.ExtendB || ExtendIsOnlyUsedByPartialReductions(Chain.ExtendB)))
7892 ScaledReductionMap.try_emplace(Chain.Reduction, Pair.second);
7893 }
7894}
7895
7896bool VPRecipeBuilder::getScaledReductions(
7897 Instruction *PHI, Instruction *RdxExitInstr, VFRange &Range,
7898 SmallVectorImpl<std::pair<PartialReductionChain, unsigned>> &Chains) {
7899 if (!CM.TheLoop->contains(RdxExitInstr))
7900 return false;
7901
7902 auto *Update = dyn_cast<BinaryOperator>(RdxExitInstr);
7903 if (!Update)
7904 return false;
7905
7906 Value *Op = Update->getOperand(0);
7907 Value *PhiOp = Update->getOperand(1);
7908 if (Op == PHI)
7909 std::swap(Op, PhiOp);
7910
7911 // Try and get a scaled reduction from the first non-phi operand.
7912 // If one is found, we use the discovered reduction instruction in
7913 // place of the accumulator for costing.
7914 if (auto *OpInst = dyn_cast<Instruction>(Op)) {
7915 if (getScaledReductions(PHI, OpInst, Range, Chains)) {
7916 PHI = Chains.rbegin()->first.Reduction;
7917
7918 Op = Update->getOperand(0);
7919 PhiOp = Update->getOperand(1);
7920 if (Op == PHI)
7921 std::swap(Op, PhiOp);
7922 }
7923 }
7924 if (PhiOp != PHI)
7925 return false;
7926
7927 using namespace llvm::PatternMatch;
7928
7929 // If the update is a binary operator, check both of its operands to see if
7930 // they are extends. Otherwise, see if the update comes directly from an
7931 // extend.
7932 Instruction *Exts[2] = {nullptr};
7933 BinaryOperator *ExtendUser = dyn_cast<BinaryOperator>(Op);
7934 std::optional<unsigned> BinOpc;
7935 Type *ExtOpTypes[2] = {nullptr};
7937
7938 auto CollectExtInfo = [this, &Exts, &ExtOpTypes,
7939 &ExtKinds](SmallVectorImpl<Value *> &Ops) -> bool {
7940 for (const auto &[I, OpI] : enumerate(Ops)) {
7941 Value *ExtOp;
7942 if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))))
7943 return false;
7944 Exts[I] = cast<Instruction>(OpI);
7945
7946 // TODO: We should be able to support live-ins.
7947 if (!CM.TheLoop->contains(Exts[I]))
7948 return false;
7949
7950 ExtOpTypes[I] = ExtOp->getType();
7951 ExtKinds[I] = TTI::getPartialReductionExtendKind(Exts[I]);
7952 }
7953 return true;
7954 };
7955
7956 if (ExtendUser) {
7957 if (!ExtendUser->hasOneUse())
7958 return false;
7959
7960 // Use the side-effect of match to replace BinOp only if the pattern is
7961 // matched, we don't care at this point whether it actually matched.
7962 match(ExtendUser, m_Neg(m_BinOp(ExtendUser)));
7963
7964 SmallVector<Value *> Ops(ExtendUser->operands());
7965 if (!CollectExtInfo(Ops))
7966 return false;
7967
7968 BinOpc = std::make_optional(ExtendUser->getOpcode());
7969 } else if (match(Update, m_Add(m_Value(), m_Value()))) {
7970 // We already know the operands for Update are Op and PhiOp.
7972 if (!CollectExtInfo(Ops))
7973 return false;
7974
7975 ExtendUser = Update;
7976 BinOpc = std::nullopt;
7977 } else
7978 return false;
7979
7980 PartialReductionChain Chain(RdxExitInstr, Exts[0], Exts[1], ExtendUser);
7981
7982 TypeSize PHISize = PHI->getType()->getPrimitiveSizeInBits();
7983 TypeSize ASize = ExtOpTypes[0]->getPrimitiveSizeInBits();
7984 if (!PHISize.hasKnownScalarFactor(ASize))
7985 return false;
7986 unsigned TargetScaleFactor = PHISize.getKnownScalarFactor(ASize);
7987
7989 [&](ElementCount VF) {
7991 Update->getOpcode(), ExtOpTypes[0], ExtOpTypes[1],
7992 PHI->getType(), VF, ExtKinds[0], ExtKinds[1], BinOpc,
7993 CM.CostKind);
7994 return Cost.isValid();
7995 },
7996 Range)) {
7997 Chains.emplace_back(Chain, TargetScaleFactor);
7998 return true;
7999 }
8000
8001 return false;
8002}
8003
8005 VFRange &Range) {
8006 // First, check for specific widening recipes that deal with inductions, Phi
8007 // nodes, calls and memory operations.
8008 VPRecipeBase *Recipe;
8009 Instruction *Instr = R->getUnderlyingInstr();
8010 SmallVector<VPValue *, 4> Operands(R->operands());
8011 if (auto *PhiR = dyn_cast<VPPhi>(R)) {
8012 VPBasicBlock *Parent = PhiR->getParent();
8013 [[maybe_unused]] VPRegionBlock *LoopRegionOf =
8014 Parent->getEnclosingLoopRegion();
8015 assert(LoopRegionOf && LoopRegionOf->getEntry() == Parent &&
8016 "Non-header phis should have been handled during predication");
8017 auto *Phi = cast<PHINode>(R->getUnderlyingInstr());
8018 assert(Operands.size() == 2 && "Must have 2 operands for header phis");
8019 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8020 return Recipe;
8021
8022 VPHeaderPHIRecipe *PhiRecipe = nullptr;
8023 assert((Legal->isReductionVariable(Phi) ||
8024 Legal->isFixedOrderRecurrence(Phi)) &&
8025 "can only widen reductions and fixed-order recurrences here");
8026 VPValue *StartV = Operands[0];
8027 if (Legal->isReductionVariable(Phi)) {
8028 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(Phi);
8029 assert(RdxDesc.getRecurrenceStartValue() ==
8030 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8031
8032 // If the PHI is used by a partial reduction, set the scale factor.
8033 unsigned ScaleFactor =
8034 getScalingForReduction(RdxDesc.getLoopExitInstr()).value_or(1);
8035 PhiRecipe = new VPReductionPHIRecipe(
8036 Phi, RdxDesc.getRecurrenceKind(), *StartV, CM.isInLoopReduction(Phi),
8037 CM.useOrderedReductions(RdxDesc), ScaleFactor);
8038 } else {
8039 // TODO: Currently fixed-order recurrences are modeled as chains of
8040 // first-order recurrences. If there are no users of the intermediate
8041 // recurrences in the chain, the fixed order recurrence should be modeled
8042 // directly, enabling more efficient codegen.
8043 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8044 }
8045 // Add backedge value.
8046 PhiRecipe->addOperand(Operands[1]);
8047 return PhiRecipe;
8048 }
8049 assert(!R->isPhi() && "only VPPhi nodes expected at this point");
8050
8051 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8052 cast<TruncInst>(Instr), Operands, Range)))
8053 return Recipe;
8054
8055 // All widen recipes below deal only with VF > 1.
8057 [&](ElementCount VF) { return VF.isScalar(); }, Range))
8058 return nullptr;
8059
8060 if (auto *CI = dyn_cast<CallInst>(Instr))
8061 return tryToWidenCall(CI, Operands, Range);
8062
8063 if (StoreInst *SI = dyn_cast<StoreInst>(Instr))
8064 if (auto HistInfo = Legal->getHistogramInfo(SI))
8065 return tryToWidenHistogram(*HistInfo, Operands);
8066
8067 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8068 return tryToWidenMemory(Instr, Operands, Range);
8069
8070 if (std::optional<unsigned> ScaleFactor = getScalingForReduction(Instr)) {
8071 if (auto PartialRed =
8072 tryToCreatePartialReduction(Instr, Operands, ScaleFactor.value()))
8073 return PartialRed;
8074 }
8075
8076 if (!shouldWiden(Instr, Range))
8077 return nullptr;
8078
8079 if (auto *GEP = dyn_cast<GetElementPtrInst>(Instr))
8080 return new VPWidenGEPRecipe(GEP, Operands);
8081
8082 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8083 return new VPWidenSelectRecipe(*SI, Operands);
8084 }
8085
8086 if (auto *CI = dyn_cast<CastInst>(Instr)) {
8087 return new VPWidenCastRecipe(CI->getOpcode(), Operands[0], CI->getType(),
8088 *CI);
8089 }
8090
8091 return tryToWiden(Instr, Operands);
8092}
8093
8097 unsigned ScaleFactor) {
8098 assert(Operands.size() == 2 &&
8099 "Unexpected number of operands for partial reduction");
8100
8101 VPValue *BinOp = Operands[0];
8103 VPRecipeBase *BinOpRecipe = BinOp->getDefiningRecipe();
8104 if (isa<VPReductionPHIRecipe>(BinOpRecipe) ||
8105 isa<VPPartialReductionRecipe>(BinOpRecipe))
8106 std::swap(BinOp, Accumulator);
8107
8108 if (ScaleFactor !=
8109 vputils::getVFScaleFactor(Accumulator->getDefiningRecipe()))
8110 return nullptr;
8111
8112 unsigned ReductionOpcode = Reduction->getOpcode();
8113 if (ReductionOpcode == Instruction::Sub) {
8114 auto *const Zero = ConstantInt::get(Reduction->getType(), 0);
8116 Ops.push_back(Plan.getOrAddLiveIn(Zero));
8117 Ops.push_back(BinOp);
8118 BinOp = new VPWidenRecipe(*Reduction, Ops);
8119 Builder.insert(BinOp->getDefiningRecipe());
8120 ReductionOpcode = Instruction::Add;
8121 }
8122
8123 VPValue *Cond = nullptr;
8124 if (CM.blockNeedsPredicationForAnyReason(Reduction->getParent())) {
8125 assert((ReductionOpcode == Instruction::Add ||
8126 ReductionOpcode == Instruction::Sub) &&
8127 "Expected an ADD or SUB operation for predicated partial "
8128 "reductions (because the neutral element in the mask is zero)!");
8129 Cond = getBlockInMask(Builder.getInsertBlock());
8130 VPValue *Zero =
8131 Plan.getOrAddLiveIn(ConstantInt::get(Reduction->getType(), 0));
8132 BinOp = Builder.createSelect(Cond, BinOp, Zero, Reduction->getDebugLoc());
8133 }
8134 return new VPPartialReductionRecipe(ReductionOpcode, Accumulator, BinOp, Cond,
8135 ScaleFactor, Reduction);
8136}
8137
8138void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8139 ElementCount MaxVF) {
8140 if (ElementCount::isKnownGT(MinVF, MaxVF))
8141 return;
8142
8143 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8144
8145 const LoopAccessInfo *LAI = Legal->getLAI();
8147 OrigLoop, LI, DT, PSE.getSE());
8148 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
8150 // Only use noalias metadata when using memory checks guaranteeing no
8151 // overlap across all iterations.
8152 LVer.prepareNoAliasMetadata();
8153 }
8154
8155 // Create initial base VPlan0, to serve as common starting point for all
8156 // candidates built later for specific VF ranges.
8157 auto VPlan0 = VPlanTransforms::buildVPlan0(
8158 OrigLoop, *LI, Legal->getWidestInductionType(),
8159 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8160
8161 auto MaxVFTimes2 = MaxVF * 2;
8162 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
8163 VFRange SubRange = {VF, MaxVFTimes2};
8164 if (auto Plan = tryToBuildVPlanWithVPRecipes(
8165 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
8166 bool HasScalarVF = Plan->hasScalarVFOnly();
8167 // Now optimize the initial VPlan.
8168 if (!HasScalarVF)
8170 *Plan, CM.getMinimalBitwidths());
8172 // TODO: try to put it close to addActiveLaneMask().
8173 if (CM.foldTailWithEVL() && !HasScalarVF)
8175 *Plan, CM.getMaxSafeElements());
8176 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8177 VPlans.push_back(std::move(Plan));
8178 }
8179 VF = SubRange.End;
8180 }
8181}
8182
8183/// Create and return a ResumePhi for \p WideIV, unless it is truncated. If the
8184/// induction recipe is not canonical, creates a VPDerivedIVRecipe to compute
8185/// the end value of the induction.
8187 VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder,
8188 VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC) {
8189 auto *WideIntOrFp = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV);
8190 // Truncated wide inductions resume from the last lane of their vector value
8191 // in the last vector iteration which is handled elsewhere.
8192 if (WideIntOrFp && WideIntOrFp->getTruncInst())
8193 return nullptr;
8194
8195 VPValue *Start = WideIV->getStartValue();
8196 VPValue *Step = WideIV->getStepValue();
8198 VPValue *EndValue = VectorTC;
8199 if (!WideIntOrFp || !WideIntOrFp->isCanonical()) {
8200 EndValue = VectorPHBuilder.createDerivedIV(
8201 ID.getKind(), dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
8202 Start, VectorTC, Step);
8203 }
8204
8205 // EndValue is derived from the vector trip count (which has the same type as
8206 // the widest induction) and thus may be wider than the induction here.
8207 Type *ScalarTypeOfWideIV = TypeInfo.inferScalarType(WideIV);
8208 if (ScalarTypeOfWideIV != TypeInfo.inferScalarType(EndValue)) {
8209 EndValue = VectorPHBuilder.createScalarCast(Instruction::Trunc, EndValue,
8210 ScalarTypeOfWideIV,
8211 WideIV->getDebugLoc());
8212 }
8213
8214 auto *ResumePhiRecipe = ScalarPHBuilder.createScalarPhi(
8215 {EndValue, Start}, WideIV->getDebugLoc(), "bc.resume.val");
8216 return ResumePhiRecipe;
8217}
8218
8219/// Create resume phis in the scalar preheader for first-order recurrences,
8220/// reductions and inductions, and update the VPIRInstructions wrapping the
8221/// original phis in the scalar header. End values for inductions are added to
8222/// \p IVEndValues.
8223static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan,
8224 DenseMap<VPValue *, VPValue *> &IVEndValues) {
8225 VPTypeAnalysis TypeInfo(Plan);
8226 auto *ScalarPH = Plan.getScalarPreheader();
8227 auto *MiddleVPBB = cast<VPBasicBlock>(ScalarPH->getPredecessors()[0]);
8228 VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
8229 VPBuilder VectorPHBuilder(
8230 cast<VPBasicBlock>(VectorRegion->getSinglePredecessor()));
8231 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
8232 VPBuilder ScalarPHBuilder(ScalarPH);
8233 for (VPRecipeBase &ScalarPhiR : Plan.getScalarHeader()->phis()) {
8234 auto *ScalarPhiIRI = cast<VPIRPhi>(&ScalarPhiR);
8235
8236 // TODO: Extract final value from induction recipe initially, optimize to
8237 // pre-computed end value together in optimizeInductionExitUsers.
8238 auto *VectorPhiR =
8239 cast<VPHeaderPHIRecipe>(Builder.getRecipe(&ScalarPhiIRI->getIRPhi()));
8240 if (auto *WideIVR = dyn_cast<VPWidenInductionRecipe>(VectorPhiR)) {
8242 WideIVR, VectorPHBuilder, ScalarPHBuilder, TypeInfo,
8243 &Plan.getVectorTripCount())) {
8244 assert(isa<VPPhi>(ResumePhi) && "Expected a phi");
8245 IVEndValues[WideIVR] = ResumePhi->getOperand(0);
8246 ScalarPhiIRI->addOperand(ResumePhi);
8247 continue;
8248 }
8249 // TODO: Also handle truncated inductions here. Computing end-values
8250 // separately should be done as VPlan-to-VPlan optimization, after
8251 // legalizing all resume values to use the last lane from the loop.
8252 assert(cast<VPWidenIntOrFpInductionRecipe>(VectorPhiR)->getTruncInst() &&
8253 "should only skip truncated wide inductions");
8254 continue;
8255 }
8256
8257 // The backedge value provides the value to resume coming out of a loop,
8258 // which for FORs is a vector whose last element needs to be extracted. The
8259 // start value provides the value if the loop is bypassed.
8260 bool IsFOR = isa<VPFirstOrderRecurrencePHIRecipe>(VectorPhiR);
8261 auto *ResumeFromVectorLoop = VectorPhiR->getBackedgeValue();
8262 assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
8263 "Cannot handle loops with uncountable early exits");
8264 if (IsFOR)
8265 ResumeFromVectorLoop = MiddleBuilder.createNaryOp(
8266 VPInstruction::ExtractLastElement, {ResumeFromVectorLoop}, {},
8267 "vector.recur.extract");
8268 StringRef Name = IsFOR ? "scalar.recur.init" : "bc.merge.rdx";
8269 auto *ResumePhiR = ScalarPHBuilder.createScalarPhi(
8270 {ResumeFromVectorLoop, VectorPhiR->getStartValue()}, {}, Name);
8271 ScalarPhiIRI->addOperand(ResumePhiR);
8272 }
8273}
8274
8275/// Handle users in the exit block for first order reductions in the original
8276/// exit block. The penultimate value of recurrences is fed to their LCSSA phi
8277/// users in the original exit block using the VPIRInstruction wrapping to the
8278/// LCSSA phi.
8280 VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
8281 auto *ScalarPHVPBB = Plan.getScalarPreheader();
8282 auto *MiddleVPBB = Plan.getMiddleBlock();
8283 VPBuilder ScalarPHBuilder(ScalarPHVPBB);
8284 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
8285
8286 auto IsScalableOne = [](ElementCount VF) -> bool {
8287 return VF == ElementCount::getScalable(1);
8288 };
8289
8290 for (auto &HeaderPhi : VectorRegion->getEntryBasicBlock()->phis()) {
8291 auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
8292 if (!FOR)
8293 continue;
8294
8295 assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
8296 "Cannot handle loops with uncountable early exits");
8297
8298 // This is the second phase of vectorizing first-order recurrences, creating
8299 // extract for users outside the loop. An overview of the transformation is
8300 // described below. Suppose we have the following loop with some use after
8301 // the loop of the last a[i-1],
8302 //
8303 // for (int i = 0; i < n; ++i) {
8304 // t = a[i - 1];
8305 // b[i] = a[i] - t;
8306 // }
8307 // use t;
8308 //
8309 // There is a first-order recurrence on "a". For this loop, the shorthand
8310 // scalar IR looks like:
8311 //
8312 // scalar.ph:
8313 // s.init = a[-1]
8314 // br scalar.body
8315 //
8316 // scalar.body:
8317 // i = phi [0, scalar.ph], [i+1, scalar.body]
8318 // s1 = phi [s.init, scalar.ph], [s2, scalar.body]
8319 // s2 = a[i]
8320 // b[i] = s2 - s1
8321 // br cond, scalar.body, exit.block
8322 //
8323 // exit.block:
8324 // use = lcssa.phi [s1, scalar.body]
8325 //
8326 // In this example, s1 is a recurrence because it's value depends on the
8327 // previous iteration. In the first phase of vectorization, we created a
8328 // VPFirstOrderRecurrencePHIRecipe v1 for s1. Now we create the extracts
8329 // for users in the scalar preheader and exit block.
8330 //
8331 // vector.ph:
8332 // v_init = vector(..., ..., ..., a[-1])
8333 // br vector.body
8334 //
8335 // vector.body
8336 // i = phi [0, vector.ph], [i+4, vector.body]
8337 // v1 = phi [v_init, vector.ph], [v2, vector.body]
8338 // v2 = a[i, i+1, i+2, i+3]
8339 // b[i] = v2 - v1
8340 // // Next, third phase will introduce v1' = splice(v1(3), v2(0, 1, 2))
8341 // b[i, i+1, i+2, i+3] = v2 - v1
8342 // br cond, vector.body, middle.block
8343 //
8344 // middle.block:
8345 // vector.recur.extract.for.phi = v2(2)
8346 // vector.recur.extract = v2(3)
8347 // br cond, scalar.ph, exit.block
8348 //
8349 // scalar.ph:
8350 // scalar.recur.init = phi [vector.recur.extract, middle.block],
8351 // [s.init, otherwise]
8352 // br scalar.body
8353 //
8354 // scalar.body:
8355 // i = phi [0, scalar.ph], [i+1, scalar.body]
8356 // s1 = phi [scalar.recur.init, scalar.ph], [s2, scalar.body]
8357 // s2 = a[i]
8358 // b[i] = s2 - s1
8359 // br cond, scalar.body, exit.block
8360 //
8361 // exit.block:
8362 // lo = lcssa.phi [s1, scalar.body],
8363 // [vector.recur.extract.for.phi, middle.block]
8364 //
8365 // Now update VPIRInstructions modeling LCSSA phis in the exit block.
8366 // Extract the penultimate value of the recurrence and use it as operand for
8367 // the VPIRInstruction modeling the phi.
8368 for (VPUser *U : FOR->users()) {
8369 using namespace llvm::VPlanPatternMatch;
8370 if (!match(U, m_ExtractLastElement(m_Specific(FOR))))
8371 continue;
8372 // For VF vscale x 1, if vscale = 1, we are unable to extract the
8373 // penultimate value of the recurrence. Instead we rely on the existing
8374 // extract of the last element from the result of
8375 // VPInstruction::FirstOrderRecurrenceSplice.
8376 // TODO: Consider vscale_range info and UF.
8378 Range))
8379 return;
8380 VPValue *PenultimateElement = MiddleBuilder.createNaryOp(
8381 VPInstruction::ExtractPenultimateElement, {FOR->getBackedgeValue()},
8382 {}, "vector.recur.extract.for.phi");
8383 cast<VPInstruction>(U)->replaceAllUsesWith(PenultimateElement);
8384 }
8385 }
8386}
8387
8388VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
8389 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
8390
8391 using namespace llvm::VPlanPatternMatch;
8392 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8393
8394 // ---------------------------------------------------------------------------
8395 // Build initial VPlan: Scan the body of the loop in a topological order to
8396 // visit each basic block after having visited its predecessor basic blocks.
8397 // ---------------------------------------------------------------------------
8398
8399 bool RequiresScalarEpilogueCheck =
8401 [this](ElementCount VF) {
8402 return !CM.requiresScalarEpilogue(VF.isVector());
8403 },
8404 Range);
8405 VPlanTransforms::handleEarlyExits(*Plan, Legal->hasUncountableEarlyExit());
8406 VPlanTransforms::addMiddleCheck(*Plan, RequiresScalarEpilogueCheck,
8407 CM.foldTailByMasking());
8408
8410
8411 // Don't use getDecisionAndClampRange here, because we don't know the UF
8412 // so this function is better to be conservative, rather than to split
8413 // it up into different VPlans.
8414 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
8415 bool IVUpdateMayOverflow = false;
8416 for (ElementCount VF : Range)
8417 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
8418
8419 TailFoldingStyle Style = CM.getTailFoldingStyle(IVUpdateMayOverflow);
8420 // Use NUW for the induction increment if we proved that it won't overflow in
8421 // the vector loop or when not folding the tail. In the later case, we know
8422 // that the canonical induction increment will not overflow as the vector trip
8423 // count is >= increment and a multiple of the increment.
8424 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
8425 if (!HasNUW) {
8426 auto *IVInc = Plan->getVectorLoopRegion()
8427 ->getExitingBasicBlock()
8428 ->getTerminator()
8429 ->getOperand(0);
8430 assert(match(IVInc, m_VPInstruction<Instruction::Add>(
8431 m_Specific(Plan->getCanonicalIV()), m_VPValue())) &&
8432 "Did not find the canonical IV increment");
8433 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
8434 }
8435
8436 // ---------------------------------------------------------------------------
8437 // Pre-construction: record ingredients whose recipes we'll need to further
8438 // process after constructing the initial VPlan.
8439 // ---------------------------------------------------------------------------
8440
8441 // For each interleave group which is relevant for this (possibly trimmed)
8442 // Range, add it to the set of groups to be later applied to the VPlan and add
8443 // placeholders for its members' Recipes which we'll be replacing with a
8444 // single VPInterleaveRecipe.
8445 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8446 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
8447 bool Result = (VF.isVector() && // Query is illegal for VF == 1
8448 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8450 // For scalable vectors, the interleave factors must be <= 8 since we
8451 // require the (de)interleaveN intrinsics instead of shufflevectors.
8452 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
8453 "Unsupported interleave factor for scalable vectors");
8454 return Result;
8455 };
8456 if (!getDecisionAndClampRange(ApplyIG, Range))
8457 continue;
8458 InterleaveGroups.insert(IG);
8459 }
8460
8461 // ---------------------------------------------------------------------------
8462 // Predicate and linearize the top-level loop region.
8463 // ---------------------------------------------------------------------------
8464 auto BlockMaskCache = VPlanTransforms::introduceMasksAndLinearize(
8465 *Plan, CM.foldTailByMasking());
8466
8467 // ---------------------------------------------------------------------------
8468 // Construct wide recipes and apply predication for original scalar
8469 // VPInstructions in the loop.
8470 // ---------------------------------------------------------------------------
8471 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8472 Builder, BlockMaskCache, LVer);
8473 RecipeBuilder.collectScaledReductions(Range);
8474
8475 // Scan the body of the loop in a topological order to visit each basic block
8476 // after having visited its predecessor basic blocks.
8477 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
8478 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
8479 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
8480 HeaderVPBB);
8481
8482 auto *MiddleVPBB = Plan->getMiddleBlock();
8483 VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
8484 // Mapping from VPValues in the initial plan to their widened VPValues. Needed
8485 // temporarily to update created block masks.
8486 DenseMap<VPValue *, VPValue *> Old2New;
8487 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
8488 // Convert input VPInstructions to widened recipes.
8489 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
8490 auto *SingleDef = cast<VPSingleDefRecipe>(&R);
8491 auto *UnderlyingValue = SingleDef->getUnderlyingValue();
8492 // Skip recipes that do not need transforming, including canonical IV,
8493 // wide canonical IV and VPInstructions without underlying values. The
8494 // latter are added above for masking.
8495 // FIXME: Migrate code relying on the underlying instruction from VPlan0
8496 // to construct recipes below to not use the underlying instruction.
8498 &R) ||
8499 (isa<VPInstruction>(&R) && !UnderlyingValue))
8500 continue;
8501
8502 // FIXME: VPlan0, which models a copy of the original scalar loop, should
8503 // not use VPWidenPHIRecipe to model the phis.
8505 UnderlyingValue && "unsupported recipe");
8506
8507 // TODO: Gradually replace uses of underlying instruction by analyses on
8508 // VPlan.
8509 Instruction *Instr = cast<Instruction>(UnderlyingValue);
8510 Builder.setInsertPoint(SingleDef);
8511
8512 // The stores with invariant address inside the loop will be deleted, and
8513 // in the exit block, a uniform store recipe will be created for the final
8514 // invariant store of the reduction.
8515 StoreInst *SI;
8516 if ((SI = dyn_cast<StoreInst>(Instr)) &&
8517 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
8518 // Only create recipe for the final invariant store of the reduction.
8519 if (Legal->isInvariantStoreOfReduction(SI)) {
8520 auto *Recipe =
8521 new VPReplicateRecipe(SI, R.operands(), true /* IsUniform */,
8522 nullptr /*Mask*/, VPIRMetadata(*SI, LVer));
8523 Recipe->insertBefore(*MiddleVPBB, MBIP);
8524 }
8525 R.eraseFromParent();
8526 continue;
8527 }
8528
8529 VPRecipeBase *Recipe =
8530 RecipeBuilder.tryToCreateWidenRecipe(SingleDef, Range);
8531 if (!Recipe)
8532 Recipe = RecipeBuilder.handleReplication(Instr, R.operands(), Range);
8533
8534 RecipeBuilder.setRecipe(Instr, Recipe);
8535 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
8536 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
8537 // moved to the phi section in the header.
8538 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8539 } else {
8540 Builder.insert(Recipe);
8541 }
8542 if (Recipe->getNumDefinedValues() == 1) {
8543 SingleDef->replaceAllUsesWith(Recipe->getVPSingleValue());
8544 Old2New[SingleDef] = Recipe->getVPSingleValue();
8545 } else {
8546 assert(Recipe->getNumDefinedValues() == 0 &&
8547 "Unexpected multidef recipe");
8548 R.eraseFromParent();
8549 }
8550 }
8551 }
8552
8553 // replaceAllUsesWith above may invalidate the block masks. Update them here.
8554 // TODO: Include the masks as operands in the predicated VPlan directly
8555 // to remove the need to keep a map of masks beyond the predication
8556 // transform.
8557 RecipeBuilder.updateBlockMaskCache(Old2New);
8558 for (VPValue *Old : Old2New.keys())
8559 Old->getDefiningRecipe()->eraseFromParent();
8560
8561 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8562 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8563 "entry block must be set to a VPRegionBlock having a non-empty entry "
8564 "VPBasicBlock");
8565
8566 // Update wide induction increments to use the same step as the corresponding
8567 // wide induction. This enables detecting induction increments directly in
8568 // VPlan and removes redundant splats.
8569 for (const auto &[Phi, ID] : Legal->getInductionVars()) {
8570 auto *IVInc = cast<Instruction>(
8571 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
8572 if (IVInc->getOperand(0) != Phi || IVInc->getOpcode() != Instruction::Add)
8573 continue;
8574 VPWidenInductionRecipe *WideIV =
8575 cast<VPWidenInductionRecipe>(RecipeBuilder.getRecipe(Phi));
8576 VPRecipeBase *R = RecipeBuilder.getRecipe(IVInc);
8577 R->setOperand(1, WideIV->getStepValue());
8578 }
8579
8581 DenseMap<VPValue *, VPValue *> IVEndValues;
8582 addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
8583
8584 // ---------------------------------------------------------------------------
8585 // Transform initial VPlan: Apply previously taken decisions, in order, to
8586 // bring the VPlan to its final state.
8587 // ---------------------------------------------------------------------------
8588
8589 // Adjust the recipes for any inloop reductions.
8590 adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
8591
8592 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
8593 // NaNs if possible, bail out otherwise.
8595 *Plan))
8596 return nullptr;
8597
8598 // Transform recipes to abstract recipes if it is legal and beneficial and
8599 // clamp the range for better cost estimation.
8600 // TODO: Enable following transform when the EVL-version of extended-reduction
8601 // and mulacc-reduction are implemented.
8602 if (!CM.foldTailWithEVL()) {
8603 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
8604 *CM.PSE.getSE());
8606 CostCtx, Range);
8607 }
8608
8609 for (ElementCount VF : Range)
8610 Plan->addVF(VF);
8611 Plan->setName("Initial VPlan");
8612
8613 // Interleave memory: for each Interleave Group we marked earlier as relevant
8614 // for this VPlan, replace the Recipes widening its memory instructions with a
8615 // single VPInterleaveRecipe at its insertion point.
8617 InterleaveGroups, RecipeBuilder,
8618 CM.isScalarEpilogueAllowed());
8619
8620 // Replace VPValues for known constant strides.
8622 Legal->getLAI()->getSymbolicStrides());
8623
8624 auto BlockNeedsPredication = [this](BasicBlock *BB) {
8625 return Legal->blockNeedsPredication(BB);
8626 };
8628 BlockNeedsPredication);
8629
8630 // Sink users of fixed-order recurrence past the recipe defining the previous
8631 // value and introduce FirstOrderRecurrenceSplice VPInstructions.
8633 *Plan, Builder))
8634 return nullptr;
8635
8636 if (useActiveLaneMask(Style)) {
8637 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
8638 // TailFoldingStyle is visible there.
8639 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
8640 bool WithoutRuntimeCheck =
8641 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
8642 VPlanTransforms::addActiveLaneMask(*Plan, ForControlFlow,
8643 WithoutRuntimeCheck);
8644 }
8645 VPlanTransforms::optimizeInductionExitUsers(*Plan, IVEndValues, *PSE.getSE());
8646
8647 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8648 return Plan;
8649}
8650
8651VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
8652 // Outer loop handling: They may require CFG and instruction level
8653 // transformations before even evaluating whether vectorization is profitable.
8654 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8655 // the vectorization pipeline.
8656 assert(!OrigLoop->isInnermost());
8657 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8658
8659 auto Plan = VPlanTransforms::buildVPlan0(
8660 OrigLoop, *LI, Legal->getWidestInductionType(),
8661 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8663 /*HasUncountableExit*/ false);
8664 VPlanTransforms::addMiddleCheck(*Plan, /*RequiresScalarEpilogue*/ true,
8665 /*TailFolded*/ false);
8666
8668
8669 for (ElementCount VF : Range)
8670 Plan->addVF(VF);
8671
8673 Plan,
8674 [this](PHINode *P) {
8675 return Legal->getIntOrFpInductionDescriptor(P);
8676 },
8677 *TLI))
8678 return nullptr;
8679
8680 // Collect mapping of IR header phis to header phi recipes, to be used in
8681 // addScalarResumePhis.
8682 DenseMap<VPBasicBlock *, VPValue *> BlockMaskCache;
8683 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8684 Builder, BlockMaskCache, nullptr /*LVer*/);
8685 for (auto &R : Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8687 continue;
8688 auto *HeaderR = cast<VPHeaderPHIRecipe>(&R);
8689 RecipeBuilder.setRecipe(HeaderR->getUnderlyingInstr(), HeaderR);
8690 }
8691 DenseMap<VPValue *, VPValue *> IVEndValues;
8692 // TODO: IVEndValues are not used yet in the native path, to optimize exit
8693 // values.
8694 addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
8695
8696 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8697 return Plan;
8698}
8699
8700// Adjust the recipes for reductions. For in-loop reductions the chain of
8701// instructions leading from the loop exit instr to the phi need to be converted
8702// to reductions, with one operand being vector and the other being the scalar
8703// reduction chain. For other reductions, a select is introduced between the phi
8704// and users outside the vector region when folding the tail.
8705//
8706// A ComputeReductionResult recipe is added to the middle block, also for
8707// in-loop reductions which compute their result in-loop, because generating
8708// the subsequent bc.merge.rdx phi is driven by ComputeReductionResult recipes.
8709//
8710// Adjust AnyOf reductions; replace the reduction phi for the selected value
8711// with a boolean reduction phi node to check if the condition is true in any
8712// iteration. The final value is selected by the final ComputeReductionResult.
8713void LoopVectorizationPlanner::adjustRecipesForReductions(
8714 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8715 using namespace VPlanPatternMatch;
8716 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8717 VPBasicBlock *Header = VectorLoopRegion->getEntryBasicBlock();
8718 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8720
8721 for (VPRecipeBase &R : Header->phis()) {
8722 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8723 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
8724 continue;
8725
8726 RecurKind Kind = PhiR->getRecurrenceKind();
8727 assert(
8730 "AnyOf and FindIV reductions are not allowed for in-loop reductions");
8731
8732 // Collect the chain of "link" recipes for the reduction starting at PhiR.
8733 SetVector<VPSingleDefRecipe *> Worklist;
8734 Worklist.insert(PhiR);
8735 for (unsigned I = 0; I != Worklist.size(); ++I) {
8736 VPSingleDefRecipe *Cur = Worklist[I];
8737 for (VPUser *U : Cur->users()) {
8738 auto *UserRecipe = cast<VPSingleDefRecipe>(U);
8739 if (!UserRecipe->getParent()->getEnclosingLoopRegion()) {
8740 assert((UserRecipe->getParent() == MiddleVPBB ||
8741 UserRecipe->getParent() == Plan->getScalarPreheader()) &&
8742 "U must be either in the loop region, the middle block or the "
8743 "scalar preheader.");
8744 continue;
8745 }
8746 Worklist.insert(UserRecipe);
8747 }
8748 }
8749
8750 // Visit operation "Links" along the reduction chain top-down starting from
8751 // the phi until LoopExitValue. We keep track of the previous item
8752 // (PreviousLink) to tell which of the two operands of a Link will remain
8753 // scalar and which will be reduced. For minmax by select(cmp), Link will be
8754 // the select instructions. Blend recipes of in-loop reduction phi's will
8755 // get folded to their non-phi operand, as the reduction recipe handles the
8756 // condition directly.
8757 VPSingleDefRecipe *PreviousLink = PhiR; // Aka Worklist[0].
8758 for (VPSingleDefRecipe *CurrentLink : drop_begin(Worklist)) {
8759 if (auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink)) {
8760 assert(Blend->getNumIncomingValues() == 2 &&
8761 "Blend must have 2 incoming values");
8762 if (Blend->getIncomingValue(0) == PhiR) {
8763 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
8764 } else {
8765 assert(Blend->getIncomingValue(1) == PhiR &&
8766 "PhiR must be an operand of the blend");
8767 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
8768 }
8769 continue;
8770 }
8771
8772 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
8773
8774 // Index of the first operand which holds a non-mask vector operand.
8775 unsigned IndexOfFirstOperand;
8776 // Recognize a call to the llvm.fmuladd intrinsic.
8777 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
8778 VPValue *VecOp;
8779 VPBasicBlock *LinkVPBB = CurrentLink->getParent();
8780 if (IsFMulAdd) {
8781 assert(
8783 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
8784 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
8785 isa<VPWidenIntrinsicRecipe>(CurrentLink)) &&
8786 CurrentLink->getOperand(2) == PreviousLink &&
8787 "expected a call where the previous link is the added operand");
8788
8789 // If the instruction is a call to the llvm.fmuladd intrinsic then we
8790 // need to create an fmul recipe (multiplying the first two operands of
8791 // the fmuladd together) to use as the vector operand for the fadd
8792 // reduction.
8793 VPInstruction *FMulRecipe = new VPInstruction(
8794 Instruction::FMul,
8795 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
8796 CurrentLinkI->getFastMathFlags());
8797 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
8798 VecOp = FMulRecipe;
8799 } else if (PhiR->isInLoop() && Kind == RecurKind::AddChainWithSubs &&
8800 CurrentLinkI->getOpcode() == Instruction::Sub) {
8801 Type *PhiTy = PhiR->getUnderlyingValue()->getType();
8802 auto *Zero = Plan->getOrAddLiveIn(ConstantInt::get(PhiTy, 0));
8803 VPWidenRecipe *Sub = new VPWidenRecipe(
8804 Instruction::Sub, {Zero, CurrentLink->getOperand(1)}, {},
8805 VPIRMetadata(), CurrentLinkI->getDebugLoc());
8806 Sub->setUnderlyingValue(CurrentLinkI);
8807 LinkVPBB->insert(Sub, CurrentLink->getIterator());
8808 VecOp = Sub;
8809 } else {
8811 if (isa<VPWidenRecipe>(CurrentLink)) {
8812 assert(isa<CmpInst>(CurrentLinkI) &&
8813 "need to have the compare of the select");
8814 continue;
8815 }
8816 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
8817 "must be a select recipe");
8818 IndexOfFirstOperand = 1;
8819 } else {
8820 assert((MinVF.isScalar() || isa<VPWidenRecipe>(CurrentLink)) &&
8821 "Expected to replace a VPWidenSC");
8822 IndexOfFirstOperand = 0;
8823 }
8824 // Note that for non-commutable operands (cmp-selects), the semantics of
8825 // the cmp-select are captured in the recurrence kind.
8826 unsigned VecOpId =
8827 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
8828 ? IndexOfFirstOperand + 1
8829 : IndexOfFirstOperand;
8830 VecOp = CurrentLink->getOperand(VecOpId);
8831 assert(VecOp != PreviousLink &&
8832 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
8833 (VecOpId - IndexOfFirstOperand)) ==
8834 PreviousLink &&
8835 "PreviousLink must be the operand other than VecOp");
8836 }
8837
8838 VPValue *CondOp = nullptr;
8839 if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent()))
8840 CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent());
8841
8842 // TODO: Retrieve FMFs from recipes directly.
8843 RecurrenceDescriptor RdxDesc = Legal->getRecurrenceDescriptor(
8844 cast<PHINode>(PhiR->getUnderlyingInstr()));
8845 // Non-FP RdxDescs will have all fast math flags set, so clear them.
8846 FastMathFlags FMFs = isa<FPMathOperator>(CurrentLinkI)
8847 ? RdxDesc.getFastMathFlags()
8848 : FastMathFlags();
8849 auto *RedRecipe = new VPReductionRecipe(
8850 Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp,
8851 PhiR->isOrdered(), CurrentLinkI->getDebugLoc());
8852 // Append the recipe to the end of the VPBasicBlock because we need to
8853 // ensure that it comes after all of it's inputs, including CondOp.
8854 // Delete CurrentLink as it will be invalid if its operand is replaced
8855 // with a reduction defined at the bottom of the block in the next link.
8856 if (LinkVPBB->getNumSuccessors() == 0)
8857 RedRecipe->insertBefore(&*std::prev(std::prev(LinkVPBB->end())));
8858 else
8859 LinkVPBB->appendRecipe(RedRecipe);
8860
8861 CurrentLink->replaceAllUsesWith(RedRecipe);
8862 ToDelete.push_back(CurrentLink);
8863 PreviousLink = RedRecipe;
8864 }
8865 }
8866 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8867 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8868 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8869 for (VPRecipeBase &R :
8870 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8871 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8872 if (!PhiR)
8873 continue;
8874
8875 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8877 Type *PhiTy = PhiR->getUnderlyingValue()->getType();
8878 // If tail is folded by masking, introduce selects between the phi
8879 // and the users outside the vector region of each reduction, at the
8880 // beginning of the dedicated latch block.
8881 auto *OrigExitingVPV = PhiR->getBackedgeValue();
8882 auto *NewExitingVPV = PhiR->getBackedgeValue();
8883 // Don't output selects for partial reductions because they have an output
8884 // with fewer lanes than the VF. So the operands of the select would have
8885 // different numbers of lanes. Partial reductions mask the input instead.
8886 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
8887 !isa<VPPartialReductionRecipe>(OrigExitingVPV->getDefiningRecipe())) {
8888 VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent());
8889 std::optional<FastMathFlags> FMFs =
8890 PhiTy->isFloatingPointTy()
8891 ? std::make_optional(RdxDesc.getFastMathFlags())
8892 : std::nullopt;
8893 NewExitingVPV =
8894 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", FMFs);
8895 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
8896 return isa<VPInstruction>(&U) &&
8897 (cast<VPInstruction>(&U)->getOpcode() ==
8899 cast<VPInstruction>(&U)->getOpcode() ==
8901 cast<VPInstruction>(&U)->getOpcode() ==
8903 });
8904 if (CM.usePredicatedReductionSelect())
8905 PhiR->setOperand(1, NewExitingVPV);
8906 }
8907
8908 // We want code in the middle block to appear to execute on the location of
8909 // the scalar loop's latch terminator because: (a) it is all compiler
8910 // generated, (b) these instructions are always executed after evaluating
8911 // the latch conditional branch, and (c) other passes may add new
8912 // predecessors which terminate on this line. This is the easiest way to
8913 // ensure we don't accidentally cause an extra step back into the loop while
8914 // debugging.
8915 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
8916
8917 // TODO: At the moment ComputeReductionResult also drives creation of the
8918 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
8919 // even for in-loop reductions, until the reduction resume value handling is
8920 // also modeled in VPlan.
8921 VPInstruction *FinalReductionResult;
8922 VPBuilder::InsertPointGuard Guard(Builder);
8923 Builder.setInsertPoint(MiddleVPBB, IP);
8924 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
8926 VPValue *Start = PhiR->getStartValue();
8927 VPValue *Sentinel = Plan->getOrAddLiveIn(RdxDesc.getSentinelValue());
8928 FinalReductionResult =
8929 Builder.createNaryOp(VPInstruction::ComputeFindIVResult,
8930 {PhiR, Start, Sentinel, NewExitingVPV}, ExitDL);
8931 } else if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8932 VPValue *Start = PhiR->getStartValue();
8933 FinalReductionResult =
8934 Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
8935 {PhiR, Start, NewExitingVPV}, ExitDL);
8936 } else {
8937 VPIRFlags Flags =
8939 ? VPIRFlags(RdxDesc.getFastMathFlags())
8940 : VPIRFlags();
8941 FinalReductionResult =
8942 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8943 {PhiR, NewExitingVPV}, Flags, ExitDL);
8944 }
8945 // If the vector reduction can be performed in a smaller type, we truncate
8946 // then extend the loop exit value to enable InstCombine to evaluate the
8947 // entire expression in the smaller type.
8948 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
8950 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
8952 "Unexpected truncated min-max recurrence!");
8953 Type *RdxTy = RdxDesc.getRecurrenceType();
8954 auto *Trunc =
8955 new VPWidenCastRecipe(Instruction::Trunc, NewExitingVPV, RdxTy);
8956 Instruction::CastOps ExtendOpc =
8957 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
8958 auto *Extnd = new VPWidenCastRecipe(ExtendOpc, Trunc, PhiTy);
8959 Trunc->insertAfter(NewExitingVPV->getDefiningRecipe());
8960 Extnd->insertAfter(Trunc);
8961 if (PhiR->getOperand(1) == NewExitingVPV)
8962 PhiR->setOperand(1, Extnd->getVPSingleValue());
8963
8964 // Update ComputeReductionResult with the truncated exiting value and
8965 // extend its result.
8966 FinalReductionResult->setOperand(1, Trunc);
8967 FinalReductionResult =
8968 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
8969 }
8970
8971 // Update all users outside the vector region. Also replace redundant
8972 // ExtractLastElement.
8973 for (auto *U : to_vector(OrigExitingVPV->users())) {
8974 auto *Parent = cast<VPRecipeBase>(U)->getParent();
8975 if (FinalReductionResult == U || Parent->getParent())
8976 continue;
8977 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
8979 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
8980 }
8981
8982 // Adjust AnyOf reductions; replace the reduction phi for the selected value
8983 // with a boolean reduction phi node to check if the condition is true in
8984 // any iteration. The final value is selected by the final
8985 // ComputeReductionResult.
8986 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8987 auto *Select = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
8988 return isa<VPWidenSelectRecipe>(U) ||
8989 (isa<VPReplicateRecipe>(U) &&
8990 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
8991 Instruction::Select);
8992 }));
8993 VPValue *Cmp = Select->getOperand(0);
8994 // If the compare is checking the reduction PHI node, adjust it to check
8995 // the start value.
8996 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
8997 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
8998 Builder.setInsertPoint(Select);
8999
9000 // If the true value of the select is the reduction phi, the new value is
9001 // selected if the negated condition is true in any iteration.
9002 if (Select->getOperand(1) == PhiR)
9003 Cmp = Builder.createNot(Cmp);
9004 VPValue *Or = Builder.createOr(PhiR, Cmp);
9005 Select->getVPSingleValue()->replaceAllUsesWith(Or);
9006 // Delete Select now that it has invalid types.
9007 ToDelete.push_back(Select);
9008
9009 // Convert the reduction phi to operate on bools.
9010 PhiR->setOperand(0, Plan->getOrAddLiveIn(ConstantInt::getFalse(
9011 OrigLoop->getHeader()->getContext())));
9012 continue;
9013 }
9014
9016 RdxDesc.getRecurrenceKind())) {
9017 // Adjust the start value for FindFirstIV/FindLastIV recurrences to use
9018 // the sentinel value after generating the ResumePhi recipe, which uses
9019 // the original start value.
9020 PhiR->setOperand(0, Plan->getOrAddLiveIn(RdxDesc.getSentinelValue()));
9021 }
9022 RecurKind RK = RdxDesc.getRecurrenceKind();
9026 VPBuilder PHBuilder(Plan->getVectorPreheader());
9027 VPValue *Iden = Plan->getOrAddLiveIn(
9028 getRecurrenceIdentity(RK, PhiTy, RdxDesc.getFastMathFlags()));
9029 // If the PHI is used by a partial reduction, set the scale factor.
9030 unsigned ScaleFactor =
9031 RecipeBuilder.getScalingForReduction(RdxDesc.getLoopExitInstr())
9032 .value_or(1);
9033 Type *I32Ty = IntegerType::getInt32Ty(PhiTy->getContext());
9034 auto *ScaleFactorVPV =
9035 Plan->getOrAddLiveIn(ConstantInt::get(I32Ty, ScaleFactor));
9036 VPValue *StartV = PHBuilder.createNaryOp(
9038 {PhiR->getStartValue(), Iden, ScaleFactorVPV},
9039 PhiTy->isFloatingPointTy() ? RdxDesc.getFastMathFlags()
9040 : FastMathFlags());
9041 PhiR->setOperand(0, StartV);
9042 }
9043 }
9044 for (VPRecipeBase *R : ToDelete)
9045 R->eraseFromParent();
9046
9048}
9049
9050void LoopVectorizationPlanner::attachRuntimeChecks(
9051 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
9052 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
9053 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
9054 assert((!CM.OptForSize ||
9055 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
9056 "Cannot SCEV check stride or overflow when optimizing for size");
9057 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
9058 HasBranchWeights);
9059 }
9060 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
9061 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
9062 // VPlan-native path does not do any analysis for runtime checks
9063 // currently.
9064 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
9065 "Runtime checks are not supported for outer loops yet");
9066
9067 if (CM.OptForSize) {
9068 assert(
9069 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
9070 "Cannot emit memory checks when optimizing for size, unless forced "
9071 "to vectorize.");
9072 ORE->emit([&]() {
9073 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
9074 OrigLoop->getStartLoc(),
9075 OrigLoop->getHeader())
9076 << "Code-size may be reduced by not forcing "
9077 "vectorization, or by source-code modifications "
9078 "eliminating the need for runtime checks "
9079 "(e.g., adding 'restrict').";
9080 });
9081 }
9082 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
9083 HasBranchWeights);
9084 }
9085}
9086
9088 VPlan &Plan, ElementCount VF, unsigned UF,
9089 ElementCount MinProfitableTripCount) const {
9090 // vscale is not necessarily a power-of-2, which means we cannot guarantee
9091 // an overflow to zero when updating induction variables and so an
9092 // additional overflow check is required before entering the vector loop.
9093 bool IsIndvarOverflowCheckNeededForVF =
9094 VF.isScalable() && !TTI.isVScaleKnownToBeAPowerOfTwo() &&
9095 !isIndvarOverflowCheckKnownFalse(&CM, VF, UF) &&
9096 CM.getTailFoldingStyle() !=
9098 const uint32_t *BranchWeigths =
9099 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
9101 : nullptr;
9103 Plan, VF, UF, MinProfitableTripCount,
9104 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
9105 IsIndvarOverflowCheckNeededForVF, OrigLoop, BranchWeigths,
9106 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
9107 *PSE.getSE());
9108}
9109
9111 assert(!State.Lane && "VPDerivedIVRecipe being replicated.");
9112
9113 // Fast-math-flags propagate from the original induction instruction.
9114 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9115 if (FPBinOp)
9116 State.Builder.setFastMathFlags(FPBinOp->getFastMathFlags());
9117
9118 Value *Step = State.get(getStepValue(), VPLane(0));
9119 Value *Index = State.get(getOperand(1), VPLane(0));
9120 Value *DerivedIV = emitTransformedIndex(
9121 State.Builder, Index, getStartValue()->getLiveInIRValue(), Step, Kind,
9123 DerivedIV->setName(Name);
9124 State.set(this, DerivedIV, VPLane(0));
9125}
9126
9127// Determine how to lower the scalar epilogue, which depends on 1) optimising
9128// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9129// predication, and 4) a TTI hook that analyses whether the loop is suitable
9130// for predication.
9135 // 1) OptSize takes precedence over all other options, i.e. if this is set,
9136 // don't look at hints or options, and don't request a scalar epilogue.
9137 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9138 // LoopAccessInfo (due to code dependency and not being able to reliably get
9139 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9140 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9141 // versioning when the vectorization is forced, unlike hasOptSize. So revert
9142 // back to the old way and vectorize with versioning when forced. See D81345.)
9143 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9147
9148 // 2) If set, obey the directives
9149 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9157 };
9158 }
9159
9160 // 3) If set, obey the hints
9161 switch (Hints.getPredicate()) {
9166 };
9167
9168 // 4) if the TTI hook indicates this is profitable, request predication.
9169 TailFoldingInfo TFI(TLI, &LVL, IAI);
9170 if (TTI->preferPredicateOverEpilogue(&TFI))
9172
9174}
9175
9176// Process the loop in the VPlan-native vectorization path. This path builds
9177// VPlan upfront in the vectorization pipeline, which allows to apply
9178// VPlan-to-VPlan transformations from the very beginning without modifying the
9179// input LLVM IR.
9186 LoopVectorizationRequirements &Requirements) {
9187
9189 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9190 return false;
9191 }
9192 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9193 Function *F = L->getHeader()->getParent();
9194 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9195
9197 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, *LVL, &IAI);
9198
9199 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9200 &Hints, IAI, PSI, BFI);
9201 // Use the planner for outer loop vectorization.
9202 // TODO: CM is not used at this point inside the planner. Turn CM into an
9203 // optional argument if we don't need it in the future.
9204 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9205 ORE);
9206
9207 // Get user vectorization factor.
9208 ElementCount UserVF = Hints.getWidth();
9209
9211
9212 // Plan how to best vectorize, return the best VF and its cost.
9213 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9214
9215 // If we are stress testing VPlan builds, do not attempt to generate vector
9216 // code. Masked vector code generation support will follow soon.
9217 // Also, do not attempt to vectorize if no vector code will be produced.
9219 return false;
9220
9221 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
9222
9223 {
9224 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
9225 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
9226 BFI, PSI, Checks, BestPlan);
9227 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9228 << L->getHeader()->getParent()->getName() << "\"\n");
9229 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
9231
9232 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT, false);
9233 }
9234
9235 reportVectorization(ORE, L, VF, 1);
9236
9237 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9238 return true;
9239}
9240
9241// Emit a remark if there are stores to floats that required a floating point
9242// extension. If the vectorized loop was generated with floating point there
9243// will be a performance penalty from the conversion overhead and the change in
9244// the vector width.
9247 for (BasicBlock *BB : L->getBlocks()) {
9248 for (Instruction &Inst : *BB) {
9249 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9250 if (S->getValueOperand()->getType()->isFloatTy())
9251 Worklist.push_back(S);
9252 }
9253 }
9254 }
9255
9256 // Traverse the floating point stores upwards searching, for floating point
9257 // conversions.
9260 while (!Worklist.empty()) {
9261 auto *I = Worklist.pop_back_val();
9262 if (!L->contains(I))
9263 continue;
9264 if (!Visited.insert(I).second)
9265 continue;
9266
9267 // Emit a remark if the floating point store required a floating
9268 // point conversion.
9269 // TODO: More work could be done to identify the root cause such as a
9270 // constant or a function return type and point the user to it.
9271 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9272 ORE->emit([&]() {
9273 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9274 I->getDebugLoc(), L->getHeader())
9275 << "floating point conversion changes vector width. "
9276 << "Mixed floating point precision requires an up/down "
9277 << "cast that will negatively impact performance.";
9278 });
9279
9280 for (Use &Op : I->operands())
9281 if (auto *OpI = dyn_cast<Instruction>(Op))
9282 Worklist.push_back(OpI);
9283 }
9284}
9285
9286/// For loops with uncountable early exits, find the cost of doing work when
9287/// exiting the loop early, such as calculating the final exit values of
9288/// variables used outside the loop.
9289/// TODO: This is currently overly pessimistic because the loop may not take
9290/// the early exit, but better to keep this conservative for now. In future,
9291/// it might be possible to relax this by using branch probabilities.
9293 VPlan &Plan, ElementCount VF) {
9294 InstructionCost Cost = 0;
9295 for (auto *ExitVPBB : Plan.getExitBlocks()) {
9296 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
9297 // If the predecessor is not the middle.block, then it must be the
9298 // vector.early.exit block, which may contain work to calculate the exit
9299 // values of variables used outside the loop.
9300 if (PredVPBB != Plan.getMiddleBlock()) {
9301 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
9302 << PredVPBB->getName() << ":\n");
9303 Cost += PredVPBB->cost(VF, CostCtx);
9304 }
9305 }
9306 }
9307 return Cost;
9308}
9309
9310/// This function determines whether or not it's still profitable to vectorize
9311/// the loop given the extra work we have to do outside of the loop:
9312/// 1. Perform the runtime checks before entering the loop to ensure it's safe
9313/// to vectorize.
9314/// 2. In the case of loops with uncountable early exits, we may have to do
9315/// extra work when exiting the loop early, such as calculating the final
9316/// exit values of variables used outside the loop.
9317static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
9318 VectorizationFactor &VF, Loop *L,
9320 VPCostContext &CostCtx, VPlan &Plan,
9322 std::optional<unsigned> VScale) {
9323 InstructionCost TotalCost = Checks.getCost();
9324 if (!TotalCost.isValid())
9325 return false;
9326
9327 // Add on the cost of any work required in the vector early exit block, if
9328 // one exists.
9329 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
9330
9331 // When interleaving only scalar and vector cost will be equal, which in turn
9332 // would lead to a divide by 0. Fall back to hard threshold.
9333 if (VF.Width.isScalar()) {
9334 // TODO: Should we rename VectorizeMemoryCheckThreshold?
9335 if (TotalCost > VectorizeMemoryCheckThreshold) {
9336 LLVM_DEBUG(
9337 dbgs()
9338 << "LV: Interleaving only is not profitable due to runtime checks\n");
9339 return false;
9340 }
9341 return true;
9342 }
9343
9344 // The scalar cost should only be 0 when vectorizing with a user specified
9345 // VF/IC. In those cases, runtime checks should always be generated.
9346 uint64_t ScalarC = VF.ScalarCost.getValue();
9347 if (ScalarC == 0)
9348 return true;
9349
9350 // First, compute the minimum iteration count required so that the vector
9351 // loop outperforms the scalar loop.
9352 // The total cost of the scalar loop is
9353 // ScalarC * TC
9354 // where
9355 // * TC is the actual trip count of the loop.
9356 // * ScalarC is the cost of a single scalar iteration.
9357 //
9358 // The total cost of the vector loop is
9359 // RtC + VecC * (TC / VF) + EpiC
9360 // where
9361 // * RtC is the cost of the generated runtime checks plus the cost of
9362 // performing any additional work in the vector.early.exit block for loops
9363 // with uncountable early exits.
9364 // * VecC is the cost of a single vector iteration.
9365 // * TC is the actual trip count of the loop
9366 // * VF is the vectorization factor
9367 // * EpiCost is the cost of the generated epilogue, including the cost
9368 // of the remaining scalar operations.
9369 //
9370 // Vectorization is profitable once the total vector cost is less than the
9371 // total scalar cost:
9372 // RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
9373 //
9374 // Now we can compute the minimum required trip count TC as
9375 // VF * (RtC + EpiC) / (ScalarC * VF - VecC) < TC
9376 //
9377 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
9378 // the computations are performed on doubles, not integers and the result
9379 // is rounded up, hence we get an upper estimate of the TC.
9380 unsigned IntVF = estimateElementCount(VF.Width, VScale);
9381 uint64_t RtC = TotalCost.getValue();
9382 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
9383 uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div);
9384
9385 // Second, compute a minimum iteration count so that the cost of the
9386 // runtime checks is only a fraction of the total scalar loop cost. This
9387 // adds a loop-dependent bound on the overhead incurred if the runtime
9388 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
9389 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
9390 // cost, compute
9391 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
9392 uint64_t MinTC2 = divideCeil(RtC * 10, ScalarC);
9393
9394 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
9395 // epilogue is allowed, choose the next closest multiple of VF. This should
9396 // partly compensate for ignoring the epilogue cost.
9397 uint64_t MinTC = std::max(MinTC1, MinTC2);
9398 if (SEL == CM_ScalarEpilogueAllowed)
9399 MinTC = alignTo(MinTC, IntVF);
9401
9402 LLVM_DEBUG(
9403 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
9404 << VF.MinProfitableTripCount << "\n");
9405
9406 // Skip vectorization if the expected trip count is less than the minimum
9407 // required trip count.
9408 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
9409 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
9410 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9411 "trip count < minimum profitable VF ("
9412 << *ExpectedTC << " < " << VF.MinProfitableTripCount
9413 << ")\n");
9414
9415 return false;
9416 }
9417 }
9418 return true;
9419}
9420
9422 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9424 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9426
9427/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
9428/// vectorization. Remove ResumePhis from \p MainPlan for inductions that
9429/// don't have a corresponding wide induction in \p EpiPlan.
9430static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
9431 // Collect PHI nodes of widened phis in the VPlan for the epilogue. Those
9432 // will need their resume-values computed in the main vector loop. Others
9433 // can be removed from the main VPlan.
9434 SmallPtrSet<PHINode *, 2> EpiWidenedPhis;
9435 for (VPRecipeBase &R :
9438 continue;
9439 EpiWidenedPhis.insert(
9440 cast<PHINode>(R.getVPSingleValue()->getUnderlyingValue()));
9441 }
9442 for (VPRecipeBase &R :
9443 make_early_inc_range(MainPlan.getScalarHeader()->phis())) {
9444 auto *VPIRInst = cast<VPIRPhi>(&R);
9445 if (EpiWidenedPhis.contains(&VPIRInst->getIRPhi()))
9446 continue;
9447 // There is no corresponding wide induction in the epilogue plan that would
9448 // need a resume value. Remove the VPIRInst wrapping the scalar header phi
9449 // together with the corresponding ResumePhi. The resume values for the
9450 // scalar loop will be created during execution of EpiPlan.
9451 VPRecipeBase *ResumePhi = VPIRInst->getOperand(0)->getDefiningRecipe();
9452 VPIRInst->eraseFromParent();
9453 ResumePhi->eraseFromParent();
9454 }
9456
9457 using namespace VPlanPatternMatch;
9458 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
9459 // introduce multiple uses of undef/poison. If the reduction start value may
9460 // be undef or poison it needs to be frozen and the frozen start has to be
9461 // used when computing the reduction result. We also need to use the frozen
9462 // value in the resume phi generated by the main vector loop, as this is also
9463 // used to compute the reduction result after the epilogue vector loop.
9464 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
9465 bool UpdateResumePhis) {
9466 VPBuilder Builder(Plan.getEntry());
9467 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
9468 auto *VPI = dyn_cast<VPInstruction>(&R);
9469 if (!VPI || VPI->getOpcode() != VPInstruction::ComputeFindIVResult)
9470 continue;
9471 VPValue *OrigStart = VPI->getOperand(1);
9473 continue;
9474 VPInstruction *Freeze =
9475 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
9476 VPI->setOperand(1, Freeze);
9477 if (UpdateResumePhis)
9478 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
9479 return Freeze != &U && isa<VPPhi>(&U);
9480 });
9481 }
9482 };
9483 AddFreezeForFindLastIVReductions(MainPlan, true);
9484 AddFreezeForFindLastIVReductions(EpiPlan, false);
9485
9486 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
9487 VPValue *VectorTC = &MainPlan.getVectorTripCount();
9488 // If there is a suitable resume value for the canonical induction in the
9489 // scalar (which will become vector) epilogue loop, use it and move it to the
9490 // beginning of the scalar preheader. Otherwise create it below.
9491 auto ResumePhiIter =
9492 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
9493 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
9494 m_ZeroInt()));
9495 });
9496 VPPhi *ResumePhi = nullptr;
9497 if (ResumePhiIter == MainScalarPH->phis().end()) {
9498 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
9499 ResumePhi = ScalarPHBuilder.createScalarPhi(
9500 {VectorTC, MainPlan.getCanonicalIV()->getStartValue()}, {},
9501 "vec.epilog.resume.val");
9502 } else {
9503 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
9504 if (MainScalarPH->begin() == MainScalarPH->end())
9505 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->end());
9506 else if (&*MainScalarPH->begin() != ResumePhi)
9507 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
9508 }
9509 // Add a user to to make sure the resume phi won't get removed.
9510 VPBuilder(MainScalarPH)
9512}
9513
9514/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
9515/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
9516/// reductions require creating new instructions to compute the resume values.
9517/// They are collected in a vector and returned. They must be moved to the
9518/// preheader of the vector epilogue loop, after created by the execution of \p
9519/// Plan.
9521 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
9523 ScalarEvolution &SE) {
9524 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
9525 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
9526 Header->setName("vec.epilog.vector.body");
9527
9529 SmallVector<Instruction *> InstsToMove;
9530 // Ensure that the start values for all header phi recipes are updated before
9531 // vectorizing the epilogue loop.
9532 for (VPRecipeBase &R : Header->phis()) {
9533 if (auto *IV = dyn_cast<VPCanonicalIVPHIRecipe>(&R)) {
9534 // When vectorizing the epilogue loop, the canonical induction start
9535 // value needs to be changed from zero to the value after the main
9536 // vector loop. Find the resume value created during execution of the main
9537 // VPlan. It must be the first phi in the loop preheader.
9538 // FIXME: Improve modeling for canonical IV start values in the epilogue
9539 // loop.
9540 using namespace llvm::PatternMatch;
9541 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
9542 for (Value *Inc : EPResumeVal->incoming_values()) {
9543 if (match(Inc, m_SpecificInt(0)))
9544 continue;
9545 assert(!EPI.VectorTripCount &&
9546 "Must only have a single non-zero incoming value");
9547 EPI.VectorTripCount = Inc;
9548 }
9549 // If we didn't find a non-zero vector trip count, all incoming values
9550 // must be zero, which also means the vector trip count is zero. Pick the
9551 // first zero as vector trip count.
9552 // TODO: We should not choose VF * UF so the main vector loop is known to
9553 // be dead.
9554 if (!EPI.VectorTripCount) {
9555 assert(
9556 EPResumeVal->getNumIncomingValues() > 0 &&
9557 all_of(EPResumeVal->incoming_values(),
9558 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
9559 "all incoming values must be 0");
9560 EPI.VectorTripCount = EPResumeVal->getOperand(0);
9561 }
9562 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
9563 assert(all_of(IV->users(),
9564 [](const VPUser *U) {
9565 return isa<VPScalarIVStepsRecipe>(U) ||
9566 isa<VPDerivedIVRecipe>(U) ||
9567 cast<VPRecipeBase>(U)->isScalarCast() ||
9568 cast<VPInstruction>(U)->getOpcode() ==
9569 Instruction::Add;
9570 }) &&
9571 "the canonical IV should only be used by its increment or "
9572 "ScalarIVSteps when resetting the start value");
9573 IV->setOperand(0, VPV);
9574 continue;
9575 }
9576
9577 Value *ResumeV = nullptr;
9578 // TODO: Move setting of resume values to prepareToExecute.
9579 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
9580 auto *RdxResult =
9581 cast<VPInstruction>(*find_if(ReductionPhi->users(), [](VPUser *U) {
9582 auto *VPI = dyn_cast<VPInstruction>(U);
9583 return VPI &&
9584 (VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
9585 VPI->getOpcode() == VPInstruction::ComputeReductionResult ||
9586 VPI->getOpcode() == VPInstruction::ComputeFindIVResult);
9587 }));
9588 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
9589 ->getIncomingValueForBlock(L->getLoopPreheader());
9590 RecurKind RK = ReductionPhi->getRecurrenceKind();
9592 Value *StartV = RdxResult->getOperand(1)->getLiveInIRValue();
9593 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
9594 // start value; compare the final value from the main vector loop
9595 // to the start value.
9596 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
9597 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
9598 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
9599 if (auto *I = dyn_cast<Instruction>(ResumeV))
9600 InstsToMove.push_back(I);
9602 Value *StartV = getStartValueFromReductionResult(RdxResult);
9603 ToFrozen[StartV] = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
9605
9606 // VPReductionPHIRecipe for FindFirstIV/FindLastIV reductions requires
9607 // an adjustment to the resume value. The resume value is adjusted to
9608 // the sentinel value when the final value from the main vector loop
9609 // equals the start value. This ensures correctness when the start value
9610 // might not be less than the minimum value of a monotonically
9611 // increasing induction variable.
9612 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
9613 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
9614 Value *Cmp = Builder.CreateICmpEQ(ResumeV, ToFrozen[StartV]);
9615 if (auto *I = dyn_cast<Instruction>(Cmp))
9616 InstsToMove.push_back(I);
9617 Value *Sentinel = RdxResult->getOperand(2)->getLiveInIRValue();
9618 ResumeV = Builder.CreateSelect(Cmp, Sentinel, ResumeV);
9619 if (auto *I = dyn_cast<Instruction>(ResumeV))
9620 InstsToMove.push_back(I);
9621 } else {
9622 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9623 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9624 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
9625 assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
9626 "unexpected start value");
9627 VPI->setOperand(0, StartVal);
9628 continue;
9629 }
9630 }
9631 } else {
9632 // Retrieve the induction resume values for wide inductions from
9633 // their original phi nodes in the scalar loop.
9634 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
9635 // Hook up to the PHINode generated by a ResumePhi recipe of main
9636 // loop VPlan, which feeds the scalar loop.
9637 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
9638 }
9639 assert(ResumeV && "Must have a resume value");
9640 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9641 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
9642 }
9643
9644 // For some VPValues in the epilogue plan we must re-use the generated IR
9645 // values from the main plan. Replace them with live-in VPValues.
9646 // TODO: This is a workaround needed for epilogue vectorization and it
9647 // should be removed once induction resume value creation is done
9648 // directly in VPlan.
9649 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
9650 // Re-use frozen values from the main plan for Freeze VPInstructions in the
9651 // epilogue plan. This ensures all users use the same frozen value.
9652 auto *VPI = dyn_cast<VPInstruction>(&R);
9653 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
9654 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
9655 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
9656 continue;
9657 }
9658
9659 // Re-use the trip count and steps expanded for the main loop, as
9660 // skeleton creation needs it as a value that dominates both the scalar
9661 // and vector epilogue loops
9662 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
9663 if (!ExpandR)
9664 continue;
9665 VPValue *ExpandedVal =
9666 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
9667 ExpandR->replaceAllUsesWith(ExpandedVal);
9668 if (Plan.getTripCount() == ExpandR)
9669 Plan.resetTripCount(ExpandedVal);
9670 ExpandR->eraseFromParent();
9671 }
9672
9673 auto VScale = CM.getVScaleForTuning();
9674 unsigned MainLoopStep =
9675 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
9676 unsigned EpilogueLoopStep =
9677 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
9679 Plan, EPI.TripCount, EPI.VectorTripCount,
9681 EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
9682
9683 return InstsToMove;
9684}
9685
9686// Generate bypass values from the additional bypass block. Note that when the
9687// vectorized epilogue is skipped due to iteration count check, then the
9688// resume value for the induction variable comes from the trip count of the
9689// main vector loop, passed as the second argument.
9691 PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder,
9692 const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount,
9693 Instruction *OldInduction) {
9694 Value *Step = getExpandedStep(II, ExpandedSCEVs);
9695 // For the primary induction the additional bypass end value is known.
9696 // Otherwise it is computed.
9697 Value *EndValueFromAdditionalBypass = MainVectorTripCount;
9698 if (OrigPhi != OldInduction) {
9699 auto *BinOp = II.getInductionBinOp();
9700 // Fast-math-flags propagate from the original induction instruction.
9702 BypassBuilder.setFastMathFlags(BinOp->getFastMathFlags());
9703
9704 // Compute the end value for the additional bypass.
9705 EndValueFromAdditionalBypass =
9706 emitTransformedIndex(BypassBuilder, MainVectorTripCount,
9707 II.getStartValue(), Step, II.getKind(), BinOp);
9708 EndValueFromAdditionalBypass->setName("ind.end");
9709 }
9710 return EndValueFromAdditionalBypass;
9711}
9712
9714 VPlan &BestEpiPlan,
9716 const SCEV2ValueTy &ExpandedSCEVs,
9717 Value *MainVectorTripCount) {
9718 // Fix reduction resume values from the additional bypass block.
9719 BasicBlock *PH = L->getLoopPreheader();
9720 for (auto *Pred : predecessors(PH)) {
9721 for (PHINode &Phi : PH->phis()) {
9722 if (Phi.getBasicBlockIndex(Pred) != -1)
9723 continue;
9724 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
9725 }
9726 }
9727 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
9728 if (ScalarPH->hasPredecessors()) {
9729 // If ScalarPH has predecessors, we may need to update its reduction
9730 // resume values.
9731 for (const auto &[R, IRPhi] :
9732 zip(ScalarPH->phis(), ScalarPH->getIRBasicBlock()->phis())) {
9734 BypassBlock);
9735 }
9736 }
9737
9738 // Fix induction resume values from the additional bypass block.
9739 IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
9740 for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
9741 auto *Inc = cast<PHINode>(IVPhi->getIncomingValueForBlock(PH));
9743 IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
9744 LVL.getPrimaryInduction());
9745 // TODO: Directly add as extra operand to the VPResumePHI recipe.
9746 Inc->setIncomingValueForBlock(BypassBlock, V);
9747 }
9748}
9749
9750/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
9751// loop, after both plans have executed, updating branches from the iteration
9752// and runtime checks of the main loop, as well as updating various phis. \p
9753// InstsToMove contains instructions that need to be moved to the preheader of
9754// the epilogue vector loop.
9756 VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI,
9758 DenseMap<const SCEV *, Value *> &ExpandedSCEVs, GeneratedRTChecks &Checks,
9759 ArrayRef<Instruction *> InstsToMove) {
9760 BasicBlock *VecEpilogueIterationCountCheck =
9761 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
9762
9763 BasicBlock *VecEpiloguePreHeader =
9764 cast<BranchInst>(VecEpilogueIterationCountCheck->getTerminator())
9765 ->getSuccessor(1);
9766 // Adjust the control flow taking the state info from the main loop
9767 // vectorization into account.
9769 "expected this to be saved from the previous pass.");
9770 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
9772 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
9773
9775 VecEpilogueIterationCountCheck},
9777 VecEpiloguePreHeader}});
9778
9779 BasicBlock *ScalarPH =
9780 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
9782 VecEpilogueIterationCountCheck, ScalarPH);
9783 DTU.applyUpdates(
9785 VecEpilogueIterationCountCheck},
9787
9788 // Adjust the terminators of runtime check blocks and phis using them.
9789 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
9790 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
9791 if (SCEVCheckBlock) {
9792 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
9793 VecEpilogueIterationCountCheck, ScalarPH);
9794 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
9795 VecEpilogueIterationCountCheck},
9796 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
9797 }
9798 if (MemCheckBlock) {
9799 MemCheckBlock->getTerminator()->replaceUsesOfWith(
9800 VecEpilogueIterationCountCheck, ScalarPH);
9801 DTU.applyUpdates(
9802 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
9803 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
9804 }
9805
9806 // The vec.epilog.iter.check block may contain Phi nodes from inductions
9807 // or reductions which merge control-flow from the latch block and the
9808 // middle block. Update the incoming values here and move the Phi into the
9809 // preheader.
9810 SmallVector<PHINode *, 4> PhisInBlock(
9811 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
9812
9813 for (PHINode *Phi : PhisInBlock) {
9814 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
9815 Phi->replaceIncomingBlockWith(
9816 VecEpilogueIterationCountCheck->getSinglePredecessor(),
9817 VecEpilogueIterationCountCheck);
9818
9819 // If the phi doesn't have an incoming value from the
9820 // EpilogueIterationCountCheck, we are done. Otherwise remove the
9821 // incoming value and also those from other check blocks. This is needed
9822 // for reduction phis only.
9823 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
9824 return EPI.EpilogueIterationCountCheck == IncB;
9825 }))
9826 continue;
9827 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
9828 if (SCEVCheckBlock)
9829 Phi->removeIncomingValue(SCEVCheckBlock);
9830 if (MemCheckBlock)
9831 Phi->removeIncomingValue(MemCheckBlock);
9832 }
9833
9834 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
9835 for (auto *I : InstsToMove)
9836 I->moveBefore(IP);
9837
9838 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
9839 // after executing the main loop. We need to update the resume values of
9840 // inductions and reductions during epilogue vectorization.
9841 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
9842 LVL, ExpandedSCEVs, EPI.VectorTripCount);
9843}
9844
9846 assert((EnableVPlanNativePath || L->isInnermost()) &&
9847 "VPlan-native path is not enabled. Only process inner loops.");
9848
9849 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9850 << L->getHeader()->getParent()->getName() << "' from "
9851 << L->getLocStr() << "\n");
9852
9853 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9854
9855 LLVM_DEBUG(
9856 dbgs() << "LV: Loop hints:"
9857 << " force="
9859 ? "disabled"
9861 ? "enabled"
9862 : "?"))
9863 << " width=" << Hints.getWidth()
9864 << " interleave=" << Hints.getInterleave() << "\n");
9865
9866 // Function containing loop
9867 Function *F = L->getHeader()->getParent();
9868
9869 // Looking at the diagnostic output is the only way to determine if a loop
9870 // was vectorized (other than looking at the IR or machine code), so it
9871 // is important to generate an optimization remark for each loop. Most of
9872 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9873 // generated as OptimizationRemark and OptimizationRemarkMissed are
9874 // less verbose reporting vectorized loops and unvectorized loops that may
9875 // benefit from vectorization, respectively.
9876
9877 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9878 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9879 return false;
9880 }
9881
9882 PredicatedScalarEvolution PSE(*SE, *L);
9883
9884 // Check if it is legal to vectorize the loop.
9885 LoopVectorizationRequirements Requirements;
9886 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9887 &Requirements, &Hints, DB, AC, BFI, PSI, AA);
9889 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9890 Hints.emitRemarkWithHints();
9891 return false;
9892 }
9893
9895 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9896 "early exit is not enabled",
9897 "UncountableEarlyExitLoopsDisabled", ORE, L);
9898 return false;
9899 }
9900
9901 if (!LVL.getPotentiallyFaultingLoads().empty()) {
9902 reportVectorizationFailure("Auto-vectorization of loops with potentially "
9903 "faulting load is not supported",
9904 "PotentiallyFaultingLoadsNotSupported", ORE, L);
9905 return false;
9906 }
9907
9908 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9909 // here. They may require CFG and instruction level transformations before
9910 // even evaluating whether vectorization is profitable. Since we cannot modify
9911 // the incoming IR, we need to build VPlan upfront in the vectorization
9912 // pipeline.
9913 if (!L->isInnermost())
9914 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9915 ORE, BFI, PSI, Hints, Requirements);
9916
9917 assert(L->isInnermost() && "Inner loop expected.");
9918
9919 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9920 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9921
9922 // If an override option has been passed in for interleaved accesses, use it.
9923 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9924 UseInterleaved = EnableInterleavedMemAccesses;
9925
9926 // Analyze interleaved memory accesses.
9927 if (UseInterleaved)
9929
9930 if (LVL.hasUncountableEarlyExit()) {
9931 BasicBlock *LoopLatch = L->getLoopLatch();
9932 if (IAI.requiresScalarEpilogue() ||
9934 [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9935 reportVectorizationFailure("Auto-vectorization of early exit loops "
9936 "requiring a scalar epilogue is unsupported",
9937 "UncountableEarlyExitUnsupported", ORE, L);
9938 return false;
9939 }
9940 }
9941
9942 // Check the function attributes and profiles to find out if this function
9943 // should be optimized for size.
9945 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, LVL, &IAI);
9946
9947 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9948 // count by optimizing for size, to minimize overheads.
9949 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9950 if (ExpectedTC && ExpectedTC->isFixed() &&
9951 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9952 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9953 << "This loop is worth vectorizing only if no scalar "
9954 << "iteration overheads are incurred.");
9956 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9957 else {
9958 LLVM_DEBUG(dbgs() << "\n");
9959 // Predicate tail-folded loops are efficient even when the loop
9960 // iteration count is low. However, setting the epilogue policy to
9961 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9962 // with runtime checks. It's more effective to let
9963 // `isOutsideLoopWorkProfitable` determine if vectorization is
9964 // beneficial for the loop.
9967 }
9968 }
9969
9970 // Check the function attributes to see if implicit floats or vectors are
9971 // allowed.
9972 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9974 "Can't vectorize when the NoImplicitFloat attribute is used",
9975 "loop not vectorized due to NoImplicitFloat attribute",
9976 "NoImplicitFloat", ORE, L);
9977 Hints.emitRemarkWithHints();
9978 return false;
9979 }
9980
9981 // Check if the target supports potentially unsafe FP vectorization.
9982 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9983 // for the target we're vectorizing for, to make sure none of the
9984 // additional fp-math flags can help.
9985 if (Hints.isPotentiallyUnsafe() &&
9986 TTI->isFPVectorizationPotentiallyUnsafe()) {
9988 "Potentially unsafe FP op prevents vectorization",
9989 "loop not vectorized due to unsafe FP support.",
9990 "UnsafeFP", ORE, L);
9991 Hints.emitRemarkWithHints();
9992 return false;
9993 }
9994
9995 bool AllowOrderedReductions;
9996 // If the flag is set, use that instead and override the TTI behaviour.
9997 if (ForceOrderedReductions.getNumOccurrences() > 0)
9998 AllowOrderedReductions = ForceOrderedReductions;
9999 else
10000 AllowOrderedReductions = TTI->enableOrderedReductions();
10001 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10002 ORE->emit([&]() {
10003 auto *ExactFPMathInst = Requirements.getExactFPInst();
10004 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10005 ExactFPMathInst->getDebugLoc(),
10006 ExactFPMathInst->getParent())
10007 << "loop not vectorized: cannot prove it is safe to reorder "
10008 "floating-point operations";
10009 });
10010 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10011 "reorder floating-point operations\n");
10012 Hints.emitRemarkWithHints();
10013 return false;
10014 }
10015
10016 // Use the cost model.
10017 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10018 F, &Hints, IAI, PSI, BFI);
10019 // Use the planner for vectorization.
10020 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
10021 ORE);
10022
10023 // Get user vectorization factor and interleave count.
10024 ElementCount UserVF = Hints.getWidth();
10025 unsigned UserIC = Hints.getInterleave();
10026
10027 // Plan how to best vectorize.
10028 LVP.plan(UserVF, UserIC);
10030 unsigned IC = 1;
10031
10032 if (ORE->allowExtraAnalysis(LV_NAME))
10034
10035 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
10036 if (LVP.hasPlanWithVF(VF.Width)) {
10037 // Select the interleave count.
10038 IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
10039
10040 unsigned SelectedIC = std::max(IC, UserIC);
10041 // Optimistically generate runtime checks if they are needed. Drop them if
10042 // they turn out to not be profitable.
10043 if (VF.Width.isVector() || SelectedIC > 1) {
10044 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC);
10045
10046 // Bail out early if either the SCEV or memory runtime checks are known to
10047 // fail. In that case, the vector loop would never execute.
10048 using namespace llvm::PatternMatch;
10049 if (Checks.getSCEVChecks().first &&
10050 match(Checks.getSCEVChecks().first, m_One()))
10051 return false;
10052 if (Checks.getMemRuntimeChecks().first &&
10053 match(Checks.getMemRuntimeChecks().first, m_One()))
10054 return false;
10055 }
10056
10057 // Check if it is profitable to vectorize with runtime checks.
10058 bool ForceVectorization =
10060 VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
10061 CM.CostKind, *CM.PSE.getSE());
10062 if (!ForceVectorization &&
10063 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
10064 LVP.getPlanFor(VF.Width), SEL,
10065 CM.getVScaleForTuning())) {
10066 ORE->emit([&]() {
10068 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
10069 L->getHeader())
10070 << "loop not vectorized: cannot prove it is safe to reorder "
10071 "memory operations";
10072 });
10073 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
10074 Hints.emitRemarkWithHints();
10075 return false;
10076 }
10077 }
10078
10079 // Identify the diagnostic messages that should be produced.
10080 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10081 bool VectorizeLoop = true, InterleaveLoop = true;
10082 if (VF.Width.isScalar()) {
10083 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10084 VecDiagMsg = {
10085 "VectorizationNotBeneficial",
10086 "the cost-model indicates that vectorization is not beneficial"};
10087 VectorizeLoop = false;
10088 }
10089
10090 if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
10091 // Tell the user interleaving was avoided up-front, despite being explicitly
10092 // requested.
10093 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10094 "interleaving should be avoided up front\n");
10095 IntDiagMsg = {"InterleavingAvoided",
10096 "Ignoring UserIC, because interleaving was avoided up front"};
10097 InterleaveLoop = false;
10098 } else if (IC == 1 && UserIC <= 1) {
10099 // Tell the user interleaving is not beneficial.
10100 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10101 IntDiagMsg = {
10102 "InterleavingNotBeneficial",
10103 "the cost-model indicates that interleaving is not beneficial"};
10104 InterleaveLoop = false;
10105 if (UserIC == 1) {
10106 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10107 IntDiagMsg.second +=
10108 " and is explicitly disabled or interleave count is set to 1";
10109 }
10110 } else if (IC > 1 && UserIC == 1) {
10111 // Tell the user interleaving is beneficial, but it explicitly disabled.
10112 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
10113 "disabled.\n");
10114 IntDiagMsg = {"InterleavingBeneficialButDisabled",
10115 "the cost-model indicates that interleaving is beneficial "
10116 "but is explicitly disabled or interleave count is set to 1"};
10117 InterleaveLoop = false;
10118 }
10119
10120 // If there is a histogram in the loop, do not just interleave without
10121 // vectorizing. The order of operations will be incorrect without the
10122 // histogram intrinsics, which are only used for recipes with VF > 1.
10123 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
10124 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
10125 << "to histogram operations.\n");
10126 IntDiagMsg = {
10127 "HistogramPreventsScalarInterleaving",
10128 "Unable to interleave without vectorization due to constraints on "
10129 "the order of histogram operations"};
10130 InterleaveLoop = false;
10131 }
10132
10133 // Override IC if user provided an interleave count.
10134 IC = UserIC > 0 ? UserIC : IC;
10135
10136 // Emit diagnostic messages, if any.
10137 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10138 if (!VectorizeLoop && !InterleaveLoop) {
10139 // Do not vectorize or interleaving the loop.
10140 ORE->emit([&]() {
10141 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10142 L->getStartLoc(), L->getHeader())
10143 << VecDiagMsg.second;
10144 });
10145 ORE->emit([&]() {
10146 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10147 L->getStartLoc(), L->getHeader())
10148 << IntDiagMsg.second;
10149 });
10150 return false;
10151 }
10152
10153 if (!VectorizeLoop && InterleaveLoop) {
10154 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10155 ORE->emit([&]() {
10156 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10157 L->getStartLoc(), L->getHeader())
10158 << VecDiagMsg.second;
10159 });
10160 } else if (VectorizeLoop && !InterleaveLoop) {
10161 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10162 << ") in " << L->getLocStr() << '\n');
10163 ORE->emit([&]() {
10164 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10165 L->getStartLoc(), L->getHeader())
10166 << IntDiagMsg.second;
10167 });
10168 } else if (VectorizeLoop && InterleaveLoop) {
10169 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10170 << ") in " << L->getLocStr() << '\n');
10171 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10172 }
10173
10174 // Report the vectorization decision.
10175 if (VF.Width.isScalar()) {
10176 using namespace ore;
10177 assert(IC > 1);
10178 ORE->emit([&]() {
10179 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10180 L->getHeader())
10181 << "interleaved loop (interleaved count: "
10182 << NV("InterleaveCount", IC) << ")";
10183 });
10184 } else {
10185 // Report the vectorization decision.
10186 reportVectorization(ORE, L, VF, IC);
10187 }
10188 if (ORE->allowExtraAnalysis(LV_NAME))
10190
10191 // If we decided that it is *legal* to interleave or vectorize the loop, then
10192 // do it.
10193
10194 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
10195 // Consider vectorizing the epilogue too if it's profitable.
10196 VectorizationFactor EpilogueVF =
10198 if (EpilogueVF.Width.isVector()) {
10199 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
10200
10201 // The first pass vectorizes the main loop and creates a scalar epilogue
10202 // to be vectorized by executing the plan (potentially with a different
10203 // factor) again shortly afterwards.
10204 VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width);
10205 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
10206 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
10207 preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan);
10208 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1,
10209 BestEpiPlan);
10210 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM, BFI,
10211 PSI, Checks, *BestMainPlan);
10212 auto ExpandedSCEVs = LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF,
10213 *BestMainPlan, MainILV, DT, false);
10214 ++LoopsVectorized;
10215
10216 // Second pass vectorizes the epilogue and adjusts the control flow
10217 // edges from the first pass.
10218 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
10219 BFI, PSI, Checks, BestEpiPlan);
10221 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.getSE());
10222 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
10223 true);
10224 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, LVL, ExpandedSCEVs,
10225 Checks, InstsToMove);
10226 ++LoopsEpilogueVectorized;
10227 } else {
10228 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, BFI, PSI,
10229 Checks, BestPlan);
10230 // TODO: Move to general VPlan pipeline once epilogue loops are also
10231 // supported.
10234 IC, PSE);
10235 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
10237
10238 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
10239 ++LoopsVectorized;
10240 }
10241
10242 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
10243 "DT not preserved correctly");
10244 assert(!verifyFunction(*F, &dbgs()));
10245
10246 return true;
10247}
10248
10250
10251 // Don't attempt if
10252 // 1. the target claims to have no vector registers, and
10253 // 2. interleaving won't help ILP.
10254 //
10255 // The second condition is necessary because, even if the target has no
10256 // vector registers, loop vectorization may still enable scalar
10257 // interleaving.
10258 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10259 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
10260 return LoopVectorizeResult(false, false);
10261
10262 bool Changed = false, CFGChanged = false;
10263
10264 // The vectorizer requires loops to be in simplified form.
10265 // Since simplification may add new inner loops, it has to run before the
10266 // legality and profitability checks. This means running the loop vectorizer
10267 // will simplify all loops, regardless of whether anything end up being
10268 // vectorized.
10269 for (const auto &L : *LI)
10270 Changed |= CFGChanged |=
10271 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10272
10273 // Build up a worklist of inner-loops to vectorize. This is necessary as
10274 // the act of vectorizing or partially unrolling a loop creates new loops
10275 // and can invalidate iterators across the loops.
10276 SmallVector<Loop *, 8> Worklist;
10277
10278 for (Loop *L : *LI)
10279 collectSupportedLoops(*L, LI, ORE, Worklist);
10280
10281 LoopsAnalyzed += Worklist.size();
10282
10283 // Now walk the identified inner loops.
10284 while (!Worklist.empty()) {
10285 Loop *L = Worklist.pop_back_val();
10286
10287 // For the inner loops we actually process, form LCSSA to simplify the
10288 // transform.
10289 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10290
10291 Changed |= CFGChanged |= processLoop(L);
10292
10293 if (Changed) {
10294 LAIs->clear();
10295
10296#ifndef NDEBUG
10297 if (VerifySCEV)
10298 SE->verify();
10299#endif
10300 }
10301 }
10302
10303 // Process each loop nest in the function.
10304 return LoopVectorizeResult(Changed, CFGChanged);
10305}
10306
10309 LI = &AM.getResult<LoopAnalysis>(F);
10310 // There are no loops in the function. Return before computing other
10311 // expensive analyses.
10312 if (LI->empty())
10313 return PreservedAnalyses::all();
10322 AA = &AM.getResult<AAManager>(F);
10323
10324 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10325 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10326 BFI = nullptr;
10327 if (PSI && PSI->hasProfileSummary())
10329 LoopVectorizeResult Result = runImpl(F);
10330 if (!Result.MadeAnyChange)
10331 return PreservedAnalyses::all();
10333
10334 if (isAssignmentTrackingEnabled(*F.getParent())) {
10335 for (auto &BB : F)
10337 }
10338
10339 PA.preserve<LoopAnalysis>();
10343
10344 if (Result.MadeCFGChange) {
10345 // Making CFG changes likely means a loop got vectorized. Indicate that
10346 // extra simplification passes should be run.
10347 // TODO: MadeCFGChanges is not a prefect proxy. Extra passes should only
10348 // be run if runtime checks have been added.
10351 } else {
10353 }
10354 return PA;
10355}
10356
10358 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10359 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10360 OS, MapClassName2PassName);
10361
10362 OS << '<';
10363 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10364 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10365 OS << '>';
10366}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI, TargetLibraryInfo &TLI)
Definition CostModel.cpp:74
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
#define _
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
Definition Legalizer.cpp:80
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan, DenseMap< VPValue *, VPValue * > &IVEndValues)
Create resume phis in the scalar preheader for first-order recurrences, reductions and inductions,...
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static VPInstruction * addResumePhiRecipeForInduction(VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder, VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC)
Create and return a ResumePhi for WideIV, unless it is truncated.
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static Value * createInductionAdditionalBypassValues(PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount, Instruction *OldInduction)
static void fixReductionScalarResumeWhenVectorizingEpilog(VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock)
static Value * getStartValueFromReductionResult(VPInstruction *RdxResult)
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, LoopVectorizationLegality &LVL, DenseMap< const SCEV *, Value * > &ExpandedSCEVs, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove)
Connect the epilogue vector loop generated for EpiPlan to the main vector.
static bool planContainsAdditionalSimplifications(VPlan &Plan, VPCostContext &CostCtx, Loop *TheLoop, ElementCount VF)
Return true if the original loop \ TheLoop contains any instructions that do not have corresponding r...
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop)
Creates a VPWidenIntOrFpInductionRecpipe for Phi.
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the exit block for first order reductions in the original exit block.
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, LoopVectorizationLegality &LVL, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
mir Rename Register Operands
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={})
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1512
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:984
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ ICMP_NE
not equal
Definition InstrTypes.h:700
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:704
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
static DebugLoc getTemporary()
Definition DebugLoc.h:161
static DebugLoc getUnknown()
Definition DebugLoc.h:162
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:237
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:158
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:275
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB)
Introduces a new VPIRBasicBlock for CheckIRBB to Plan between the vector preheader and its predecesso...
BasicBlock * emitIterationCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check, VPlan &Plan)
Value * createIterationCountCheck(BasicBlock *VectorPH, ElementCount VF, unsigned UF) const
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i....
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:762
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
A struct for saving information about induction variables.
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant w...
Value * getStartValue() const
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
Value * TripCount
Trip count of the original loop.
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitablity analysis.
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
friend class LoopVectorizationPlanner
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
ProfileSummaryInfo * PSI
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks, VPlan &Plan)
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:343
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent sto...
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool usePredicatedReductionSelect() const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle for 2 options - if IV update may overflow or not.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication s...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
const SmallPtrSetImpl< const Instruction * > & getPotentiallyFaultingLoads() const
Returns potentially faulting loads.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool hasUncountableEarlyExit() const
Returns true if the loop has exactly one uncountable early exit, i.e.
bool hasHistograms() const
Returns a list of all known histogram operations in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1602
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1653
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1586
VectorizationFactor computeBestVF()
Compute and return the most profitable vectorization factor.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool VectorizingEpilogue)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1567
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1731
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:67
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:632
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:61
Metadata node.
Definition Metadata.h:1077
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:119
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:230
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static LLVM_ABI bool isFloatingPointRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a floating point kind.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
Value * getSentinelValue() const
Returns the sentinel value for FindFirstIV & FindLastIV recurrences to replace the start value.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:59
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:102
void insert_range(Range &&R)
Definition SetVector.h:175
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:261
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:150
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:338
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const
Estimate the overhead of scalarizing an instruction.
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
LLVM_ABI InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
LLVM_ABI bool supportsScalableVectors() const
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing operands with the given types.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:87
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:96
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
Value * getOperand(unsigned i) const
Definition User.h:232
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:3764
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
Definition VPlan.h:3839
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:3791
iterator end()
Definition VPlan.h:3801
iterator begin()
Recipe iterator methods.
Definition VPlan.h:3799
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:3852
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:246
VPRegionBlock * getEnclosingLoopRegion()
Definition VPlan.cpp:619
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:3830
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:81
VPRegionBlock * getParent()
Definition VPlan.h:173
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:190
void setName(const Twine &newName)
Definition VPlan.h:166
size_t getNumSuccessors() const
Definition VPlan.h:219
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
Definition VPlan.h:322
size_t getNumPredecessors() const
Definition VPlan.h:220
VPlan * getPlan()
Definition VPlan.cpp:165
VPBlockBase * getSinglePredecessor() const
Definition VPlan.h:215
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:170
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:209
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:198
static auto blocksOnly(const T &Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:232
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
Definition VPlanUtils.h:253
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
Definition VPlanUtils.h:191
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:218
VPlan-based builder utility analogous to IRBuilder.
VPDerivedIVRecipe * createDerivedIV(InductionDescriptor::InductionKind Kind, FPMathOperator *FPBinOp, VPValue *Start, VPValue *Current, VPValue *Step, const Twine &Name="")
Convert the input value Current to the corresponding value of an induction with Start and Step values...
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPInstruction * createScalarCast(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy, DebugLoc DL)
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:424
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:397
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1.
VPValue * getStepValue() const
Definition VPlan.h:3641
VPValue * getStartValue() const
Definition VPlan.h:3640
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:1977
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2025
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2014
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:3917
Helper to manage IR metadata for recipes.
Definition VPlan.h:942
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:983
@ ComputeAnyOfResult
Compute the final result of a AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1016
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1063
@ FirstOrderRecurrenceSplice
Definition VPlan.h:989
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1054
unsigned getOpcode() const
Definition VPlan.h:1119
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
Definition VPlan.h:2576
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
A recipe for forming partial reductions.
Definition VPlan.h:2753
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1290
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:394
VPBasicBlock * getParent()
Definition VPlan.h:415
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:482
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipies from IR instructions.
VPRecipeBase * tryToCreateWidenRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for R if one can be created within the given VF Range.
VPValue * getBlockInMask(VPBasicBlock *VPBB) const
Returns the entry mask for block VPBB or null if the mask is all-true.
std::optional< unsigned > getScalingForReduction(const Instruction *ExitInst)
void collectScaledReductions(VFRange &Range)
Find all possible partial reductions in the loop and track all of those that are valid so recipes can...
VPReplicateRecipe * handleReplication(Instruction *I, ArrayRef< VPValue * > Operands, VFRange &Range)
Build a VPReplicationRecipe for I using Operands.
VPRecipeBase * tryToCreatePartialReduction(Instruction *Reduction, ArrayRef< VPValue * > Operands, unsigned ScaleFactor)
Create and return a partial reduction recipe for a reduction instruction along with binary operation ...
A recipe for handling reduction phis.
Definition VPlan.h:2331
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
Definition VPlan.h:2391
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2385
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:3952
const VPBlockBase * getEntry() const
Definition VPlan.h:3988
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:2856
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:521
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:586
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:199
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:243
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:238
void addOperand(VPValue *Operand)
Definition VPlanValue.h:232
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:135
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition VPlanValue.h:176
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:85
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1403
user_iterator user_begin()
Definition VPlanValue.h:130
unsigned getNumUsers() const
Definition VPlanValue.h:113
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1407
user_range users()
Definition VPlanValue.h:134
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:1841
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1482
A recipe for handling GEP instructions.
Definition VPlan.h:1769
Base class for widened induction (VPWidenIntOrFpInductionRecipe and VPWidenPointerInductionRecipe),...
Definition VPlan.h:2042
VPValue * getStepValue()
Returns the step value of the induction.
Definition VPlan.h:2070
const InductionDescriptor & getInductionDescriptor() const
Returns the induction descriptor for the recipe.
Definition VPlan.h:2087
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2117
A common base class for widening memory operations.
Definition VPlan.h:3133
A recipe for widened phis.
Definition VPlan.h:2253
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1439
VPlan models a candidate for vectorization, encoding various decisions take to produce efficient outp...
Definition VPlan.h:4055
bool hasVF(ElementCount VF) const
Definition VPlan.h:4264
VPBasicBlock * getEntry()
Definition VPlan.h:4154
VPValue & getVectorTripCount()
The vector trip count.
Definition VPlan.h:4244
VPValue & getVFxUF()
Returns VF * UF of the vector loop region.
Definition VPlan.h:4250
VPValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4247
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4216
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
Definition VPlan.h:4271
bool hasUF(unsigned UF) const
Definition VPlan.h:4282
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4206
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1037
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4427
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1019
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4230
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4179
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4306
bool hasScalarVFOnly() const
Definition VPlan.h:4275
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4197
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:943
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
Definition VPlan.h:4360
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4202
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4159
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1179
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
Definition TypeSize.h:269
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
Definition TypeSize.h:277
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
constexpr bool isZero() const
Definition TypeSize.h:154
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t > m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1)
bool match(const SCEV *S, const Pattern &P)
class_match< const SCEV > m_SCEV()
match_combine_or< AllRecipe_match< Instruction::ZExt, Op0_t >, AllRecipe_match< Instruction::SExt, Op0_t > > m_ZExtOrSExt(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastElement, Op0_t > m_ExtractLastElement(const Op0_t &Op0)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
Definition VPlanUtils.h:44
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
const SCEV * getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE)
Return the SCEV expression for V.
unsigned getVFScaleFactor(VPRecipeBase *R)
Get the VF scaling factor applied to the recipe's output, if the recipe has one.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:689
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1657
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan, bool VerifyLate=false)
Verify invariants for general VPlans.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2452
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:216
LLVM_ABI bool VerifySCEV
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:682
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:243
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:348
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1624
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1719
LLVM_ABI cl::opt< bool > EnableLoopVectorization
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1767
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:126
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:405
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about an recurrence kind, return the identity for the @llvm.vector....
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
cl::opt< bool > EnableVPlanNativePath
Definition VPlan.cpp:56
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
unsigned getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind)
A helper function that returns how much we should divide the cost of a predicated block by.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:299
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:77
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:836
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
BlockFrequencyInfo * BFI
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
TargetTransformInfo * TTI
Storage for information about made changes.
A chain of instructions that form a partial reduction.
Instruction * Reduction
The top-level binary operation that forms the reduction to a scalar after the loop body.
Instruction * ExtendA
The extension of each of the inner binary operation's operands.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool isLegacyUniformAfterVectorization(Instruction *I, ElementCount VF) const
Return true if I is considered uniform-after-vectorization in the legacy cost model for VF.
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
Definition VPlan.h:2296
A struct that represents some properties of the register usage of a loop.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening select instructions.
Definition VPlan.h:1723
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE)
Create a base VPlan0, serving as the common starting point for all later candidates.
static void optimizeInductionExitUsers(VPlan &Plan, DenseMap< VPValue *, VPValue * > &EndValues, ScalarEvolution &SE)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static LLVM_ABI_FOR_TEST void handleEarlyExits(VPlan &Plan, bool HasUncountableExit)
Update Plan to account for all early exits.
static void canonicalizeEVLLoops(VPlan &Plan)
Transform EVL loops to use variable-length stepping after region dissolution.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static bool runPass(bool(*Transform)(VPlan &, ArgsTy...), VPlan &Plan, typename std::remove_reference< ArgsTy >::type &...Args)
Helper to run a VPlan transform Transform on VPlan, forwarding extra arguments to the transform.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static void materializeBuildVectors(VPlan &Plan)
Add explicit Build[Struct]Vector recipes that combine multiple scalar values into single vectors.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, ScalarEvolution &SE)
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static DenseMap< VPBasicBlock *, VPValue * > introduceMasksAndLinearize(VPlan &Plan, bool FoldTail)
Predicate and linearize the control-flow in the only loop region of Plan.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPEVLBasedIVPHIRecipe and related recipes to Plan and replaces all uses except the canonical IV...
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static void removeBranchOnConst(VPlan &Plan)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue)
Materialize vector trip count computations to a set of VPInstructions.
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlanPtr &Plan, function_ref< const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace each replicating VPReplicateRecipe and VPInstruction outside of any replicate region in Plan ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void narrowInterleaveGroups(VPlan &Plan, ElementCount VF, unsigned VectorRegWidth)
Try to convert a plan with interleave groups with VF elements to a plan with the interleave groups re...
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void materializeVFAndVFxUF(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize VF and VFxUF to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *TripCount, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool RequiresScalarEpilogueCheck, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks