LoopVectorize.cpp
1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
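//
// A minimal illustration (added for clarity, not part of the original
// comment): with a vectorization factor of 4, a scalar loop such as
//   for (int i = 0; i < n; ++i)
//     A[i] = B[i] + C[i];
// is conceptually rewritten so that each wide iteration handles four
// elements at once,
//   for (int i = 0; i + 3 < n; i += 4)
//     A[i..i+3] = B[i..i+3] + C[i..i+3];   // one vector add per iteration
// with any leftover iterations handled by a scalar epilogue or a predicated
// (tail-folded) vector body.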
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cstdint>
150#include <functional>
151#include <iterator>
152#include <limits>
153#include <memory>
154#include <string>
155#include <tuple>
156#include <utility>
157
158using namespace llvm;
159using namespace SCEVPatternMatch;
160
161#define LV_NAME "loop-vectorize"
162#define DEBUG_TYPE LV_NAME
163
164#ifndef NDEBUG
165const char VerboseDebug[] = DEBUG_TYPE "-verbose";
166#endif
167
168STATISTIC(LoopsVectorized, "Number of loops vectorized");
169STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
170STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
171STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
172
174 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
175 cl::desc("Enable vectorization of epilogue loops."));
176
178 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
179 cl::desc("When epilogue vectorization is enabled, and a value greater than "
180 "1 is specified, forces the given VF for all applicable epilogue "
181 "loops."));
182
184 "epilogue-vectorization-minimum-VF", cl::Hidden,
185 cl::desc("Only loops with vectorization factor equal to or larger than "
186 "the specified value are considered for epilogue vectorization."));
187
188/// Loops with a known constant trip count below this number are vectorized only
189/// if no scalar iteration overheads are incurred.
191 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
192 cl::desc("Loops with a constant trip count that is smaller than this "
193 "value are vectorized only if no scalar iteration overheads "
194 "are incurred."));
195
197 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
198 cl::desc("The maximum allowed number of runtime memory checks"));
199
200// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
201// that predication is preferred, and this lists all options. I.e., the
202// vectorizer will try to fold the tail-loop (epilogue) into the vector body
203// and predicate the instructions accordingly. If tail-folding fails, there are
204// different fallback strategies depending on these values:
211} // namespace PreferPredicateTy
212
214 "prefer-predicate-over-epilogue",
217 cl::desc("Tail-folding and predication preferences over creating a scalar "
218 "epilogue loop."),
220 "scalar-epilogue",
221 "Don't tail-predicate loops, create scalar epilogue"),
223 "predicate-else-scalar-epilogue",
224 "prefer tail-folding, create scalar epilogue if tail "
225 "folding fails."),
227 "predicate-dont-vectorize",
228 "prefers tail-folding, don't attempt vectorization if "
229 "tail-folding fails.")));
230
232 "force-tail-folding-style", cl::desc("Force the tail folding style"),
235 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
238 "Create lane mask for data only, using active.lane.mask intrinsic"),
240 "data-without-lane-mask",
241 "Create lane mask with compare/stepvector"),
243 "Create lane mask using active.lane.mask intrinsic, and use "
244 "it for both data and control flow"),
246 "data-and-control-without-rt-check",
247 "Similar to data-and-control, but remove the runtime check"),
249 "Use predicated EVL instructions for tail folding. If EVL "
250 "is unsupported, fallback to data-without-lane-mask.")));
251
253 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
254 cl::desc("Maximize bandwidth when selecting vectorization factor which "
255 "will be determined by the smallest type in loop."));
256
258 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
259 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
260
261/// An interleave-group may need masking if it resides in a block that needs
262/// predication, or in order to mask away gaps.
264 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
265 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
266
268 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
269 cl::desc("A flag that overrides the target's number of scalar registers."));
270
272 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
273 cl::desc("A flag that overrides the target's number of vector registers."));
274
276 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
277 cl::desc("A flag that overrides the target's max interleave factor for "
278 "scalar loops."));
279
281 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
282 cl::desc("A flag that overrides the target's max interleave factor for "
283 "vectorized loops."));
284
286 "force-target-instruction-cost", cl::init(0), cl::Hidden,
287 cl::desc("A flag that overrides the target's expected cost for "
288 "an instruction to a single constant value. Mostly "
289 "useful for getting consistent testing."));
290
292 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
293 cl::desc(
294 "Pretend that scalable vectors are supported, even if the target does "
295 "not support them. This flag should only be used for testing."));
296
298 "small-loop-cost", cl::init(20), cl::Hidden,
299 cl::desc(
300 "The cost of a loop that is considered 'small' by the interleaver."));
301
303 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
304 cl::desc("Enable the use of the block frequency analysis to access PGO "
305 "heuristics minimizing code growth in cold regions and being more "
306 "aggressive in hot regions."));
307
308// Runtime interleave loops for load/store throughput.
310 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
311 cl::desc(
312 "Enable runtime interleaving until load/store ports are saturated"));
313
314/// The number of stores in a loop that are allowed to need predication.
316 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
317 cl::desc("Max number of stores to be predicated behind an if."));
318
320 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
321 cl::desc("Count the induction variable only once when interleaving"));
322
324 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
325 cl::desc("Enable if predication of stores during vectorization."));
326
328 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
329 cl::desc("The maximum interleave count to use when interleaving a scalar "
330 "reduction in a nested loop."));
331
332static cl::opt<bool>
333 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
335 cl::desc("Prefer in-loop vector reductions, "
336 "overriding the targets preference."));
337
339 "force-ordered-reductions", cl::init(false), cl::Hidden,
340 cl::desc("Enable the vectorisation of loops with in-order (strict) "
341 "FP reductions"));
342
344 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
345 cl::desc(
346 "Prefer predicating a reduction operation over an after loop select."));
347
349 "enable-vplan-native-path", cl::Hidden,
350 cl::desc("Enable VPlan-native vectorization path with "
351 "support for outer loop vectorization."));
352
354 llvm::VerifyEachVPlan("vplan-verify-each",
355#ifdef EXPENSIVE_CHECKS
356 cl::init(true),
357#else
358 cl::init(false),
359#endif
361 cl::desc("Verfiy VPlans after VPlan transforms."));
362
363// This flag enables the stress testing of the VPlan H-CFG construction in the
364// VPlan-native vectorization path. It must be used in conjunction with
365// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
366// verification of the H-CFGs built.
368 "vplan-build-stress-test", cl::init(false), cl::Hidden,
369 cl::desc(
370 "Build VPlan for every supported loop nest in the function and bail "
371 "out right after the build (stress test the VPlan H-CFG construction "
372 "in the VPlan-native vectorization path)."));
373
375 "interleave-loops", cl::init(true), cl::Hidden,
376 cl::desc("Enable loop interleaving in Loop vectorization passes"));
378 "vectorize-loops", cl::init(true), cl::Hidden,
379 cl::desc("Run the Loop vectorization passes"));
380
382 "force-widen-divrem-via-safe-divisor", cl::Hidden,
383 cl::desc(
384 "Override cost based safe divisor widening for div/rem instructions"));
385
387 "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
389 cl::desc("Try wider VFs if they enable the use of vector variants"));
390
392 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
393 cl::desc(
394 "Enable vectorization of early exit loops with uncountable exits."));
395
397 "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
398 cl::desc("Discard VFs if their register pressure is too high."));
399
400// Likelihood of bypassing the vectorized loop because there are zero trips left
401// after prolog. See `emitIterationCountCheck`.
402static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
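// Note added for clarity: these weights bias the minimum-iterations branch
// heavily toward entering the vector loop, i.e. an assumed bypass
// probability of roughly 1/(1 + 127).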
403
404/// A helper function that returns true if the given type is irregular. The
405/// type is irregular if its allocated size doesn't equal the store size of an
406/// element of the corresponding vector type.
407static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
408 // Determine if an array of N elements of type Ty is "bitcast compatible"
409 // with a <N x Ty> vector.
410 // This is only true if there is no padding between the array elements.
411 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
412}
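// Illustrative example (not part of the original source): with a typical
// x86-64 data layout, x86_fp80 has a type size of 80 bits but an allocation
// size of 128 bits, so an array of x86_fp80 is not bitcast-compatible with
// <N x x86_fp80> and the type is irregular; i32 (32 bits allocated, 32 bits
// stored) is regular.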
413
414/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
415/// ElementCount to include loops whose trip count is a function of vscale.
417 const Loop *L) {
418 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
419 return ElementCount::getFixed(ExpectedTC);
420
421 const SCEV *BTC = SE->getBackedgeTakenCount(L);
423 return ElementCount::getFixed(0);
424
425 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
426 if (isa<SCEVVScale>(ExitCount))
428
429 const APInt *Scale;
430 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
431 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
432 if (Scale->getActiveBits() <= 32)
434
435 return ElementCount::getFixed(0);
436}
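// Illustrative example (not part of the original source): for a loop whose
// exit count folds to the SCEV expression (4 * vscale) with no unsigned
// wrap, the matcher above yields a scalable ElementCount of "4 x vscale";
// a plain constant trip count such as 100 is handled by the first check and
// returned as a fixed ElementCount.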
437
438/// Returns "best known" trip count, which is either a valid positive trip count
439/// or std::nullopt when an estimate cannot be made (including when the trip
440/// count would overflow), for the specified loop \p L as defined by the
441/// following procedure:
442/// 1) Returns exact trip count if it is known.
443/// 2) Returns expected trip count according to profile data if any.
444/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
445/// 4) Returns std::nullopt if all of the above failed.
446static std::optional<ElementCount>
448 bool CanUseConstantMax = true) {
449 // Check if exact trip count is known.
450 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
451 return ExpectedTC;
452
453 // Check if there is an expected trip count available from profile data.
455 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
456 return ElementCount::getFixed(*EstimatedTC);
457
458 if (!CanUseConstantMax)
459 return std::nullopt;
460
461 // Check if upper bound estimate is known.
462 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
463 return ElementCount::getFixed(ExpectedTC);
464
465 return std::nullopt;
466}
467
468namespace {
469// Forward declare GeneratedRTChecks.
470class GeneratedRTChecks;
471
472using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
473} // namespace
474
475namespace llvm {
476
478
479/// InnerLoopVectorizer vectorizes loops which contain only one basic
480/// block to a specified vectorization factor (VF).
481/// This class performs the widening of scalars into vectors, or multiple
482/// scalars. This class also implements the following features:
483/// * It inserts an epilogue loop for handling loops that don't have iteration
484/// counts that are known to be a multiple of the vectorization factor.
485/// * It handles the code generation for reduction variables.
486/// * Scalarization (implementation using scalars) of un-vectorizable
487/// instructions.
488/// InnerLoopVectorizer does not perform any vectorization-legality
489/// checks, and relies on the caller to check for the different legality
490/// aspects. The InnerLoopVectorizer relies on the
491/// LoopVectorizationLegality class to provide information about the induction
492/// and reduction variables that were found to a given vectorization factor.
494public:
498 ElementCount VecWidth, unsigned UnrollFactor,
500 ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks,
501 VPlan &Plan)
502 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
503 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
506 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
507
508 virtual ~InnerLoopVectorizer() = default;
509
510 /// Creates a basic block for the scalar preheader. Both
511 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop override
512 /// the method to create additional blocks and checks needed for epilogue
513 /// vectorization.
515
516 /// Fix the vectorized code, taking care of header phi's, and more.
518
519 /// Fix the non-induction PHIs in \p Plan.
521
522 /// Returns the original loop trip count.
523 Value *getTripCount() const { return TripCount; }
524
525 /// Used to set the trip count after ILV's construction and after the
526 /// preheader block has been executed. Note that this always holds the trip
527 /// count of the original loop for both main loop and epilogue vectorization.
528 void setTripCount(Value *TC) { TripCount = TC; }
529
530protected:
532
533 /// Create and return a new IR basic block for the scalar preheader whose name
534 /// is prefixed with \p Prefix.
536
537 /// Allow subclasses to override and print debug traces before/after vplan
538 /// execution, when trace information is requested.
539 virtual void printDebugTracesAtStart() {}
540 virtual void printDebugTracesAtEnd() {}
541
542 /// The original loop.
544
545 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
546 /// dynamic knowledge to simplify SCEV expressions and converts them to a
547 /// more usable form.
549
550 /// Loop Info.
552
553 /// Dominator Tree.
555
556 /// Target Transform Info.
558
559 /// Assumption Cache.
561
562 /// The vectorization SIMD factor to use. Each vector will have this many
563 /// vector elements.
565
566 /// The vectorization unroll factor to use. Each scalar is vectorized to this
567 /// many different vector instructions.
568 unsigned UF;
569
570 /// The builder that we use
572
573 // --- Vectorization state ---
574
575 /// Trip count of the original loop.
576 Value *TripCount = nullptr;
577
578 /// The profitability analysis.
580
581 /// BFI and PSI are used to check for profile guided size optimizations.
584
585 /// Structure to hold information about generated runtime checks, responsible
586 /// for cleaning the checks, if vectorization turns out unprofitable.
587 GeneratedRTChecks &RTChecks;
588
590
591 /// The vector preheader block of \p Plan, used as target for check blocks
592 /// introduced during skeleton creation.
594};
595
596/// Encapsulate information regarding vectorization of a loop and its epilogue.
597/// This information is meant to be updated and used across two stages of
598/// epilogue vectorization.
601 unsigned MainLoopUF = 0;
603 unsigned EpilogueUF = 0;
606 Value *TripCount = nullptr;
609
611 ElementCount EVF, unsigned EUF,
613 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
615 assert(EUF == 1 &&
616 "A high UF for the epilogue loop is likely not beneficial.");
617 }
618};
619
620/// An extension of the inner loop vectorizer that creates a skeleton for a
621/// vectorized loop that has its epilogue (residual) also vectorized.
622 /// The idea is to run the VPlan on a given loop twice: first to set up the
623 /// skeleton and vectorize the main loop, and second to complete the skeleton
624/// from the first step and vectorize the epilogue. This is achieved by
625/// deriving two concrete strategy classes from this base class and invoking
626/// them in succession from the loop vectorizer planner.
628public:
639
640 /// Holds and updates state information required to vectorize the main loop
641 /// and its epilogue in two separate passes. This setup helps us avoid
642 /// regenerating and recomputing runtime safety checks. It also helps us to
643 /// shorten the iteration-count-check path length for the cases where the
644 /// iteration count of the loop is so small that the main vector loop is
645 /// completely skipped.
647
648protected:
650};
651
652/// A specialized derived class of inner loop vectorizer that performs
653/// vectorization of *main* loops in the process of vectorizing loops and their
654/// epilogues.
656public:
668 /// Implements the interface for creating a vectorized skeleton using the
669 /// *main loop* strategy (i.e., the first pass of VPlan execution).
671
672protected:
673 /// Introduces a new VPIRBasicBlock for \p CheckIRBB to Plan between the
674 /// vector preheader and its predecessor, also connecting the new block to the
675 /// scalar preheader.
676 void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB);
677
678 // Create a check to see if the main vector loop should be executed
680 unsigned UF) const;
681
682 /// Emits an iteration count bypass check once for the main loop (when \p
683 /// ForEpilogue is false) and once for the epilogue loop (when \p
684 /// ForEpilogue is true).
686 bool ForEpilogue);
687 void printDebugTracesAtStart() override;
688 void printDebugTracesAtEnd() override;
689};
690
691// A specialized derived class of inner loop vectorizer that performs
692// vectorization of *epilogue* loops in the process of vectorizing loops and
693// their epilogues.
695 /// The additional bypass block which conditionally skips over the epilogue
696 /// loop after executing the main loop. Needed to resume inductions and
697 /// reductions during epilogue vectorization.
698 BasicBlock *AdditionalBypassBlock = nullptr;
699
700public:
712 /// Implements the interface for creating a vectorized skeleton using the
713 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
715
716 /// Return the additional bypass block which targets the scalar loop by
717 /// skipping the epilogue loop after completing the main loop.
719 assert(AdditionalBypassBlock &&
720 "Trying to access AdditionalBypassBlock but it has not been set");
721 return AdditionalBypassBlock;
722 }
723
724protected:
725 /// Emits an iteration count bypass check after the main vector loop has
726 /// finished to see if there are any iterations left to execute by either
727 /// the vector epilogue or the scalar epilogue.
728 BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *VectorPH,
729 BasicBlock *Bypass,
730 BasicBlock *Insert);
731 void printDebugTracesAtStart() override;
732 void printDebugTracesAtEnd() override;
733};
734} // end namespace llvm
735
736/// Look for a meaningful debug location on the instruction or its operands.
738 if (!I)
739 return DebugLoc::getUnknown();
740
742 if (I->getDebugLoc() != Empty)
743 return I->getDebugLoc();
744
745 for (Use &Op : I->operands()) {
746 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
747 if (OpInst->getDebugLoc() != Empty)
748 return OpInst->getDebugLoc();
749 }
750
751 return I->getDebugLoc();
752}
753
754/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
755/// is passed, the message relates to that particular instruction.
756#ifndef NDEBUG
757static void debugVectorizationMessage(const StringRef Prefix,
758 const StringRef DebugMsg,
759 Instruction *I) {
760 dbgs() << "LV: " << Prefix << DebugMsg;
761 if (I != nullptr)
762 dbgs() << " " << *I;
763 else
764 dbgs() << '.';
765 dbgs() << '\n';
766}
767#endif
768
769/// Create an analysis remark that explains why vectorization failed
770///
771/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
772/// RemarkName is the identifier for the remark. If \p I is passed it is an
773/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
774/// the location of the remark. If \p DL is passed, use it as debug location for
775/// the remark. \return the remark object that can be streamed to.
777createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
778 Instruction *I, DebugLoc DL = {}) {
779 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
780 // If debug location is attached to the instruction, use it. Otherwise if DL
781 // was not provided, use the loop's.
782 if (I && I->getDebugLoc())
783 DL = I->getDebugLoc();
784 else if (!DL)
785 DL = TheLoop->getStartLoc();
786
787 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
788}
789
790namespace llvm {
791
792/// Return a value for Step multiplied by VF.
794 int64_t Step) {
795 assert(Ty->isIntegerTy() && "Expected an integer step");
796 ElementCount VFxStep = VF.multiplyCoefficientBy(Step);
797 assert(isPowerOf2_64(VF.getKnownMinValue()) && "must pass power-of-2 VF");
798 if (VF.isScalable() && isPowerOf2_64(Step)) {
799 return B.CreateShl(
800 B.CreateVScale(Ty),
801 ConstantInt::get(Ty, Log2_64(VFxStep.getKnownMinValue())), "", true);
802 }
803 return B.CreateElementCount(Ty, VFxStep);
804}
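// Illustrative usage sketch (assumes an IRBuilderBase `B`, an integer IV
// type `IndexTy`, an existing `Index` value and the chosen VF/UF; not taken
// from this file): the canonical induction variable of the vector loop is
// typically advanced by VF * UF elements per wide iteration:
//   Value *Step = createStepForVF(B, IndexTy, VF, UF);
//   Value *IndexNext = B.CreateAdd(Index, Step, "index.next");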
805
806/// Return the runtime value for VF.
808 return B.CreateElementCount(Ty, VF);
809}
810
812 const StringRef OREMsg, const StringRef ORETag,
813 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
814 Instruction *I) {
815 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
816 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
817 ORE->emit(
818 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
819 << "loop not vectorized: " << OREMsg);
820}
821
822/// Reports an informative message: print \p Msg for debugging purposes as well
823/// as an optimization remark. Uses either \p I as location of the remark, or
824/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
825 /// remark.
826static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
828 Loop *TheLoop, Instruction *I = nullptr,
829 DebugLoc DL = {}) {
831 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
832 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
833 I, DL)
834 << Msg);
835}
836
837/// Report successful vectorization of the loop. In case an outer loop is
838/// vectorized, prepend "outer" to the vectorization remark.
840 VectorizationFactor VF, unsigned IC) {
842 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
843 nullptr));
844 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
845 ORE->emit([&]() {
846 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
847 TheLoop->getHeader())
848 << "vectorized " << LoopType << "loop (vectorization width: "
849 << ore::NV("VectorizationFactor", VF.Width)
850 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
851 });
852}
853
854} // end namespace llvm
855
856namespace llvm {
857
858// Loop vectorization cost-model hints how the scalar epilogue loop should be
859// lowered.
861
862 // The default: allowing scalar epilogues.
864
865 // Vectorization with OptForSize: don't allow epilogues.
867
868 // A special case of vectorization with OptForSize: loops with a very small
869 // trip count are considered for vectorization under OptForSize, thereby
870 // making sure the cost of their loop body is dominant, free of runtime
871 // guards and scalar iteration overheads.
873
874 // Loop hint predicate indicating an epilogue is undesired.
876
877 // Directive indicating we must either tail fold or not vectorize
879};
880
881/// LoopVectorizationCostModel - estimates the expected speedups due to
882/// vectorization.
883/// In many cases vectorization is not profitable. This can happen because of
884/// a number of reasons. In this class we mainly attempt to predict the
885/// expected speedup/slowdowns due to the supported instruction set. We use the
886/// TargetTransformInfo to query the different backends for the cost of
887/// different operations.
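/// For example (an illustrative sketch only, not the exact queries issued by
/// this class), a per-instruction vector cost for a candidate VF can be
/// obtained from TTI roughly as follows:
///   auto *VecTy = VectorType::get(I->getType(), VF);
///   InstructionCost C = TTI.getArithmeticInstrCost(I->getOpcode(), VecTy);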
890
891public:
902 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
903 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
904 Hints(Hints), InterleaveInfo(IAI) {
905 if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
906 initializeVScaleForTuning();
908 // Query this against the original loop and save it here because the profile
909 // of the original loop header may change as the transformation happens.
910 OptForSize = llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
912 }
913
914 /// \return An upper bound for the vectorization factors (both fixed and
915 /// scalable). If the factors are 0, vectorization and interleaving should be
916 /// avoided up front.
917 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
918
919 /// \return True if runtime checks are required for vectorization, and false
920 /// otherwise.
922
923 /// Setup cost-based decisions for user vectorization factor.
924 /// \return true if the UserVF is a feasible VF to be chosen.
929
930 /// \return True if maximizing vector bandwidth is enabled by the target or
931 /// user options, for the given register kind.
933
934 /// \return True if register pressure should be considered for the given VF.
936
937 /// \return The size (in bits) of the smallest and widest types in the code
938 /// that needs to be vectorized. We ignore values that remain scalar such as
939 /// 64 bit loop indices.
940 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
941
942 /// Memory access instruction may be vectorized in more than one way.
943 /// Form of instruction after vectorization depends on cost.
944 /// This function takes cost-based decisions for Load/Store instructions
945 /// and collects them in a map. This decisions map is used for building
946 /// the lists of loop-uniform and loop-scalar instructions.
947 /// The calculated cost is saved with widening decision in order to
948 /// avoid redundant calculations.
950
951 /// A call may be vectorized in different ways depending on whether we have
952 /// vectorized variants available and whether the target supports masking.
953 /// This function analyzes all calls in the function at the supplied VF,
954 /// makes a decision based on the costs of available options, and stores that
955 /// decision in a map for use in planning and plan execution.
957
958 /// Collect values we want to ignore in the cost model.
960
961 /// Collect all element types in the loop for which widening is needed.
963
964 /// Split reductions into those that happen in the loop, and those that happen
965 /// outside. In-loop reductions are collected into InLoopReductions.
967
968 /// Returns true if we should use strict in-order reductions for the given
969 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
970 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
971 /// of FP operations.
972 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
973 return !Hints->allowReordering() && RdxDesc.isOrdered();
974 }
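 /// Illustrative example (not from the original source): a floating-point
 /// reduction such as
 ///   float S = 0.0f;
 ///   for (int I = 0; I < N; ++I) S += A[I];
 /// must preserve the original evaluation order unless FP reassociation is
 /// permitted, so when RdxDesc is marked ordered and the loop hints do not
 /// allow reordering, this returns true and the reduction is vectorized
 /// in-order (strictly).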
975
976 /// \returns The smallest bitwidth each instruction can be represented with.
977 /// The vector equivalents of these instructions should be truncated to this
978 /// type.
980 return MinBWs;
981 }
982
983 /// \returns True if it is more profitable to scalarize instruction \p I for
984 /// vectorization factor \p VF.
986 assert(VF.isVector() &&
987 "Profitable to scalarize relevant only for VF > 1.");
988 assert(
989 TheLoop->isInnermost() &&
990 "cost-model should not be used for outer loops (in VPlan-native path)");
991
992 auto Scalars = InstsToScalarize.find(VF);
993 assert(Scalars != InstsToScalarize.end() &&
994 "VF not yet analyzed for scalarization profitability");
995 return Scalars->second.contains(I);
996 }
997
998 /// Returns true if \p I is known to be uniform after vectorization.
1000 assert(
1001 TheLoop->isInnermost() &&
1002 "cost-model should not be used for outer loops (in VPlan-native path)");
1003 // Pseudo probe needs to be duplicated for each unrolled iteration and
1004 // vector lane so that profiled loop trip count can be accurately
1005 // accumulated instead of being under counted.
1007 return false;
1008
1009 if (VF.isScalar())
1010 return true;
1011
1012 auto UniformsPerVF = Uniforms.find(VF);
1013 assert(UniformsPerVF != Uniforms.end() &&
1014 "VF not yet analyzed for uniformity");
1015 return UniformsPerVF->second.count(I);
1016 }
1017
1018 /// Returns true if \p I is known to be scalar after vectorization.
1020 assert(
1021 TheLoop->isInnermost() &&
1022 "cost-model should not be used for outer loops (in VPlan-native path)");
1023 if (VF.isScalar())
1024 return true;
1025
1026 auto ScalarsPerVF = Scalars.find(VF);
1027 assert(ScalarsPerVF != Scalars.end() &&
1028 "Scalar values are not calculated for VF");
1029 return ScalarsPerVF->second.count(I);
1030 }
1031
1032 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1033 /// for vectorization factor \p VF.
1035 return VF.isVector() && MinBWs.contains(I) &&
1036 !isProfitableToScalarize(I, VF) &&
1038 }
1039
1040 /// Decision that was taken during cost calculation for memory instruction.
1043 CM_Widen, // For consecutive accesses with stride +1.
1044 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1050 };
1051
1052 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1053 /// instruction \p I and vector width \p VF.
1056 assert(VF.isVector() && "Expected VF >=2");
1057 WideningDecisions[{I, VF}] = {W, Cost};
1058 }
1059
1060 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1061 /// interleaving group \p Grp and vector width \p VF.
1065 assert(VF.isVector() && "Expected VF >=2");
1066 /// Broadcast this decision to all instructions inside the group.
1067 /// When interleaving, the cost will only be assigned to one instruction, the
1068 /// insert position. For other cases, add the appropriate fraction of the
1069 /// total cost to each instruction. This ensures accurate costs are used,
1070 /// even if the insert position instruction is not used.
1071 InstructionCost InsertPosCost = Cost;
1072 InstructionCost OtherMemberCost = 0;
1073 if (W != CM_Interleave)
1074 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
1076 for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
1077 if (auto *I = Grp->getMember(Idx)) {
1078 if (Grp->getInsertPos() == I)
1079 WideningDecisions[{I, VF}] = {W, InsertPosCost};
1080 else
1081 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
1082 }
1083 }
1084 }
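 /// Worked example (added for clarity): for a 4-member interleave group with
 /// a total cost of 8 and a decision other than CM_Interleave, every member,
 /// including the insert position, is assigned 8 / 4 = 2; with CM_Interleave
 /// the full cost of 8 stays on the insert position and the other members
 /// get 0.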
1085
1086 /// Return the cost model decision for the given instruction \p I and vector
1087 /// width \p VF. Return CM_Unknown if this instruction did not pass
1088 /// through the cost modeling.
1090 assert(VF.isVector() && "Expected VF to be a vector VF");
1091 assert(
1092 TheLoop->isInnermost() &&
1093 "cost-model should not be used for outer loops (in VPlan-native path)");
1094
1095 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1096 auto Itr = WideningDecisions.find(InstOnVF);
1097 if (Itr == WideningDecisions.end())
1098 return CM_Unknown;
1099 return Itr->second.first;
1100 }
1101
1102 /// Return the vectorization cost for the given instruction \p I and vector
1103 /// width \p VF.
1105 assert(VF.isVector() && "Expected VF >=2");
1106 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1107 assert(WideningDecisions.contains(InstOnVF) &&
1108 "The cost is not calculated");
1109 return WideningDecisions[InstOnVF].second;
1110 }
1111
1119
1121 Function *Variant, Intrinsic::ID IID,
1122 std::optional<unsigned> MaskPos,
1124 assert(!VF.isScalar() && "Expected vector VF");
1125 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1126 }
1127
1129 ElementCount VF) const {
1130 assert(!VF.isScalar() && "Expected vector VF");
1131 auto I = CallWideningDecisions.find({CI, VF});
1132 if (I == CallWideningDecisions.end())
1133 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1134 return I->second;
1135 }
1136
1137 /// Return True if instruction \p I is an optimizable truncate whose operand
1138 /// is an induction variable. Such a truncate will be removed by adding a new
1139 /// induction variable with the destination type.
1141 // If the instruction is not a truncate, return false.
1142 auto *Trunc = dyn_cast<TruncInst>(I);
1143 if (!Trunc)
1144 return false;
1145
1146 // Get the source and destination types of the truncate.
1147 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1148 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1149
1150 // If the truncate is free for the given types, return false. Replacing a
1151 // free truncate with an induction variable would add an induction variable
1152 // update instruction to each iteration of the loop. We exclude from this
1153 // check the primary induction variable since it will need an update
1154 // instruction regardless.
1155 Value *Op = Trunc->getOperand(0);
1156 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1157 return false;
1158
1159 // If the truncated value is not an induction variable, return false.
1160 return Legal->isInductionPhi(Op);
1161 }
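 /// Illustrative example (not from the original source): for an i64
 /// induction variable %i whose in-loop use is `trunc i64 %i to i32`, and
 /// where that truncate is not free for the target, the vectorizer can
 /// instead introduce a new i32 induction with the same step and remove the
 /// truncate.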
1162
1163 /// Collects the instructions to scalarize for each predicated instruction in
1164 /// the loop.
1166
1167 /// Collect values that will not be widened, including Uniforms, Scalars, and
1168 /// Instructions to Scalarize for the given \p VF.
1169 /// The sets depend on CM decision for Load/Store instructions
1170 /// that may be vectorized as interleave, gather-scatter or scalarized.
1171 /// Also make a decision on what to do about call instructions in the loop
1172 /// at that VF -- scalarize, call a known vector routine, or call a
1173 /// vector intrinsic.
1175 // Do the analysis once.
1176 if (VF.isScalar() || Uniforms.contains(VF))
1177 return;
1179 collectLoopUniforms(VF);
1181 collectLoopScalars(VF);
1183 }
1184
1185 /// Returns true if the target machine supports masked store operation
1186 /// for the given \p DataType and kind of access to \p Ptr.
1187 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
1188 unsigned AddressSpace) const {
1189 return Legal->isConsecutivePtr(DataType, Ptr) &&
1190 TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace);
1191 }
1192
1193 /// Returns true if the target machine supports masked load operation
1194 /// for the given \p DataType and kind of access to \p Ptr.
1195 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
1196 unsigned AddressSpace) const {
1197 return Legal->isConsecutivePtr(DataType, Ptr) &&
1198 TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace);
1199 }
1200
1201 /// Returns true if the target machine can represent \p V as a masked gather
1202 /// or scatter operation.
1204 bool LI = isa<LoadInst>(V);
1205 bool SI = isa<StoreInst>(V);
1206 if (!LI && !SI)
1207 return false;
1208 auto *Ty = getLoadStoreType(V);
1210 if (VF.isVector())
1211 Ty = VectorType::get(Ty, VF);
1212 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1213 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1214 }
1215
1216 /// Returns true if the target machine supports all of the reduction
1217 /// variables found for the given VF.
1219 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1220 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1221 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1222 }));
1223 }
1224
1225 /// Given costs for both strategies, return true if the scalar predication
1226 /// lowering should be used for div/rem. This incorporates an override
1227 /// option so it is not simply a cost comparison.
1229 InstructionCost SafeDivisorCost) const {
1230 switch (ForceSafeDivisor) {
1231 case cl::BOU_UNSET:
1232 return ScalarCost < SafeDivisorCost;
1233 case cl::BOU_TRUE:
1234 return false;
1235 case cl::BOU_FALSE:
1236 return true;
1237 }
1238 llvm_unreachable("impossible case value");
1239 }
1240
1241 /// Returns true if \p I is an instruction which requires predication and
1242 /// for which our chosen predication strategy is scalarization (i.e. we
1243 /// don't have an alternate strategy such as masking available).
1244 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1246
1247 /// Returns true if \p I is an instruction that needs to be predicated
1248 /// at runtime. The result is independent of the predication mechanism.
1249 /// Superset of instructions that return true for isScalarWithPredication.
1250 bool isPredicatedInst(Instruction *I) const;
1251
1252 /// Return the costs for our two available strategies for lowering a
1253 /// div/rem operation which requires speculating at least one lane.
1254 /// First result is for scalarization (will be invalid for scalable
1255 /// vectors); second is for the safe-divisor strategy.
1256 std::pair<InstructionCost, InstructionCost>
1258 ElementCount VF) const;
1259
1260 /// Returns true if \p I is a memory instruction with consecutive memory
1261 /// access that can be widened.
1263
1264 /// Returns true if \p I is a memory instruction in an interleaved-group
1265 /// of memory accesses that can be vectorized with wide vector loads/stores
1266 /// and shuffles.
1268
1269 /// Check if \p Instr belongs to any interleaved access group.
1271 return InterleaveInfo.isInterleaved(Instr);
1272 }
1273
1274 /// Get the interleaved access group that \p Instr belongs to.
1277 return InterleaveInfo.getInterleaveGroup(Instr);
1278 }
1279
1280 /// Returns true if we're required to use a scalar epilogue for at least
1281 /// the final iteration of the original loop.
1282 bool requiresScalarEpilogue(bool IsVectorizing) const {
1283 if (!isScalarEpilogueAllowed()) {
1284 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1285 return false;
1286 }
1287 // If we might exit from anywhere but the latch and early exit vectorization
1288 // is disabled, we must run the exiting iteration in scalar form.
1289 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1290 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1291 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1292 "from latch block\n");
1293 return true;
1294 }
1295 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1296 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1297 "interleaved group requires scalar epilogue\n");
1298 return true;
1299 }
1300 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1301 return false;
1302 }
1303
1304 /// Returns true if a scalar epilogue is allowed; it may be disallowed due to
1305 /// optsize or a loop hint annotation.
1307 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1308 }
1309
1310 /// Returns the TailFoldingStyle that is best for the current loop.
1311 TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
1312 if (!ChosenTailFoldingStyle)
1314 return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
1315 : ChosenTailFoldingStyle->second;
1316 }
1317
1318 /// Selects and saves the TailFoldingStyle for the two cases of whether the IV
1319 /// update may overflow or not.
1320 /// \param IsScalableVF true if scalable vector factors enabled.
1321 /// \param UserIC User specific interleave count.
1322 void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
1323 assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
1324 if (!Legal->canFoldTailByMasking()) {
1325 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1326 return;
1327 }
1328
1329 // Default to TTI preference, but allow command line override.
1330 ChosenTailFoldingStyle = {
1331 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true),
1332 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false)};
1333 if (ForceTailFoldingStyle.getNumOccurrences())
1334 ChosenTailFoldingStyle = {ForceTailFoldingStyle.getValue(),
1335 ForceTailFoldingStyle.getValue()};
1336
1337 if (ChosenTailFoldingStyle->first != TailFoldingStyle::DataWithEVL &&
1338 ChosenTailFoldingStyle->second != TailFoldingStyle::DataWithEVL)
1339 return;
1340 // Override EVL styles if needed.
1341 // FIXME: Investigate opportunity for fixed vector factor.
1342 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1343 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1344 if (EVLIsLegal)
1345 return;
1346 // If for some reason EVL mode is unsupported, fall back to a scalar epilogue
1347 // if it's allowed, or DataWithoutLaneMask otherwise.
1348 if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
1349 ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
1350 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1351 else
1352 ChosenTailFoldingStyle = {TailFoldingStyle::DataWithoutLaneMask,
1354
1355 LLVM_DEBUG(
1356 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1357 "not try to generate VP Intrinsics "
1358 << (UserIC > 1
1359 ? "since interleave count specified is greater than 1.\n"
1360 : "due to non-interleaving reasons.\n"));
1361 }
1362
1363 /// Returns true if all loop blocks should be masked to fold the tail loop.
1364 bool foldTailByMasking() const {
1365 // TODO: check if it is possible to check for None style independent of
1366 // IVUpdateMayOverflow flag in getTailFoldingStyle.
1368 }
1369
1370 /// Return maximum safe number of elements to be processed per vector
1371 /// iteration, which do not prevent store-load forwarding and are safe with
1372 /// regard to the memory dependencies. Required for EVL-based VPlans to
1373 /// correctly calculate AVL (application vector length) as min(remaining AVL,
1374 /// MaxSafeElements).
1375 /// TODO: need to consider adjusting cost model to use this value as a
1376 /// vectorization factor for EVL-based vectorization.
1377 std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }
1378
1379 /// Returns true if the instructions in this block require predication
1380 /// for any reason, e.g. because tail folding now requires a predicate
1381 /// or because the block in the original loop was predicated.
1383 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1384 }
1385
1386 /// Returns true if VP intrinsics with explicit vector length support should
1387 /// be generated in the tail folded loop.
1391
1392 /// Returns true if the Phi is part of an inloop reduction.
1393 bool isInLoopReduction(PHINode *Phi) const {
1394 return InLoopReductions.contains(Phi);
1395 }
1396
1397 /// Returns true if the predicated reduction select should be used to set the
1398 /// incoming value for the reduction phi.
1400 // Force to use predicated reduction select since the EVL of the
1401 // second-to-last iteration might not be VF*UF.
1402 if (foldTailWithEVL())
1403 return true;
1405 TTI.preferPredicatedReductionSelect();
1406 }
1407
1408 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1409 /// with factor VF. Return the cost of the instruction, including
1410 /// scalarization overhead if it's needed.
1412
1413 /// Estimate cost of a call instruction CI if it were vectorized with factor
1414 /// VF. Return the cost of the instruction, including scalarization overhead
1415 /// if it's needed.
1417
1418 /// Invalidates decisions already taken by the cost model.
1420 WideningDecisions.clear();
1421 CallWideningDecisions.clear();
1422 Uniforms.clear();
1423 Scalars.clear();
1424 }
1425
1426 /// Returns the expected execution cost. The unit of the cost does
1427 /// not matter because we use the 'cost' units to compare different
1428 /// vector widths. The cost that is returned is *not* normalized by
1429 /// the factor width.
1431
1432 bool hasPredStores() const { return NumPredStores > 0; }
1433
1434 /// Returns true if epilogue vectorization is considered profitable, and
1435 /// false otherwise.
1436 /// \p VF is the vectorization factor chosen for the original loop.
1437 /// \p Multiplier is an additional scaling factor applied to VF before
1438 /// comparing to EpilogueVectorizationMinVF.
1440 const unsigned IC) const;
1441
1442 /// Returns the execution time cost of an instruction for a given vector
1443 /// width. Vector width of one means scalar.
1445
1446 /// Return the cost of instructions in an inloop reduction pattern, if I is
1447 /// part of that pattern.
1448 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1449 ElementCount VF,
1450 Type *VectorTy) const;
1451
1452 /// Returns true if \p Op should be considered invariant and if it is
1453 /// trivially hoistable.
1455
1456 /// Return the value of vscale used for tuning the cost model.
1457 std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }
1458
1459private:
1460 unsigned NumPredStores = 0;
1461
1462 /// Used to store the value of vscale used for tuning the cost model. It is
1463 /// initialized during object construction.
1464 std::optional<unsigned> VScaleForTuning;
1465
1466 /// Initializes the value of vscale used for tuning the cost model. If
1467 /// vscale_range.min == vscale_range.max then return vscale_range.max, else
1468 /// return the value returned by the corresponding TTI method.
1469 void initializeVScaleForTuning() {
1470 const Function *Fn = TheLoop->getHeader()->getParent();
1471 if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
1472 auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
1473 auto Min = Attr.getVScaleRangeMin();
1474 auto Max = Attr.getVScaleRangeMax();
1475 if (Max && Min == Max) {
1476 VScaleForTuning = Max;
1477 return;
1478 }
1479 }
1480
1481 VScaleForTuning = TTI.getVScaleForTuning();
1482 }
1483
1484 /// \return An upper bound for the vectorization factors for both
1485 /// fixed and scalable vectorization, where the minimum-known number of
1486 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1487 /// disabled or unsupported, then the scalable part will be equal to
1488 /// ElementCount::getScalable(0).
1489 FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
1490 ElementCount UserVF,
1491 bool FoldTailByMasking);
1492
1493 /// If \p VF > MaxTripcount, clamps it to the next lower VF that is <=
1494 /// MaxTripCount.
1495 ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
1496 bool FoldTailByMasking) const;
1497
1498 /// \return the maximized element count based on the target's vector
1499 /// registers and the loop trip-count, but limited to a maximum safe VF.
1500 /// This is a helper function of computeFeasibleMaxVF.
1501 ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
1502 unsigned SmallestType,
1503 unsigned WidestType,
1504 ElementCount MaxSafeVF,
1505 bool FoldTailByMasking);
1506
1507 /// Checks if scalable vectorization is supported and enabled. Caches the
1508 /// result to avoid repeated debug dumps for repeated queries.
1509 bool isScalableVectorizationAllowed();
1510
1511 /// \return the maximum legal scalable VF, based on the safe max number
1512 /// of elements.
1513 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1514
1515 /// Calculate vectorization cost of memory instruction \p I.
1516 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1517
1518 /// The cost computation for scalarized memory instruction.
1519 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1520
1521 /// The cost computation for interleaving group of memory instructions.
1522 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1523
1524 /// The cost computation for Gather/Scatter instruction.
1525 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1526
1527 /// The cost computation for widening instruction \p I with consecutive
1528 /// memory access.
1529 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1530
1531 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1532 /// Load: scalar load + broadcast.
1533 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1534 /// element)
1535 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1536
1537 /// Estimate the overhead of scalarizing an instruction. This is a
1538 /// convenience wrapper for the type-based getScalarizationOverhead API.
1539 InstructionCost getScalarizationOverhead(Instruction *I,
1540 ElementCount VF) const;
1541
1542 /// Returns true if an artificially high cost for emulated masked memrefs
1543 /// should be used.
1544 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1545
1546 /// Map of scalar integer values to the smallest bitwidth they can be legally
1547 /// represented as. The vector equivalents of these values should be truncated
1548 /// to this type.
1549 MapVector<Instruction *, uint64_t> MinBWs;
1550
1551 /// A type representing the costs for instructions if they were to be
1552 /// scalarized rather than vectorized. The entries are Instruction-Cost
1553 /// pairs.
1554 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1555
1556 /// A set containing all BasicBlocks that are known to be present after
1557 /// vectorization as predicated blocks.
1558 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1559 PredicatedBBsAfterVectorization;
1560
1561 /// Records whether it is allowed to have the original scalar loop execute at
1562 /// least once. This may be needed as a fallback loop in case runtime
1563 /// aliasing/dependence checks fail, or to handle the tail/remainder
1564 /// iterations when the trip count is unknown or doesn't divide by the VF,
1565 /// or as a peel-loop to handle gaps in interleave-groups.
1566 /// Under optsize and when the trip count is very small we don't allow any
1567 /// iterations to execute in the scalar loop.
1568 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1569
1570 /// The finally chosen tail-folding style. The first element is used if the
1571 /// IV update may overflow, the second if it does not.
1572 std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
1573 ChosenTailFoldingStyle;
1574
1575 /// true if scalable vectorization is supported and enabled.
1576 std::optional<bool> IsScalableVectorizationAllowed;
1577
1578 /// Maximum safe number of elements to be processed per vector iteration,
1579 /// which do not prevent store-load forwarding and are safe with regard to the
1580 /// memory dependencies. Required for EVL-based vectorization, where this
1581 /// value is used as the upper bound of the safe AVL.
1582 std::optional<unsigned> MaxSafeElements;
1583
1584 /// A map holding scalar costs for different vectorization factors. The
1585 /// presence of a cost for an instruction in the mapping indicates that the
1586 /// instruction will be scalarized when vectorizing with the associated
1587 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1588 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1589
1590 /// Holds the instructions known to be uniform after vectorization.
1591 /// The data is collected per VF.
1592 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1593
1594 /// Holds the instructions known to be scalar after vectorization.
1595 /// The data is collected per VF.
1596 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1597
1598 /// Holds the instructions (address computations) that are forced to be
1599 /// scalarized.
1600 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1601
1602 /// PHINodes of the reductions that should be expanded in-loop.
1603 SmallPtrSet<PHINode *, 4> InLoopReductions;
1604
1605 /// A Map of inloop reduction operations and their immediate chain operand.
1606 /// FIXME: This can be removed once reductions can be costed correctly in
1607 /// VPlan. This was added to allow quick lookup of the inloop operations.
1608 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1609
1610 /// Returns the expected difference in cost from scalarizing the expression
1611 /// feeding a predicated instruction \p PredInst. The instructions to
1612 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1613 /// non-negative return value implies the expression will be scalarized.
1614 /// Currently, only single-use chains are considered for scalarization.
1615 InstructionCost computePredInstDiscount(Instruction *PredInst,
1616 ScalarCostsTy &ScalarCosts,
1617 ElementCount VF);
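// Illustrative note (not part of the original source): a hypothetical
// instance of the discount above. If a predicated sdiv is fed by a
// single-use add that would otherwise be widened, the discount weighs the
// cost of keeping the add vectorized (plus extracting the lane that feeds
// the scalarized sdiv) against emitting the add as a scalar inside the
// predicated block. A non-negative difference means scalarizing the whole
// chain is at least as cheap, and the chain's scalar costs are recorded in
// ScalarCosts.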
1618
1619 /// Collect the instructions that are uniform after vectorization. An
1620 /// instruction is uniform if we represent it with a single scalar value in
1621 /// the vectorized loop corresponding to each vector iteration. Examples of
1622 /// uniform instructions include pointer operands of consecutive or
1623 /// interleaved memory accesses. Note that although uniformity implies an
1624 /// instruction will be scalar, the reverse is not true. In general, a
1625 /// scalarized instruction will be represented by VF scalar values in the
1626 /// vectorized loop, each corresponding to an iteration of the original
1627 /// scalar loop.
1628 void collectLoopUniforms(ElementCount VF);
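// Illustrative note (not part of the original source): for a consecutive
// access such as
//   %gep = getelementptr inbounds i32, ptr %a, i64 %iv
//   %v   = load i32, ptr %gep
// the address computation %gep is uniform after vectorization: a single
// scalar GEP per vector iteration feeds one wide load. A scalarized
// instruction, by contrast, is replicated VF times, one copy per original
// scalar iteration.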
1629
1630 /// Collect the instructions that are scalar after vectorization. An
1631 /// instruction is scalar if it is known to be uniform or will be scalarized
1632 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1633 /// to the list if they are used by a load/store instruction that is marked as
1634 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1635 /// VF values in the vectorized loop, each corresponding to an iteration of
1636 /// the original scalar loop.
1637 void collectLoopScalars(ElementCount VF);
1638
1639 /// Keeps cost model vectorization decision and cost for instructions.
1640 /// Right now it is used for memory instructions only.
1641 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1642 std::pair<InstWidening, InstructionCost>>;
1643
1644 DecisionList WideningDecisions;
1645
1646 using CallDecisionList =
1647 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1648
1649 CallDecisionList CallWideningDecisions;
1650
1651 /// Returns true if \p V is expected to be vectorized and it needs to be
1652 /// extracted.
1653 bool needsExtract(Value *V, ElementCount VF) const {
1654 Instruction *I = dyn_cast<Instruction>(V);
1655 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1656 TheLoop->isLoopInvariant(I) ||
1657 getWideningDecision(I, VF) == CM_Scalarize ||
1658 (isa<CallInst>(I) &&
1659 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1660 return false;
1661
1662 // Assume we can vectorize V (and hence we need extraction) if the
1663 // scalars are not computed yet. This can happen, because it is called
1664 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1665 // the scalars are collected. That should be a safe assumption in most
1666 // cases, because we check if the operands have vectorizable types
1667 // beforehand in LoopVectorizationLegality.
1668 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1669 };
1670
1671 /// Returns a range containing only operands needing to be extracted.
1672 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1673 ElementCount VF) const {
1674
1675 SmallPtrSet<const Value *, 4> UniqueOperands;
1676 SmallVector<Value *, 4> Res;
1677 for (Value *Op : Ops) {
1678 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1679 !needsExtract(Op, VF))
1680 continue;
1681 Res.push_back(Op);
1682 }
1683 return Res;
1684 }
1685
1686 public:
1687 /// The loop that we evaluate.
1688 Loop *TheLoop;
1689
1690 /// Predicated scalar evolution analysis.
1691 PredicatedScalarEvolution &PSE;
1692
1693 /// Loop Info analysis.
1694 LoopInfo *LI;
1695
1696 /// Vectorization legality.
1697 LoopVectorizationLegality *Legal;
1698
1699 /// Vector target information.
1700 const TargetTransformInfo &TTI;
1701
1702 /// Target Library Info.
1703 const TargetLibraryInfo *TLI;
1704
1705 /// Demanded bits analysis.
1706 DemandedBits *DB;
1707
1708 /// Assumption cache.
1709 AssumptionCache *AC;
1710
1711 /// Interface to emit optimization remarks.
1712 OptimizationRemarkEmitter *ORE;
1713
1714 const Function *TheFunction;
1715
1716 /// Loop Vectorize Hint.
1717 const LoopVectorizeHints *Hints;
1718
1719 /// The interleave access information contains groups of interleaved accesses
1720 /// with the same stride and close to each other.
1721 InterleavedAccessInfo &InterleaveInfo;
1722
1723 /// Values to ignore in the cost model.
1724 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1725
1726 /// Values to ignore in the cost model when VF > 1.
1727 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1728
1729 /// All element types found in the loop.
1730 SmallPtrSet<Type *, 16> ElementTypesInLoop;
1731
1732 /// The kind of cost that we are calculating
1733 TTI::TargetCostKind CostKind;
1734
1735 /// Whether this loop should be optimized for size based on function attribute
1736 /// or profile information.
1737 bool OptForSize;
1738
1739 /// The highest VF possible for this loop, without using MaxBandwidth.
1741};
1742} // end namespace llvm
1743
1744namespace {
1745/// Helper struct to manage generating runtime checks for vectorization.
1746///
1747 /// The runtime checks are created up-front in temporary blocks, un-linked from
1748 /// the existing IR, so that their cost can be estimated accurately. After
1749 /// deciding to vectorize, the checks are moved back into the IR. If deciding
1750 /// not to vectorize, the temporary blocks are completely removed.
1751class GeneratedRTChecks {
1752 /// Basic block which contains the generated SCEV checks, if any.
1753 BasicBlock *SCEVCheckBlock = nullptr;
1754
1755 /// The value representing the result of the generated SCEV checks. If it is
1756 /// nullptr no SCEV checks have been generated.
1757 Value *SCEVCheckCond = nullptr;
1758
1759 /// Basic block which contains the generated memory runtime checks, if any.
1760 BasicBlock *MemCheckBlock = nullptr;
1761
1762 /// The value representing the result of the generated memory runtime checks.
1763 /// If it is nullptr no memory runtime checks have been generated.
1764 Value *MemRuntimeCheckCond = nullptr;
1765
1766 DominatorTree *DT;
1767 LoopInfo *LI;
1769
1770 SCEVExpander SCEVExp;
1771 SCEVExpander MemCheckExp;
1772
1773 bool CostTooHigh = false;
1774
1775 Loop *OuterLoop = nullptr;
1776
1778
1779 /// The kind of cost that we are calculating
1781
1782public:
1783 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1786 : DT(DT), LI(LI), TTI(TTI), SCEVExp(*PSE.getSE(), DL, "scev.check"),
1787 MemCheckExp(*PSE.getSE(), DL, "scev.check"), PSE(PSE),
1788 CostKind(CostKind) {}
1789
1790 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1791 /// accurately estimate the cost of the runtime checks. The blocks are
1792 /// un-linked from the IR and are added back during vector code generation. If
1793 /// there is no vector code generation, the check blocks are removed
1794 /// completely.
1795 void create(Loop *L, const LoopAccessInfo &LAI,
1796 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
1797
1798 // Hard cutoff to limit compile-time increase in case a very large number of
1799 // runtime checks needs to be generated.
1800 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1801 // profile info.
1802 CostTooHigh =
1804 if (CostTooHigh)
1805 return;
1806
1807 BasicBlock *LoopHeader = L->getHeader();
1808 BasicBlock *Preheader = L->getLoopPreheader();
1809
1810 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1811 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1812 // may be used by SCEVExpander. The blocks will be un-linked from their
1813 // predecessors and removed from LI & DT at the end of the function.
1814 if (!UnionPred.isAlwaysTrue()) {
1815 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1816 nullptr, "vector.scevcheck");
1817
1818 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1819 &UnionPred, SCEVCheckBlock->getTerminator());
1820 if (isa<Constant>(SCEVCheckCond)) {
1821 // Clean up directly after expanding the predicate to a constant, to
1822 // avoid further expansions re-using anything left over from SCEVExp.
1823 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1824 SCEVCleaner.cleanup();
1825 }
1826 }
1827
1828 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1829 if (RtPtrChecking.Need) {
1830 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1831 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1832 "vector.memcheck");
1833
1834 auto DiffChecks = RtPtrChecking.getDiffChecks();
1835 if (DiffChecks) {
1836 Value *RuntimeVF = nullptr;
1837 MemRuntimeCheckCond = addDiffRuntimeChecks(
1838 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1839 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1840 if (!RuntimeVF)
1841 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1842 return RuntimeVF;
1843 },
1844 IC);
1845 } else {
1846 MemRuntimeCheckCond = addRuntimeChecks(
1847 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1849 }
1850 assert(MemRuntimeCheckCond &&
1851 "no RT checks generated although RtPtrChecking "
1852 "claimed checks are required");
1853 }
1854
1855 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1856
1857 if (!MemCheckBlock && !SCEVCheckBlock)
1858 return;
1859
1860 // Unhook the temporary block with the checks, update various places
1861 // accordingly.
1862 if (SCEVCheckBlock)
1863 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1864 if (MemCheckBlock)
1865 MemCheckBlock->replaceAllUsesWith(Preheader);
1866
1867 if (SCEVCheckBlock) {
1868 SCEVCheckBlock->getTerminator()->moveBefore(
1869 Preheader->getTerminator()->getIterator());
1870 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1871 UI->setDebugLoc(DebugLoc::getTemporary());
1872 Preheader->getTerminator()->eraseFromParent();
1873 }
1874 if (MemCheckBlock) {
1875 MemCheckBlock->getTerminator()->moveBefore(
1876 Preheader->getTerminator()->getIterator());
1877 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1878 UI->setDebugLoc(DebugLoc::getTemporary());
1879 Preheader->getTerminator()->eraseFromParent();
1880 }
1881
1882 DT->changeImmediateDominator(LoopHeader, Preheader);
1883 if (MemCheckBlock) {
1884 DT->eraseNode(MemCheckBlock);
1885 LI->removeBlock(MemCheckBlock);
1886 }
1887 if (SCEVCheckBlock) {
1888 DT->eraseNode(SCEVCheckBlock);
1889 LI->removeBlock(SCEVCheckBlock);
1890 }
1891
1892 // Outer loop is used as part of the later cost calculations.
1893 OuterLoop = L->getParentLoop();
1894 }
1895
1896 InstructionCost getCost() {
1897 if (SCEVCheckBlock || MemCheckBlock)
1898 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1899
1900 if (CostTooHigh) {
1901 InstructionCost Cost;
1902 Cost.setInvalid();
1903 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1904 return Cost;
1905 }
1906
1907 InstructionCost RTCheckCost = 0;
1908 if (SCEVCheckBlock)
1909 for (Instruction &I : *SCEVCheckBlock) {
1910 if (SCEVCheckBlock->getTerminator() == &I)
1911 continue;
1912 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1913 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1914 RTCheckCost += C;
1915 }
1916 if (MemCheckBlock) {
1917 InstructionCost MemCheckCost = 0;
1918 for (Instruction &I : *MemCheckBlock) {
1919 if (MemCheckBlock->getTerminator() == &I)
1920 continue;
1921 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1922 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1923 MemCheckCost += C;
1924 }
1925
1926 // If the runtime memory checks are being created inside an outer loop
1927 // we should find out if these checks are outer loop invariant. If so,
1928 // the checks will likely be hoisted out and so the effective cost will be
1929 // reduced according to the outer loop trip count.
1930 if (OuterLoop) {
1931 ScalarEvolution *SE = MemCheckExp.getSE();
1932 // TODO: If profitable, we could refine this further by analysing every
1933 // individual memory check, since there could be a mixture of loop
1934 // variant and invariant checks that mean the final condition is
1935 // variant.
1936 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1937 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1938 // It seems reasonable to assume that we can reduce the effective
1939 // cost of the checks even when we know nothing about the trip
1940 // count. Assume that the outer loop executes at least twice.
1941 unsigned BestTripCount = 2;
1942
1943 // Get the best known TC estimate.
1944 if (auto EstimatedTC = getSmallBestKnownTC(
1945 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1946 if (EstimatedTC->isFixed())
1947 BestTripCount = EstimatedTC->getFixedValue();
1948
1949 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1950
1951 // Let's ensure the cost is always at least 1.
1952 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1953 (InstructionCost::CostType)1);
1954
1955 if (BestTripCount > 1)
1957 << "We expect runtime memory checks to be hoisted "
1958 << "out of the outer loop. Cost reduced from "
1959 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1960
1961 MemCheckCost = NewMemCheckCost;
1962 }
1963 }
1964
1965 RTCheckCost += MemCheckCost;
1966 }
1967
1968 if (SCEVCheckBlock || MemCheckBlock)
1969 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1970 << "\n");
1971
1972 return RTCheckCost;
1973 }
1974
1975 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1976 /// unused.
1977 ~GeneratedRTChecks() {
1978 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1979 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1980 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
1981 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
1982 if (SCEVChecksUsed)
1983 SCEVCleaner.markResultUsed();
1984
1985 if (MemChecksUsed) {
1986 MemCheckCleaner.markResultUsed();
1987 } else {
1988 auto &SE = *MemCheckExp.getSE();
1989 // Memory runtime check generation creates compares that use expanded
1990 // values. Remove them before running the SCEVExpanderCleaners.
1991 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1992 if (MemCheckExp.isInsertedInstruction(&I))
1993 continue;
1994 SE.forgetValue(&I);
1995 I.eraseFromParent();
1996 }
1997 }
1998 MemCheckCleaner.cleanup();
1999 SCEVCleaner.cleanup();
2000
2001 if (!SCEVChecksUsed)
2002 SCEVCheckBlock->eraseFromParent();
2003 if (!MemChecksUsed)
2004 MemCheckBlock->eraseFromParent();
2005 }
2006
2007 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
2008 /// outside VPlan.
2009 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
2010 using namespace llvm::PatternMatch;
2011 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
2012 return {nullptr, nullptr};
2013
2014 return {SCEVCheckCond, SCEVCheckBlock};
2015 }
2016
2017 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
2018 /// outside VPlan.
2019 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
2020 using namespace llvm::PatternMatch;
2021 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2022 return {nullptr, nullptr};
2023 return {MemRuntimeCheckCond, MemCheckBlock};
2024 }
2025
2026 /// Return true if any runtime checks have been added
2027 bool hasChecks() const {
2028 return getSCEVChecks().first || getMemRuntimeChecks().first;
2029 }
2030};
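// Illustrative note (not part of the original source): the intended
// lifecycle of GeneratedRTChecks, reconstructed from the members above.
// The checks are created eagerly so that they can be priced before the
// final vectorization decision, and are either wired back into the CFG or
// erased by the destructor. The constructor argument list below is an
// assumption made only for this example.
//
//   GeneratedRTChecks Checks(PSE, DT, LI, TTI, DL, CostKind);
//   Checks.create(L, *LAI, UnionPred, VF, IC);   // temporary, un-linked blocks
//   InstructionCost RTCost = Checks.getCost();   // price them up-front
//   if (Checks.hasChecks()) {
//     auto [SCEVCond, SCEVBlock] = Checks.getSCEVChecks();
//     auto [MemCond, MemBlock] = Checks.getMemRuntimeChecks();
//     // ... hook the check blocks back into the vectorized control flow ...
//   }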
2031} // namespace
2032
2038
2043
2044// Return true if \p OuterLp is an outer loop annotated with hints for explicit
2045// vectorization. The loop needs to be annotated with #pragma omp simd
2046 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
2047// vector length information is not provided, vectorization is not considered
2048// explicit. Interleave hints are not allowed either. These limitations will be
2049// relaxed in the future.
2050 // Please note that we are currently forced to abuse the pragma 'clang
2051// vectorize' semantics. This pragma provides *auto-vectorization hints*
2052// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2053// provides *explicit vectorization hints* (LV can bypass legal checks and
2054// assume that vectorization is legal). However, both hints are implemented
2055// using the same metadata (llvm.loop.vectorize, processed by
2056// LoopVectorizeHints). This will be fixed in the future when the native IR
2057// representation for pragma 'omp simd' is introduced.
2058static bool isExplicitVecOuterLoop(Loop *OuterLp,
2060 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2061 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2062
2063 // Only outer loops with an explicit vectorization hint are supported.
2064 // Unannotated outer loops are ignored.
2066 return false;
2067
2068 Function *Fn = OuterLp->getHeader()->getParent();
2069 if (!Hints.allowVectorization(Fn, OuterLp,
2070 true /*VectorizeOnlyWhenForced*/)) {
2071 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2072 return false;
2073 }
2074
2075 if (Hints.getInterleave() > 1) {
2076 // TODO: Interleave support is future work.
2077 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2078 "outer loops.\n");
2079 Hints.emitRemarkWithHints();
2080 return false;
2081 }
2082
2083 return true;
2084}
2085
2089 // Collect inner loops and outer loops without irreducible control flow. For
2090 // now, only collect outer loops that have explicit vectorization hints. If we
2091 // are stress testing the VPlan H-CFG construction, we collect the outermost
2092 // loop of every loop nest.
2093 if (L.isInnermost() || VPlanBuildStressTest ||
2095 LoopBlocksRPO RPOT(&L);
2096 RPOT.perform(LI);
2098 V.push_back(&L);
2099 // TODO: Collect inner loops inside marked outer loops in case
2100 // vectorization fails for the outer loop. Do not invoke
2101 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2102 // already known to be reducible. We can use an inherited attribute for
2103 // that.
2104 return;
2105 }
2106 }
2107 for (Loop *InnerL : L)
2108 collectSupportedLoops(*InnerL, LI, ORE, V);
2109}
2110
2111//===----------------------------------------------------------------------===//
2112// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2113// LoopVectorizationCostModel and LoopVectorizationPlanner.
2114//===----------------------------------------------------------------------===//
2115
2116/// Compute the transformed value of Index at offset StartValue using step
2117/// StepValue.
2118/// For integer induction, returns StartValue + Index * StepValue.
2119/// For pointer induction, returns StartValue[Index * StepValue].
2120/// FIXME: The newly created binary instructions should contain nsw/nuw
2121/// flags, which can be found from the original scalar operations.
2122static Value *
2124 Value *Step,
2126 const BinaryOperator *InductionBinOp) {
2127 using namespace llvm::PatternMatch;
2128 Type *StepTy = Step->getType();
2129 Value *CastedIndex = StepTy->isIntegerTy()
2130 ? B.CreateSExtOrTrunc(Index, StepTy)
2131 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2132 if (CastedIndex != Index) {
2133 CastedIndex->setName(CastedIndex->getName() + ".cast");
2134 Index = CastedIndex;
2135 }
2136
2137 // Note: the IR at this point is broken. We cannot use SE to create any new
2138 // SCEV and then expand it, hoping that SCEV's simplification will give us
2139 // more optimal code. Unfortunately, attempting to do so on invalid IR may
2140 // lead to various SCEV crashes. So all we can do is to use builder and rely
2141 // on InstCombine for future simplifications. Here we handle some trivial
2142 // cases only.
2143 auto CreateAdd = [&B](Value *X, Value *Y) {
2144 assert(X->getType() == Y->getType() && "Types don't match!");
2145 if (match(X, m_ZeroInt()))
2146 return Y;
2147 if (match(Y, m_ZeroInt()))
2148 return X;
2149 return B.CreateAdd(X, Y);
2150 };
2151
2152 // We allow X to be a vector type, in which case Y will potentially be
2153 // splatted into a vector with the same element count.
2154 auto CreateMul = [&B](Value *X, Value *Y) {
2155 assert(X->getType()->getScalarType() == Y->getType() &&
2156 "Types don't match!");
2157 if (match(X, m_One()))
2158 return Y;
2159 if (match(Y, m_One()))
2160 return X;
2161 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2162 if (XVTy && !isa<VectorType>(Y->getType()))
2163 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2164 return B.CreateMul(X, Y);
2165 };
2166
2167 switch (InductionKind) {
2169 assert(!isa<VectorType>(Index->getType()) &&
2170 "Vector indices not supported for integer inductions yet");
2171 assert(Index->getType() == StartValue->getType() &&
2172 "Index type does not match StartValue type");
2173 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2174 return B.CreateSub(StartValue, Index);
2175 auto *Offset = CreateMul(Index, Step);
2176 return CreateAdd(StartValue, Offset);
2177 }
2179 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2181 assert(!isa<VectorType>(Index->getType()) &&
2182 "Vector indices not supported for FP inductions yet");
2183 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2184 assert(InductionBinOp &&
2185 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2186 InductionBinOp->getOpcode() == Instruction::FSub) &&
2187 "Original bin op should be defined for FP induction");
2188
2189 Value *MulExp = B.CreateFMul(Step, Index);
2190 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2191 "induction");
2192 }
2194 return nullptr;
2195 }
2196 llvm_unreachable("invalid enum");
2197}
2198
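// Illustrative sketch (not part of the original source): the arithmetic the
// helper above emits, shown on plain integers for the integer-induction
// case. The function name and use of int64_t are assumptions made only for
// this example; the pointer case instead forms &Start[Index * Step] via a
// ptradd, and the FP case applies the original FAdd/FSub to Start and
// Step * Index.
[[maybe_unused]] static int64_t
transformedIntIndexExample(int64_t Start, int64_t Index, int64_t Step) {
  // Integer induction: StartValue + Index * StepValue, e.g. 10 + 4 * 3 = 22.
  return Start + Index * Step;
}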
2199static std::optional<unsigned> getMaxVScale(const Function &F,
2200 const TargetTransformInfo &TTI) {
2201 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2202 return MaxVScale;
2203
2204 if (F.hasFnAttribute(Attribute::VScaleRange))
2205 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2206
2207 return std::nullopt;
2208}
2209
2210/// For the given VF and UF and maximum trip count computed for the loop, return
2211/// whether the induction variable might overflow in the vectorized loop. If not,
2212/// then we know a runtime overflow check always evaluates to false and can be
2213/// removed.
2215 const LoopVectorizationCostModel *Cost,
2216 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2217 // Always be conservative if we don't know the exact unroll factor.
2218 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2219
2220 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2221 APInt MaxUIntTripCount = IdxTy->getMask();
2222
2223 // The runtime overflow check is known to be false iff the (max) trip-count
2224 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2225 // the vector loop induction variable.
2226 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2227 uint64_t MaxVF = VF.getKnownMinValue();
2228 if (VF.isScalable()) {
2229 std::optional<unsigned> MaxVScale =
2230 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2231 if (!MaxVScale)
2232 return false;
2233 MaxVF *= *MaxVScale;
2234 }
2235
2236 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2237 }
2238
2239 return false;
2240}
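// Illustrative note (not part of the original source): a worked instance of
// the check above. With an i16 widest induction type the maximum unsigned
// trip count is 65535. For VF = 4, UF = 2 and a known maximum trip count of
// 65532, the margin 65535 - 65532 = 3 is not greater than 4 * 2 = 8, so an
// overflow of the vector IV cannot be ruled out and the runtime check must
// stay. With a maximum trip count of 65520 the margin is 15 > 8 and the
// check is known to be false.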
2241
2242// Return whether we allow using masked interleave-groups (for dealing with
2243// strided loads/stores that reside in predicated blocks, or for dealing
2244// with gaps).
2246 // If an override option has been passed in for interleaved accesses, use it.
2247 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2249
2250 return TTI.enableMaskedInterleavedAccessVectorization();
2251}
2252
2254 BasicBlock *CheckIRBB) {
2255 // Note: The block with the minimum trip-count check is already connected
2256 // during earlier VPlan construction.
2257 VPBlockBase *ScalarPH = Plan.getScalarPreheader();
2258 VPBlockBase *PreVectorPH = VectorPHVPBB->getSinglePredecessor();
2259 assert(PreVectorPH->getNumSuccessors() == 2 && "Expected 2 successors");
2260 assert(PreVectorPH->getSuccessors()[0] == ScalarPH && "Unexpected successor");
2261 VPIRBasicBlock *CheckVPIRBB = Plan.createVPIRBasicBlock(CheckIRBB);
2262 VPBlockUtils::insertOnEdge(PreVectorPH, VectorPHVPBB, CheckVPIRBB);
2263 PreVectorPH = CheckVPIRBB;
2264 VPBlockUtils::connectBlocks(PreVectorPH, ScalarPH);
2265 PreVectorPH->swapSuccessors();
2266
2267 // We just connected a new block to the scalar preheader. Update all
2268 // VPPhis by adding an incoming value for it, replicating the last value.
2269 unsigned NumPredecessors = ScalarPH->getNumPredecessors();
2270 for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) {
2271 assert(isa<VPPhi>(&R) && "Phi expected to be VPPhi");
2272 assert(cast<VPPhi>(&R)->getNumIncoming() == NumPredecessors - 1 &&
2273 "must have incoming values for all operands");
2274 R.addOperand(R.getOperand(NumPredecessors - 2));
2275 }
2276}
2277
2279 BasicBlock *VectorPH, ElementCount VF, unsigned UF) const {
2280 // Generate code to check if the loop's trip count is less than VF * UF, or
2281 // equal to it in case a scalar epilogue is required; this implies that the
2282 // vector trip count is zero. This check also covers the case where adding one
2283 // to the backedge-taken count overflowed leading to an incorrect trip count
2284 // of zero. In this case we will also jump to the scalar loop.
2285 auto P = Cost->requiresScalarEpilogue(VF.isVector()) ? ICmpInst::ICMP_ULE
2287
2288 // Reuse existing vector loop preheader for TC checks.
2289 // Note that new preheader block is generated for vector loop.
2290 BasicBlock *const TCCheckBlock = VectorPH;
2292 TCCheckBlock->getContext(),
2293 InstSimplifyFolder(TCCheckBlock->getDataLayout()));
2294 Builder.SetInsertPoint(TCCheckBlock->getTerminator());
2295
2296 // If tail is to be folded, vector loop takes care of all iterations.
2298 Type *CountTy = Count->getType();
2299 Value *CheckMinIters = Builder.getFalse();
2300 auto CreateStep = [&]() -> Value * {
2301 // Create step with max(MinProfitableTripCount, UF * VF).
2302 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
2303 return createStepForVF(Builder, CountTy, VF, UF);
2304
2305 Value *MinProfTC =
2306 Builder.CreateElementCount(CountTy, MinProfitableTripCount);
2307 if (!VF.isScalable())
2308 return MinProfTC;
2309 return Builder.CreateBinaryIntrinsic(
2310 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
2311 };
2312
2313 TailFoldingStyle Style = Cost->getTailFoldingStyle();
2314 if (Style == TailFoldingStyle::None) {
2315 Value *Step = CreateStep();
2316 ScalarEvolution &SE = *PSE.getSE();
2317 // TODO: Emit unconditional branch to vector preheader instead of
2318 // conditional branch with known condition.
2319 const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
2320 // Check if the trip count is < the step.
2321 if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
2322 // TODO: Ensure step is at most the trip count when determining max VF and
2323 // UF, w/o tail folding.
2324 CheckMinIters = Builder.getTrue();
2326 TripCountSCEV, SE.getSCEV(Step))) {
2327 // Generate the minimum iteration check only if we cannot prove the
2328 // check is known to be true, or known to be false.
2329 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2330 } // else step known to be < trip count, use CheckMinIters preset to false.
2331 } else if (VF.isScalable() && !TTI->isVScaleKnownToBeAPowerOfTwo() &&
2334 // vscale is not necessarily a power-of-2, which means we cannot guarantee
2335 // an overflow to zero when updating induction variables and so an
2336 // additional overflow check is required before entering the vector loop.
2337
2338 // Get the maximum unsigned value for the type.
2339 Value *MaxUIntTripCount =
2340 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2341 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
2342
2343 // Don't execute the vector loop if (UMax - n) < (VF * UF).
2344 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
2345 }
2346 return CheckMinIters;
2347}
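// Illustrative note (not part of the original source): for VF = 4, UF = 2,
// no tail folding, and assuming the minimum profitable trip count does not
// exceed VF * UF, the check built above branches to the scalar loop when
// the trip count is less than 8 (or less than or equal to 8 when a scalar
// epilogue is required, since at least one iteration must remain for it).
// If the trip count is provably smaller than the step the check folds to
// true and only the scalar loop runs; if it is provably large enough, the
// check stays false and the vector loop is always entered.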
2348
2349/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2350/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2351/// predecessors and successors of VPBB, if any, are rewired to the new
2352/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2354 BasicBlock *IRBB,
2355 VPlan *Plan = nullptr) {
2356 if (!Plan)
2357 Plan = VPBB->getPlan();
2358 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2359 auto IP = IRVPBB->begin();
2360 for (auto &R : make_early_inc_range(VPBB->phis()))
2361 R.moveBefore(*IRVPBB, IP);
2362
2363 for (auto &R :
2365 R.moveBefore(*IRVPBB, IRVPBB->end());
2366
2367 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2368 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2369 return IRVPBB;
2370}
2371
2373 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2374 assert(VectorPH && "Invalid loop structure");
2375 assert((OrigLoop->getUniqueLatchExitBlock() ||
2376 Cost->requiresScalarEpilogue(VF.isVector())) &&
2377 "loops not exiting via the latch without required epilogue?");
2378
2379 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2380 // wrapping the newly created scalar preheader here at the moment, because the
2381 // Plan's scalar preheader may be unreachable at this point. Instead it is
2382 // replaced in executePlan.
2383 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2384 Twine(Prefix) + "scalar.ph");
2385}
2386
2387/// Return the expanded step for \p ID using \p ExpandedSCEVs to look up SCEV
2388/// expansion results.
2390 const SCEV2ValueTy &ExpandedSCEVs) {
2391 const SCEV *Step = ID.getStep();
2392 if (auto *C = dyn_cast<SCEVConstant>(Step))
2393 return C->getValue();
2394 if (auto *U = dyn_cast<SCEVUnknown>(Step))
2395 return U->getValue();
2396 Value *V = ExpandedSCEVs.lookup(Step);
2397 assert(V && "SCEV must be expanded at this point");
2398 return V;
2399}
2400
2401/// Knowing that loop \p L executes a single vector iteration, add instructions
2402/// that will get simplified and thus should not have any cost to \p
2403/// InstsToIgnore.
2406 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2407 auto *Cmp = L->getLatchCmpInst();
2408 if (Cmp)
2409 InstsToIgnore.insert(Cmp);
2410 for (const auto &KV : IL) {
2411 // Extract the key by hand so that it can be used in the lambda below. Note
2412 // that captured structured bindings are a C++20 extension.
2413 const PHINode *IV = KV.first;
2414
2415 // Get next iteration value of the induction variable.
2416 Instruction *IVInst =
2417 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2418 if (all_of(IVInst->users(),
2419 [&](const User *U) { return U == IV || U == Cmp; }))
2420 InstsToIgnore.insert(IVInst);
2421 }
2422}
2423
2425 // Create a new IR basic block for the scalar preheader.
2426 BasicBlock *ScalarPH = createScalarPreheader("");
2427 return ScalarPH->getSinglePredecessor();
2428}
2429
2430namespace {
2431
2432struct CSEDenseMapInfo {
2433 static bool canHandle(const Instruction *I) {
2436 }
2437
2438 static inline Instruction *getEmptyKey() {
2440 }
2441
2442 static inline Instruction *getTombstoneKey() {
2443 return DenseMapInfo<Instruction *>::getTombstoneKey();
2444 }
2445
2446 static unsigned getHashValue(const Instruction *I) {
2447 assert(canHandle(I) && "Unknown instruction!");
2448 return hash_combine(I->getOpcode(),
2449 hash_combine_range(I->operand_values()));
2450 }
2451
2452 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2453 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2454 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2455 return LHS == RHS;
2456 return LHS->isIdenticalTo(RHS);
2457 }
2458};
2459
2460} // end anonymous namespace
2461
2462 /// Perform CSE of induction variable instructions.
2463static void cse(BasicBlock *BB) {
2464 // Perform simple cse.
2466 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2467 if (!CSEDenseMapInfo::canHandle(&In))
2468 continue;
2469
2470 // Check if we can replace this instruction with any of the
2471 // visited instructions.
2472 if (Instruction *V = CSEMap.lookup(&In)) {
2473 In.replaceAllUsesWith(V);
2474 In.eraseFromParent();
2475 continue;
2476 }
2477
2478 CSEMap[&In] = &In;
2479 }
2480}
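// Illustrative note (not part of the original source): after vectorization
// the header may contain duplicated computations accepted by canHandle
// above, e.g. two identical address calculations
//   %g1 = getelementptr inbounds i32, ptr %a, i64 %index
//   %g2 = getelementptr inbounds i32, ptr %a, i64 %index
// Because instructions are keyed by opcode and operands and compared with
// isIdenticalTo, %g2 is found equal to %g1, its uses are rewritten to %g1,
// and it is erased.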
2481
2482/// This function attempts to return a value that represents the ElementCount
2483/// at runtime. For fixed-width VFs we know this precisely at compile
2484/// time, but for scalable VFs we calculate it based on an estimate of the
2485/// vscale value.
2487 std::optional<unsigned> VScale) {
2488 unsigned EstimatedVF = VF.getKnownMinValue();
2489 if (VF.isScalable())
2490 if (VScale)
2491 EstimatedVF *= *VScale;
2492 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2493 return EstimatedVF;
2494}
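// Illustrative sketch (not part of the original source): the estimate above
// on plain integers. The helper name and parameters are assumptions made
// only for this example.
[[maybe_unused]] static unsigned
estimateRuntimeVFExample(unsigned KnownMinVF, bool Scalable,
                         std::optional<unsigned> VScale) {
  unsigned EstimatedVF = KnownMinVF; // e.g. <vscale x 4 x i32> -> 4
  if (Scalable && VScale)
    EstimatedVF *= *VScale; // with vscale estimated as 2: 4 * 2 = 8 lanes
  return EstimatedVF;       // fixed-width VFs are returned unchanged
}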
2495
2498 ElementCount VF) const {
2499 // We only need to calculate a cost if the VF is scalar; for actual vectors
2500 // we should already have a pre-calculated cost at each VF.
2501 if (!VF.isScalar())
2502 return getCallWideningDecision(CI, VF).Cost;
2503
2504 Type *RetTy = CI->getType();
2506 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2507 return *RedCost;
2508
2510 for (auto &ArgOp : CI->args())
2511 Tys.push_back(ArgOp->getType());
2512
2513 InstructionCost ScalarCallCost =
2514 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2515
2516 // If this is an intrinsic we may have a lower cost for it.
2519 return std::min(ScalarCallCost, IntrinsicCost);
2520 }
2521 return ScalarCallCost;
2522}
2523
2525 if (VF.isScalar() || !canVectorizeTy(Ty))
2526 return Ty;
2527 return toVectorizedTy(Ty, VF);
2528}
2529
2532 ElementCount VF) const {
2534 assert(ID && "Expected intrinsic call!");
2535 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2536 FastMathFlags FMF;
2537 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2538 FMF = FPMO->getFastMathFlags();
2539
2542 SmallVector<Type *> ParamTys;
2543 std::transform(FTy->param_begin(), FTy->param_end(),
2544 std::back_inserter(ParamTys),
2545 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2546
2547 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2550 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2551}
2552
2554 // Fix widened non-induction PHIs by setting up the PHI operands.
2555 fixNonInductionPHIs(State);
2556
2557 // Don't apply optimizations below when no (vector) loop remains, as they all
2558 // require one at the moment.
2559 VPBasicBlock *HeaderVPBB =
2560 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2561 if (!HeaderVPBB)
2562 return;
2563
2564 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2565
2566 // Remove redundant induction instructions.
2567 cse(HeaderBB);
2568}
2569
2571 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2573 for (VPRecipeBase &P : VPBB->phis()) {
2575 if (!VPPhi)
2576 continue;
2577 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2578 // Make sure the builder has a valid insert point.
2579 Builder.SetInsertPoint(NewPhi);
2580 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2581 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2582 }
2583 }
2584}
2585
2586void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2587 // We should not collect Scalars more than once per VF. Right now, this
2588 // function is called from collectUniformsAndScalars(), which already does
2589 // this check. Collecting Scalars for VF=1 does not make any sense.
2590 assert(VF.isVector() && !Scalars.contains(VF) &&
2591 "This function should not be visited twice for the same VF");
2592
2593 // This avoids any chances of creating a REPLICATE recipe during planning
2594 // since that would result in generation of scalarized code during execution,
2595 // which is not supported for scalable vectors.
2596 if (VF.isScalable()) {
2597 Scalars[VF].insert_range(Uniforms[VF]);
2598 return;
2599 }
2600
2602
2603 // These sets are used to seed the analysis with pointers used by memory
2604 // accesses that will remain scalar.
2606 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2607 auto *Latch = TheLoop->getLoopLatch();
2608
2609 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2610 // The pointer operands of loads and stores will be scalar as long as the
2611 // memory access is not a gather or scatter operation. The value operand of a
2612 // store will remain scalar if the store is scalarized.
2613 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2614 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2615 assert(WideningDecision != CM_Unknown &&
2616 "Widening decision should be ready at this moment");
2617 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2618 if (Ptr == Store->getValueOperand())
2619 return WideningDecision == CM_Scalarize;
2620 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2621 "Ptr is neither a value or pointer operand");
2622 return WideningDecision != CM_GatherScatter;
2623 };
2624
2625 // A helper that returns true if the given value is a getelementptr
2626 // instruction contained in the loop.
2627 auto IsLoopVaryingGEP = [&](Value *V) {
2628 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2629 };
2630
2631 // A helper that evaluates a memory access's use of a pointer. If the use will
2632 // be a scalar use and the pointer is only used by memory accesses, we place
2633 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2634 // PossibleNonScalarPtrs.
2635 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2636 // We only care about bitcast and getelementptr instructions contained in
2637 // the loop.
2638 if (!IsLoopVaryingGEP(Ptr))
2639 return;
2640
2641 // If the pointer has already been identified as scalar (e.g., if it was
2642 // also identified as uniform), there's nothing to do.
2643 auto *I = cast<Instruction>(Ptr);
2644 if (Worklist.count(I))
2645 return;
2646
2647 // If the use of the pointer will be a scalar use, and all users of the
2648 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2649 // place the pointer in PossibleNonScalarPtrs.
2650 if (IsScalarUse(MemAccess, Ptr) &&
2652 ScalarPtrs.insert(I);
2653 else
2654 PossibleNonScalarPtrs.insert(I);
2655 };
2656
2657 // We seed the scalars analysis with two classes of instructions: (1)
2658 // instructions marked uniform-after-vectorization and (2) bitcast,
2659 // getelementptr and (pointer) phi instructions used by memory accesses
2660 // requiring a scalar use.
2661 //
2662 // (1) Add to the worklist all instructions that have been identified as
2663 // uniform-after-vectorization.
2664 Worklist.insert_range(Uniforms[VF]);
2665
2666 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2667 // memory accesses requiring a scalar use. The pointer operands of loads and
2668 // stores will be scalar unless the operation is a gather or scatter.
2669 // The value operand of a store will remain scalar if the store is scalarized.
2670 for (auto *BB : TheLoop->blocks())
2671 for (auto &I : *BB) {
2672 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2673 EvaluatePtrUse(Load, Load->getPointerOperand());
2674 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2675 EvaluatePtrUse(Store, Store->getPointerOperand());
2676 EvaluatePtrUse(Store, Store->getValueOperand());
2677 }
2678 }
2679 for (auto *I : ScalarPtrs)
2680 if (!PossibleNonScalarPtrs.count(I)) {
2681 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2682 Worklist.insert(I);
2683 }
2684
2685 // Insert the forced scalars.
2686 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2687 // induction variable when the PHI user is scalarized.
2688 auto ForcedScalar = ForcedScalars.find(VF);
2689 if (ForcedScalar != ForcedScalars.end())
2690 for (auto *I : ForcedScalar->second) {
2691 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2692 Worklist.insert(I);
2693 }
2694
2695 // Expand the worklist by looking through any bitcasts and getelementptr
2696 // instructions we've already identified as scalar. This is similar to the
2697 // expansion step in collectLoopUniforms(); however, here we're only
2698 // expanding to include additional bitcasts and getelementptr instructions.
2699 unsigned Idx = 0;
2700 while (Idx != Worklist.size()) {
2701 Instruction *Dst = Worklist[Idx++];
2702 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2703 continue;
2704 auto *Src = cast<Instruction>(Dst->getOperand(0));
2705 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2706 auto *J = cast<Instruction>(U);
2707 return !TheLoop->contains(J) || Worklist.count(J) ||
2708 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2709 IsScalarUse(J, Src));
2710 })) {
2711 Worklist.insert(Src);
2712 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2713 }
2714 }
2715
2716 // An induction variable will remain scalar if all users of the induction
2717 // variable and induction variable update remain scalar.
2718 for (const auto &Induction : Legal->getInductionVars()) {
2719 auto *Ind = Induction.first;
2720 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2721
2722 // If tail-folding is applied, the primary induction variable will be used
2723 // to feed a vector compare.
2724 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2725 continue;
2726
2727 // Returns true if \p Indvar is a pointer induction that is used directly by
2728 // load/store instruction \p I.
2729 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2730 Instruction *I) {
2731 return Induction.second.getKind() ==
2734 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2735 };
2736
2737 // Determine if all users of the induction variable are scalar after
2738 // vectorization.
2739 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2740 auto *I = cast<Instruction>(U);
2741 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2742 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2743 });
2744 if (!ScalarInd)
2745 continue;
2746
2747 // If the induction variable update is a fixed-order recurrence, neither the
2748 // induction variable nor its update should be marked scalar after
2749 // vectorization.
2750 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2751 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2752 continue;
2753
2754 // Determine if all users of the induction variable update instruction are
2755 // scalar after vectorization.
2756 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2757 auto *I = cast<Instruction>(U);
2758 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2759 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2760 });
2761 if (!ScalarIndUpdate)
2762 continue;
2763
2764 // The induction variable and its update instruction will remain scalar.
2765 Worklist.insert(Ind);
2766 Worklist.insert(IndUpdate);
2767 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2768 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2769 << "\n");
2770 }
2771
2772 Scalars[VF].insert_range(Worklist);
2773}
2774
2776 Instruction *I, ElementCount VF) const {
2777 if (!isPredicatedInst(I))
2778 return false;
2779
2780 // Do we have a non-scalar lowering for this predicated
2781 // instruction? No - it is scalar with predication.
2782 switch(I->getOpcode()) {
2783 default:
2784 return true;
2785 case Instruction::Call:
2786 if (VF.isScalar())
2787 return true;
2789 case Instruction::Load:
2790 case Instruction::Store: {
2792 auto *Ty = getLoadStoreType(I);
2793 unsigned AS = getLoadStoreAddressSpace(I);
2794 Type *VTy = Ty;
2795 if (VF.isVector())
2796 VTy = VectorType::get(Ty, VF);
2797 const Align Alignment = getLoadStoreAlignment(I);
2798 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2799 TTI.isLegalMaskedGather(VTy, Alignment))
2800 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2801 TTI.isLegalMaskedScatter(VTy, Alignment));
2802 }
2803 case Instruction::UDiv:
2804 case Instruction::SDiv:
2805 case Instruction::SRem:
2806 case Instruction::URem: {
2807 // We have the option to use the safe-divisor idiom to avoid predication.
2808 // The cost based decision here will always select safe-divisor for
2809 // scalable vectors as scalarization isn't legal.
2810 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2811 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2812 }
2813 }
2814}
2815
2816// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2818 // TODO: We can use the loop-preheader as context point here and get
2819 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2821 (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
2823 return false;
2824
2825 // If the instruction was executed conditionally in the original scalar loop,
2826 // predication is needed with a mask whose lanes are all possibly inactive.
2827 if (Legal->blockNeedsPredication(I->getParent()))
2828 return true;
2829
2830 // If we're not folding the tail by masking, predication is unnecessary.
2831 if (!foldTailByMasking())
2832 return false;
2833
2834 // All that remain are instructions with side-effects originally executed in
2835 // the loop unconditionally, but now execute under a tail-fold mask (only)
2836 // having at least one active lane (the first). If the side-effects of the
2837 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2838 // - it will cause the same side-effects as when masked.
2839 switch(I->getOpcode()) {
2840 default:
2842 "instruction should have been considered by earlier checks");
2843 case Instruction::Call:
2844 // Side-effects of a Call are assumed to be non-invariant, needing a
2845 // (fold-tail) mask.
2846 assert(Legal->isMaskRequired(I) &&
2847 "should have returned earlier for calls not needing a mask");
2848 return true;
2849 case Instruction::Load:
2850 // If the address is loop invariant no predication is needed.
2851 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2852 case Instruction::Store: {
2853 // For stores, we need to prove both speculation safety (which follows from
2854 // the same argument as loads), but also must prove the value being stored
2855 // is correct. The easiest form of the latter is to require that all values
2856 // stored are the same.
2857 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2858 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2859 }
2860 case Instruction::UDiv:
2861 case Instruction::SDiv:
2862 case Instruction::SRem:
2863 case Instruction::URem:
2864 // If the divisor is loop-invariant no predication is needed.
2865 return !Legal->isInvariant(I->getOperand(1));
2866 }
2867}
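// Illustrative note (not part of the original source): two hypothetical
// cases of the logic above. A store that was guarded by an if inside the
// original loop body needs predication regardless of tail folding. A udiv
// that executed unconditionally in the original loop with a loop-invariant
// divisor needs no predication even when the tail is folded, because
// executing it with the tail-fold mask dropped produces the same safe
// side effects.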
2868
2869std::pair<InstructionCost, InstructionCost>
2871 ElementCount VF) const {
2872 assert(I->getOpcode() == Instruction::UDiv ||
2873 I->getOpcode() == Instruction::SDiv ||
2874 I->getOpcode() == Instruction::SRem ||
2875 I->getOpcode() == Instruction::URem);
2877
2878 // Scalarization isn't legal for scalable vector types
2879 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2880 if (!VF.isScalable()) {
2881 // Get the scalarization cost and scale this amount by the probability of
2882 // executing the predicated block. If the instruction is not predicated,
2883 // we fall through to the next case.
2884 ScalarizationCost = 0;
2885
2886 // These instructions have a non-void type, so account for the phi nodes
2887 // that we will create. This cost is likely to be zero. The phi node
2888 // cost, if any, should be scaled by the block probability because it
2889 // models a copy at the end of each predicated block.
2890 ScalarizationCost +=
2891 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2892
2893 // The cost of the non-predicated instruction.
2894 ScalarizationCost +=
2895 VF.getFixedValue() *
2896 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2897
2898 // The cost of insertelement and extractelement instructions needed for
2899 // scalarization.
2900 ScalarizationCost += getScalarizationOverhead(I, VF);
2901
2902 // Scale the cost by the probability of executing the predicated blocks.
2903 // This assumes the predicated block for each vector lane is equally
2904 // likely.
2905 ScalarizationCost = ScalarizationCost / getPredBlockCostDivisor(CostKind);
2906 }
2907 InstructionCost SafeDivisorCost = 0;
2908
2909 auto *VecTy = toVectorTy(I->getType(), VF);
2910
2911 // The cost of the select guard to ensure all lanes are well defined
2912 // after we speculate above any internal control flow.
2913 SafeDivisorCost +=
2914 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2915 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2917
2918 SmallVector<const Value *, 4> Operands(I->operand_values());
2919 SafeDivisorCost += TTI.getArithmeticInstrCost(
2920 I->getOpcode(), VecTy, CostKind,
2921 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2922 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2923 Operands, I);
2924 return {ScalarizationCost, SafeDivisorCost};
2925}
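// Illustrative note (not part of the original source): the two strategies
// being priced above for a predicated udiv. Scalarization emits VF guarded
// scalar udivs plus the extracts, inserts and phis they require, scaled by
// the probability that the predicated block executes. The safe-divisor
// idiom instead keeps the operation in vector form and selects a benign
// divisor (e.g. 1) for masked-off lanes, paying for one vector select plus
// one unpredicated vector udiv.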
2926
2928 Instruction *I, ElementCount VF) const {
2929 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2931 "Decision should not be set yet.");
2932 auto *Group = getInterleavedAccessGroup(I);
2933 assert(Group && "Must have a group.");
2934 unsigned InterleaveFactor = Group->getFactor();
2935
2936 // If the instruction's allocated size doesn't equal its type size, it
2937 // requires padding and will be scalarized.
2938 auto &DL = I->getDataLayout();
2939 auto *ScalarTy = getLoadStoreType(I);
2940 if (hasIrregularType(ScalarTy, DL))
2941 return false;
2942
2943 // For scalable vectors, the interleave factors must be <= 8 since we require
2944 // the (de)interleaveN intrinsics instead of shufflevectors.
2945 if (VF.isScalable() && InterleaveFactor > 8)
2946 return false;
2947
2948 // If the group involves a non-integral pointer, we may not be able to
2949 // losslessly cast all values to a common type.
2950 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2951 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
2952 Instruction *Member = Group->getMember(Idx);
2953 if (!Member)
2954 continue;
2955 auto *MemberTy = getLoadStoreType(Member);
2956 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2957 // Don't coerce non-integral pointers to integers or vice versa.
2958 if (MemberNI != ScalarNI)
2959 // TODO: Consider adding special nullptr value case here
2960 return false;
2961 if (MemberNI && ScalarNI &&
2962 ScalarTy->getPointerAddressSpace() !=
2963 MemberTy->getPointerAddressSpace())
2964 return false;
2965 }
2966
2967 // Check if masking is required.
2968 // A Group may need masking for one of two reasons: it resides in a block that
2969 // needs predication, or it was decided to use masking to deal with gaps
2970 // (either a gap at the end of a load-access that may result in a speculative
2971 // load, or any gaps in a store-access).
2972 bool PredicatedAccessRequiresMasking =
2973 blockNeedsPredicationForAnyReason(I->getParent()) &&
2974 Legal->isMaskRequired(I);
2975 bool LoadAccessWithGapsRequiresEpilogMasking =
2976 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2978 bool StoreAccessWithGapsRequiresMasking =
2979 isa<StoreInst>(I) && !Group->isFull();
2980 if (!PredicatedAccessRequiresMasking &&
2981 !LoadAccessWithGapsRequiresEpilogMasking &&
2982 !StoreAccessWithGapsRequiresMasking)
2983 return true;
2984
2985 // If masked interleaving is required, we expect that the user/target had
2986 // enabled it, because otherwise it either wouldn't have been created or
2987 // it should have been invalidated by the CostModel.
2989 "Masked interleave-groups for predicated accesses are not enabled.");
2990
2991 if (Group->isReverse())
2992 return false;
2993
2994 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2995 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2996 StoreAccessWithGapsRequiresMasking;
2997 if (VF.isScalable() && NeedsMaskForGaps)
2998 return false;
2999
3000 auto *Ty = getLoadStoreType(I);
3001 const Align Alignment = getLoadStoreAlignment(I);
3002 unsigned AS = getLoadStoreAddressSpace(I);
3003 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
3004 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
3005}
3006
3008 Instruction *I, ElementCount VF) {
3009 // Get and ensure we have a valid memory instruction.
3010 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
3011
3013 auto *ScalarTy = getLoadStoreType(I);
3014
3015 // First of all, in order to be widened, the pointer must be consecutive.
3016 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
3017 return false;
3018
3019 // If the instruction is a store located in a predicated block, it will be
3020 // scalarized.
3021 if (isScalarWithPredication(I, VF))
3022 return false;
3023
3024 // If the instruction's allocated size doesn't equal its type size, it
3025 // requires padding and will be scalarized.
3026 auto &DL = I->getDataLayout();
3027 if (hasIrregularType(ScalarTy, DL))
3028 return false;
3029
3030 return true;
3031}
3032
3033void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
3034 // We should not collect Uniforms more than once per VF. Right now,
3035 // this function is called from collectUniformsAndScalars(), which
3036 // already does this check. Collecting Uniforms for VF=1 does not make any
3037 // sense.
3038
3039 assert(VF.isVector() && !Uniforms.contains(VF) &&
3040 "This function should not be visited twice for the same VF");
3041
3042 // Visit the list of Uniforms. If we find no uniform value, we won't
3043 // analyze again. Uniforms.count(VF) will return 1.
3044 Uniforms[VF].clear();
3045
3046 // Now we know that the loop is vectorizable!
3047 // Collect instructions inside the loop that will remain uniform after
3048 // vectorization.
3049
3050 // Global values, params and instructions outside of current loop are out of
3051 // scope.
3052 auto IsOutOfScope = [&](Value *V) -> bool {
3054 return (!I || !TheLoop->contains(I));
3055 };
3056
3057 // Worklist containing uniform instructions demanding lane 0.
3058 SetVector<Instruction *> Worklist;
3059
3060 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3061 // that require predication must not be considered uniform after
3062 // vectorization, because that would create an erroneous replicating region
3063 // where only a single instance out of VF should be formed.
3064 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3065 if (IsOutOfScope(I)) {
3066 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3067 << *I << "\n");
3068 return;
3069 }
3070 if (isPredicatedInst(I)) {
3071 LLVM_DEBUG(
3072 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3073 << "\n");
3074 return;
3075 }
3076 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3077 Worklist.insert(I);
3078 };
3079
3080 // Start with the conditional branches exiting the loop. If the branch
3081 // condition is an instruction contained in the loop that is only used by the
3082 // branch, it is uniform. Note conditions from uncountable early exits are not
3083 // uniform.
3085 TheLoop->getExitingBlocks(Exiting);
3086 for (BasicBlock *E : Exiting) {
3087 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3088 continue;
3089 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3090 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3091 AddToWorklistIfAllowed(Cmp);
3092 }
3093
3094 auto PrevVF = VF.divideCoefficientBy(2);
3095 // Return true if all lanes perform the same memory operation, and we can
3096 // thus choose to execute only one.
3097 auto IsUniformMemOpUse = [&](Instruction *I) {
3098 // If the value was already known to not be uniform for the previous
3099 // (smaller VF), it cannot be uniform for the larger VF.
3100 if (PrevVF.isVector()) {
3101 auto Iter = Uniforms.find(PrevVF);
3102 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3103 return false;
3104 }
3105 if (!Legal->isUniformMemOp(*I, VF))
3106 return false;
3107 if (isa<LoadInst>(I))
3108 // Loading the same address always produces the same result - at least
3109 // assuming aliasing and ordering which have already been checked.
3110 return true;
3111 // Storing the same value on every iteration.
3112 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3113 };
3114
3115 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3116 InstWidening WideningDecision = getWideningDecision(I, VF);
3117 assert(WideningDecision != CM_Unknown &&
3118 "Widening decision should be ready at this moment");
3119
3120 if (IsUniformMemOpUse(I))
3121 return true;
3122
3123 return (WideningDecision == CM_Widen ||
3124 WideningDecision == CM_Widen_Reverse ||
3125 WideningDecision == CM_Interleave);
3126 };
3127
3128 // Returns true if Ptr is the pointer operand of a memory access instruction
3129 // I, I is known to not require scalarization, and the pointer is not also
3130 // stored.
3131 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3132 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3133 return false;
3134 return getLoadStorePointerOperand(I) == Ptr &&
3135 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3136 };
3137
3138 // Holds a list of values which are known to have at least one uniform use.
3139 // Note that there may be other uses which aren't uniform. A "uniform use"
3140 // here is something which only demands lane 0 of the unrolled iterations;
3141 // it does not imply that all lanes produce the same value (e.g. this is not
3142 // the usual meaning of uniform)
3143 SetVector<Value *> HasUniformUse;
3144
3145 // Scan the loop for instructions which are either a) known to have only
3146 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3147 for (auto *BB : TheLoop->blocks())
3148 for (auto &I : *BB) {
3149 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3150 switch (II->getIntrinsicID()) {
3151 case Intrinsic::sideeffect:
3152 case Intrinsic::experimental_noalias_scope_decl:
3153 case Intrinsic::assume:
3154 case Intrinsic::lifetime_start:
3155 case Intrinsic::lifetime_end:
3156 if (TheLoop->hasLoopInvariantOperands(&I))
3157 AddToWorklistIfAllowed(&I);
3158 break;
3159 default:
3160 break;
3161 }
3162 }
3163
3164 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3165 if (IsOutOfScope(EVI->getAggregateOperand())) {
3166 AddToWorklistIfAllowed(EVI);
3167 continue;
3168 }
3169 // Only ExtractValue instructions where the aggregate value comes from a
3170 // call are allowed to be non-uniform.
3171 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3172 "Expected aggregate value to be call return value");
3173 }
3174
3175 // If there's no pointer operand, there's nothing to do.
3176 auto *Ptr = getLoadStorePointerOperand(&I);
3177 if (!Ptr)
3178 continue;
3179
3180 // If the pointer can be proven to be uniform, always add it to the
3181 // worklist.
3182 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3183 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3184
3185 if (IsUniformMemOpUse(&I))
3186 AddToWorklistIfAllowed(&I);
3187
3188 if (IsVectorizedMemAccessUse(&I, Ptr))
3189 HasUniformUse.insert(Ptr);
3190 }
3191
3192 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3193 // demanding) users. Since loops are assumed to be in LCSSA form, this
3194 // disallows uses outside the loop as well.
3195 for (auto *V : HasUniformUse) {
3196 if (IsOutOfScope(V))
3197 continue;
3198 auto *I = cast<Instruction>(V);
3199 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3200 auto *UI = cast<Instruction>(U);
3201 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3202 });
3203 if (UsersAreMemAccesses)
3204 AddToWorklistIfAllowed(I);
3205 }
3206
3207 // Expand Worklist in topological order: whenever a new instruction
3208 // is added, its users should already be inside Worklist. This ensures
3209 // a uniform instruction will only be used by uniform instructions.
3210 unsigned Idx = 0;
3211 while (Idx != Worklist.size()) {
3212 Instruction *I = Worklist[Idx++];
3213
3214 for (auto *OV : I->operand_values()) {
3215 // isOutOfScope operands cannot be uniform instructions.
3216 if (IsOutOfScope(OV))
3217 continue;
3218 // Fixed-order recurrence PHIs should typically be considered
3219 // non-uniform.
3220 auto *OP = dyn_cast<PHINode>(OV);
3221 if (OP && Legal->isFixedOrderRecurrence(OP))
3222 continue;
3223 // If all the users of the operand are uniform, then add the
3224 // operand into the uniform worklist.
3225 auto *OI = cast<Instruction>(OV);
3226 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3227 auto *J = cast<Instruction>(U);
3228 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3229 }))
3230 AddToWorklistIfAllowed(OI);
3231 }
3232 }
3233
3234 // For an instruction to be added into Worklist above, all its users inside
3235 // the loop should also be in Worklist. However, this condition cannot be
3236 // true for phi nodes that form a cyclic dependence. We must process phi
3237 // nodes separately. An induction variable will remain uniform if all users
3238 // of the induction variable and induction variable update remain uniform.
3239 // The code below handles both pointer and non-pointer induction variables.
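// Illustrative example (hypothetical loop): in 'for (i = 0; i < n; ++i) a[i] = 1;'
// the induction 'i' and its update 'i + 1' are used only by the vectorized
// address computation and the latch compare, so both stay uniform; if 'i'
// itself were stored element-wise (a[i] = i), it would become non-uniform.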
3240 BasicBlock *Latch = TheLoop->getLoopLatch();
3241 for (const auto &Induction : Legal->getInductionVars()) {
3242 auto *Ind = Induction.first;
3243 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3244
3245 // Determine if all users of the induction variable are uniform after
3246 // vectorization.
3247 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3248 auto *I = cast<Instruction>(U);
3249 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3250 IsVectorizedMemAccessUse(I, Ind);
3251 });
3252 if (!UniformInd)
3253 continue;
3254
3255 // Determine if all users of the induction variable update instruction are
3256 // uniform after vectorization.
3257 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3258 auto *I = cast<Instruction>(U);
3259 return I == Ind || Worklist.count(I) ||
3260 IsVectorizedMemAccessUse(I, IndUpdate);
3261 });
3262 if (!UniformIndUpdate)
3263 continue;
3264
3265 // The induction variable and its update instruction will remain uniform.
3266 AddToWorklistIfAllowed(Ind);
3267 AddToWorklistIfAllowed(IndUpdate);
3268 }
3269
3270 Uniforms[VF].insert_range(Worklist);
3271}
3272
3273 bool LoopVectorizationCostModel::runtimeChecksRequired() {
3274 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3275
3276 if (Legal->getRuntimePointerChecking()->Need) {
3277 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3278 "runtime pointer checks needed. Enable vectorization of this "
3279 "loop with '#pragma clang loop vectorize(enable)' when "
3280 "compiling with -Os/-Oz",
3281 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3282 return true;
3283 }
3284
3285 if (!PSE.getPredicate().isAlwaysTrue()) {
3286 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3287 "runtime SCEV checks needed. Enable vectorization of this "
3288 "loop with '#pragma clang loop vectorize(enable)' when "
3289 "compiling with -Os/-Oz",
3290 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3291 return true;
3292 }
3293
3294 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3295 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3296 reportVectorizationFailure("Runtime stride check for small trip count",
3297 "runtime stride == 1 checks needed. Enable vectorization of "
3298 "this loop without such check by compiling with -Os/-Oz",
3299 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3300 return true;
3301 }
3302
3303 return false;
3304}
3305
3306bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3307 if (IsScalableVectorizationAllowed)
3308 return *IsScalableVectorizationAllowed;
3309
3310 IsScalableVectorizationAllowed = false;
3311 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3312 return false;
3313
3314 if (Hints->isScalableVectorizationDisabled()) {
3315 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3316 "ScalableVectorizationDisabled", ORE, TheLoop);
3317 return false;
3318 }
3319
3320 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3321
3322 auto MaxScalableVF = ElementCount::getScalable(
3323 std::numeric_limits<ElementCount::ScalarTy>::max());
3324
3325 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3326 // FIXME: While for scalable vectors this is currently sufficient, this should
3327 // be replaced by a more detailed mechanism that filters out specific VFs,
3328 // instead of invalidating vectorization for a whole set of VFs based on the
3329 // MaxVF.
3330
3331 // Disable scalable vectorization if the loop contains unsupported reductions.
3332 if (!canVectorizeReductions(MaxScalableVF)) {
3334 "Scalable vectorization not supported for the reduction "
3335 "operations found in this loop.",
3336 "ScalableVFUnfeasible", ORE, TheLoop);
3337 return false;
3338 }
3339
3340 // Disable scalable vectorization if the loop contains any instructions
3341 // with element types not supported for scalable vectors.
3342 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3343 return !Ty->isVoidTy() &&
3344 !this->TTI.isElementTypeLegalForScalableVector(Ty);
3345 })) {
3346 reportVectorizationInfo("Scalable vectorization is not supported "
3347 "for all element types found in this loop.",
3348 "ScalableVFUnfeasible", ORE, TheLoop);
3349 return false;
3350 }
3351
3352 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3353 reportVectorizationInfo("The target does not provide maximum vscale value "
3354 "for safe distance analysis.",
3355 "ScalableVFUnfeasible", ORE, TheLoop);
3356 return false;
3357 }
3358
3359 IsScalableVectorizationAllowed = true;
3360 return true;
3361}
3362
3363 ElementCount
3364 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3365 if (!isScalableVectorizationAllowed())
3366 return ElementCount::getScalable(0);
3367
3368 auto MaxScalableVF = ElementCount::getScalable(
3369 std::numeric_limits<ElementCount::ScalarTy>::max());
3370 if (Legal->isSafeForAnyVectorWidth())
3371 return MaxScalableVF;
3372
3373 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3374 // Limit MaxScalableVF by the maximum safe dependence distance.
3375 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
3376
3377 if (!MaxScalableVF)
3379 "Max legal vector width too small, scalable vectorization "
3380 "unfeasible.",
3381 "ScalableVFUnfeasible", ORE, TheLoop);
3382
3383 return MaxScalableVF;
3384}
3385
3386FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3387 unsigned MaxTripCount, ElementCount UserVF, bool FoldTailByMasking) {
3388 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3389 unsigned SmallestType, WidestType;
3390 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3391
3392 // Get the maximum safe dependence distance in bits computed by LAA.
3393 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3394 // the memory accesses that is most restrictive (involved in the smallest
3395 // dependence distance).
3396 unsigned MaxSafeElementsPowerOf2 =
3397 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3398 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3399 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3400 MaxSafeElementsPowerOf2 =
3401 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3402 }
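// Worked example (illustrative values): a max safe dependence distance of 256
// bits with a widest element type of 32 bits gives bit_floor(256 / 32) = 8,
// i.e. at most 8 elements may safely be processed per vector iteration.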
3403 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3404 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3405
3406 if (!Legal->isSafeForAnyVectorWidth())
3407 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3408
3409 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3410 << ".\n");
3411 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3412 << ".\n");
3413
3414 // First analyze the UserVF, fall back if the UserVF should be ignored.
3415 if (UserVF) {
3416 auto MaxSafeUserVF =
3417 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3418
3419 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3420 // If `VF=vscale x N` is safe, then so is `VF=N`
3421 if (UserVF.isScalable())
3422 return FixedScalableVFPair(
3423 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3424
3425 return UserVF;
3426 }
3427
3428 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3429
3430 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3431 // is better to ignore the hint and let the compiler choose a suitable VF.
3432 if (!UserVF.isScalable()) {
3433 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3434 << " is unsafe, clamping to max safe VF="
3435 << MaxSafeFixedVF << ".\n");
3436 ORE->emit([&]() {
3437 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3438 TheLoop->getStartLoc(),
3439 TheLoop->getHeader())
3440 << "User-specified vectorization factor "
3441 << ore::NV("UserVectorizationFactor", UserVF)
3442 << " is unsafe, clamping to maximum safe vectorization factor "
3443 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3444 });
3445 return MaxSafeFixedVF;
3446 }
3447
3448 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
3449 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3450 << " is ignored because scalable vectors are not "
3451 "available.\n");
3452 ORE->emit([&]() {
3453 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3454 TheLoop->getStartLoc(),
3455 TheLoop->getHeader())
3456 << "User-specified vectorization factor "
3457 << ore::NV("UserVectorizationFactor", UserVF)
3458 << " is ignored because the target does not support scalable "
3459 "vectors. The compiler will pick a more suitable value.";
3460 });
3461 } else {
3462 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3463 << " is unsafe. Ignoring scalable UserVF.\n");
3464 ORE->emit([&]() {
3465 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3466 TheLoop->getStartLoc(),
3467 TheLoop->getHeader())
3468 << "User-specified vectorization factor "
3469 << ore::NV("UserVectorizationFactor", UserVF)
3470 << " is unsafe. Ignoring the hint to let the compiler pick a "
3471 "more suitable value.";
3472 });
3473 }
3474 }
3475
3476 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3477 << " / " << WidestType << " bits.\n");
3478
3479 FixedScalableVFPair Result(ElementCount::getFixed(1),
3480 ElementCount::getScalable(0));
3481 if (auto MaxVF =
3482 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3483 MaxSafeFixedVF, FoldTailByMasking))
3484 Result.FixedVF = MaxVF;
3485
3486 if (auto MaxVF =
3487 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3488 MaxSafeScalableVF, FoldTailByMasking))
3489 if (MaxVF.isScalable()) {
3490 Result.ScalableVF = MaxVF;
3491 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3492 << "\n");
3493 }
3494
3495 return Result;
3496}
3497
3498 FixedScalableVFPair
3499 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
3500 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3501 // TODO: Vectorizing may still be useful, since the check is likely to be
3502 // dynamically uniform if the target can skip it.
3503 reportVectorizationFailure(
3504 "Not inserting runtime ptr check for divergent target",
3505 "runtime pointer checks needed. Not enabled for divergent target",
3506 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3507 return FixedScalableVFPair::getNone();
3508 }
3509
3510 ScalarEvolution *SE = PSE.getSE();
3511 ElementCount TC = getSmallConstantTripCount(SE, TheLoop);
3512 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3513 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3514 if (TC != ElementCount::getFixed(MaxTC))
3515 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3516 if (TC.isScalar()) {
3517 reportVectorizationFailure("Single iteration (non) loop",
3518 "loop trip count is one, irrelevant for vectorization",
3519 "SingleIterationLoop", ORE, TheLoop);
3520 return FixedScalableVFPair::getNone();
3521 }
3522
3523 // If BTC matches the widest induction type and is -1 then the trip count
3524 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3525 // to vectorize.
3526 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3527 if (!isa<SCEVCouldNotCompute>(BTC) &&
3528 BTC->getType()->getScalarSizeInBits() >=
3529 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3530 SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
3531 SE->getMinusOne(BTC->getType()))) {
3532 reportVectorizationFailure(
3533 "Trip count computation wrapped",
3534 "backedge-taken count is -1, loop trip count wrapped to 0",
3535 "TripCountWrapped", ORE, TheLoop);
3536 return FixedScalableVFPair::getNone();
3537 }
3538
3539 switch (ScalarEpilogueStatus) {
3540 case CM_ScalarEpilogueAllowed:
3541 return computeFeasibleMaxVF(MaxTC, UserVF, false);
3542 case CM_ScalarEpilogueNotAllowedUsePredicate:
3543 [[fallthrough]];
3544 case CM_ScalarEpilogueNotNeededUsePredicate:
3545 LLVM_DEBUG(
3546 dbgs() << "LV: vector predicate hint/switch found.\n"
3547 << "LV: Not allowing scalar epilogue, creating predicated "
3548 << "vector loop.\n");
3549 break;
3550 case CM_ScalarEpilogueNotAllowedLowTripLoop:
3551 // fallthrough as a special case of OptForSize
3552 case CM_ScalarEpilogueNotAllowedOptSize:
3553 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3554 LLVM_DEBUG(
3555 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3556 else
3557 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3558 << "count.\n");
3559
3560 // Bail if runtime checks are required, which are not good when optimising
3561 // for size.
3562 if (runtimeChecksRequired())
3563 return FixedScalableVFPair::getNone();
3564
3565 break;
3566 }
3567
3568 // Now try the tail folding
3569
3570 // Invalidate interleave groups that require an epilogue if we can't mask
3571 // the interleave-group.
3572 if (!useMaskedInterleavedAccesses(TTI)) {
3573 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3574 "No decisions should have been taken at this point");
3575 // Note: There is no need to invalidate any cost modeling decisions here, as
3576 // none were taken so far.
3577 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3578 }
3579
3580 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(MaxTC, UserVF, true);
3581
3582 // Avoid tail folding if the trip count is known to be a multiple of any VF
3583 // we choose.
3584 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3585 MaxFactors.FixedVF.getFixedValue();
3586 if (MaxFactors.ScalableVF) {
3587 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3588 if (MaxVScale && TTI.isVScaleKnownToBeAPowerOfTwo()) {
3589 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3590 *MaxPowerOf2RuntimeVF,
3591 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3592 } else
3593 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3594 }
3595
3596 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3597 // Return false if the loop is neither a single-latch-exit loop nor an
3598 // early-exit loop as tail-folding is not supported in that case.
3599 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3600 !Legal->hasUncountableEarlyExit())
3601 return false;
3602 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3603 ScalarEvolution *SE = PSE.getSE();
3604 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3605 // with uncountable exits. For countable loops, the symbolic maximum must
3606 // remain identical to the known back-edge taken count.
3607 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3608 assert((Legal->hasUncountableEarlyExit() ||
3609 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3610 "Invalid loop count");
3611 const SCEV *ExitCount = SE->getAddExpr(
3612 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3613 const SCEV *Rem = SE->getURemExpr(
3614 SE->applyLoopGuards(ExitCount, TheLoop),
3615 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3616 return Rem->isZero();
3617 };
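// Illustrative example: a trip count known to be a multiple of 8 with
// MaxVF * UserIC = 8 leaves a remainder of 0, so no scalar epilogue (and no
// tail folding) is needed.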
3618
3619 if (MaxPowerOf2RuntimeVF > 0u) {
3620 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3621 "MaxFixedVF must be a power of 2");
3622 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3623 // Accept MaxFixedVF if we do not have a tail.
3624 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3625 return MaxFactors;
3626 }
3627 }
3628
3629 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3630 if (ExpectedTC && ExpectedTC->isFixed() &&
3631 ExpectedTC->getFixedValue() <=
3632 TTI.getMinTripCountTailFoldingThreshold()) {
3633 if (MaxPowerOf2RuntimeVF > 0u) {
3634 // If we have a low-trip-count, and the fixed-width VF is known to divide
3635 // the trip count but the scalable factor does not, use the fixed-width
3636 // factor in preference to allow the generation of a non-predicated loop.
3637 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3638 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3639 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3640 "remain for any chosen VF.\n");
3641 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3642 return MaxFactors;
3643 }
3644 }
3645
3647 "The trip count is below the minial threshold value.",
3648 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3649 ORE, TheLoop);
3651 }
3652
3653 // If we don't know the precise trip count, or if the trip count that we
3654 // found modulo the vectorization factor is not zero, try to fold the tail
3655 // by masking.
3656 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3657 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3658 setTailFoldingStyles(ContainsScalableVF, UserIC);
3659 if (foldTailByMasking()) {
3660 if (getTailFoldingStyle() == TailFoldingStyle::DataWithEVL) {
3661 LLVM_DEBUG(
3662 dbgs()
3663 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3664 "try to generate VP Intrinsics with scalable vector "
3665 "factors only.\n");
3666 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3667 // for now.
3668 // TODO: extend it for fixed vectors, if required.
3669 assert(ContainsScalableVF && "Expected scalable vector factor.");
3670
3671 MaxFactors.FixedVF = ElementCount::getFixed(1);
3672 }
3673 return MaxFactors;
3674 }
3675
3676 // If there was a tail-folding hint/switch, but we can't fold the tail by
3677 // masking, fallback to a vectorization with a scalar epilogue.
3678 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3679 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3680 "scalar epilogue instead.\n");
3681 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3682 return MaxFactors;
3683 }
3684
3685 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3686 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3687 return FixedScalableVFPair::getNone();
3688 }
3689
3690 if (TC.isZero()) {
3691 reportVectorizationFailure(
3692 "unable to calculate the loop count due to complex control flow",
3693 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3694 return FixedScalableVFPair::getNone();
3695 }
3696
3698 "Cannot optimize for size and vectorize at the same time.",
3699 "cannot optimize for size and vectorize at the same time. "
3700 "Enable vectorization of this loop with '#pragma clang loop "
3701 "vectorize(enable)' when compiling with -Os/-Oz",
3702 "NoTailLoopWithOptForSize", ORE, TheLoop);
3704}
3705
3706 bool LoopVectorizationCostModel::shouldConsiderRegPressureForVF(
3707 ElementCount VF) {
3708 if (ConsiderRegPressure.getNumOccurrences())
3709 return ConsiderRegPressure;
3710
3711 // TODO: We should eventually consider register pressure for all targets. The
3712 // TTI hook is temporary whilst target-specific issues are being fixed.
3713 if (TTI.shouldConsiderVectorizationRegPressure())
3714 return true;
3715
3716 if (!useMaxBandwidth(VF.isScalable()
3717 ? TargetTransformInfo::RGK_ScalableVector
3718 : TargetTransformInfo::RGK_FixedWidthVector))
3719 return false;
3720 // Only calculate register pressure for VFs enabled by MaxBandwidth.
3721 return ElementCount::isKnownGT(
3722 VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3723 : MaxPermissibleVFWithoutMaxBW.FixedVF);
3724}
3725
3726 bool LoopVectorizationCostModel::useMaxBandwidth(
3727 TargetTransformInfo::RegisterKind RegKind) {
3728 return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3729 (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3730 (UseWiderVFIfCallVariantsPresent &&
3731 Legal->hasVectorCallVariants())));
3732}
3733
3734ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3735 ElementCount VF, unsigned MaxTripCount, bool FoldTailByMasking) const {
3736 unsigned EstimatedVF = VF.getKnownMinValue();
3737 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3738 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3739 auto Min = Attr.getVScaleRangeMin();
3740 EstimatedVF *= Min;
3741 }
3742
3743 // When a scalar epilogue is required, at least one iteration of the scalar
3744 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3745 // max VF that results in a dead vector loop.
3746 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3747 MaxTripCount -= 1;
3748
3749 if (MaxTripCount && MaxTripCount <= EstimatedVF &&
3750 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3751 // If upper bound loop trip count (TC) is known at compile time there is no
3752 // point in choosing VF greater than TC (as done in the loop below). Select
3753 // maximum power of two which doesn't exceed TC. If VF is
3754 // scalable, we only fall back on a fixed VF when the TC is less than or
3755 // equal to the known number of lanes.
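// Illustrative example: MaxTripCount = 6 with an estimated VF of 8 clamps the
// VF to bit_floor(6) = 4, so the vector loop actually executes.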
3756 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount);
3757 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3758 "exceeding the constant trip count: "
3759 << ClampedUpperTripCount << "\n");
3760 return ElementCount::get(ClampedUpperTripCount,
3761 FoldTailByMasking ? VF.isScalable() : false);
3762 }
3763 return VF;
3764}
3765
3766ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3767 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3768 ElementCount MaxSafeVF, bool FoldTailByMasking) {
3769 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3770 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3771 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3772 : TargetTransformInfo::RGK_FixedWidthVector);
3773
3774 // Convenience function to return the minimum of two ElementCounts.
3775 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3776 assert((LHS.isScalable() == RHS.isScalable()) &&
3777 "Scalable flags must match");
3778 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3779 };
3780
3781 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3782 // Note that both WidestRegister and WidestType may not be powers of 2.
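// Illustrative example: a 128-bit widest register and a 32-bit widest type
// give bit_floor(128 / 32) = 4 elements per vector register.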
3783 auto MaxVectorElementCount = ElementCount::get(
3784 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3785 ComputeScalableMaxVF);
3786 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3787 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3788 << (MaxVectorElementCount * WidestType) << " bits.\n");
3789
3790 if (!MaxVectorElementCount) {
3791 LLVM_DEBUG(dbgs() << "LV: The target has no "
3792 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3793 << " vector registers.\n");
3794 return ElementCount::getFixed(1);
3795 }
3796
3797 ElementCount MaxVF = clampVFByMaxTripCount(MaxVectorElementCount,
3798 MaxTripCount, FoldTailByMasking);
3799 // If the MaxVF was already clamped, there's no point in trying to pick a
3800 // larger one.
3801 if (MaxVF != MaxVectorElementCount)
3802 return MaxVF;
3803
3804 TargetTransformInfo::RegisterKind RegKind =
3805 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3806 : TargetTransformInfo::RGK_FixedWidthVector;
3807
3808 if (MaxVF.isScalable())
3809 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3810 else
3811 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3812
3813 if (useMaxBandwidth(RegKind)) {
3814 auto MaxVectorElementCountMaxBW = ElementCount::get(
3815 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3816 ComputeScalableMaxVF);
3817 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3818
3819 if (ElementCount MinVF =
3820 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3821 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3822 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3823 << ") with target's minimum: " << MinVF << '\n');
3824 MaxVF = MinVF;
3825 }
3826 }
3827
3828 MaxVF = clampVFByMaxTripCount(MaxVF, MaxTripCount, FoldTailByMasking);
3829
3830 if (MaxVectorElementCount != MaxVF) {
3831 // Invalidate any widening decisions we might have made, in case the loop
3832 // requires predication (decided later), but we have already made some
3833 // load/store widening decisions.
3834 invalidateCostModelingDecisions();
3835 }
3836 }
3837 return MaxVF;
3838}
3839
3840bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3841 const VectorizationFactor &B,
3842 const unsigned MaxTripCount,
3843 bool HasTail,
3844 bool IsEpilogue) const {
3845 InstructionCost CostA = A.Cost;
3846 InstructionCost CostB = B.Cost;
3847
3848 // Improve estimate for the vector width if it is scalable.
3849 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3850 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3851 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3852 if (A.Width.isScalable())
3853 EstimatedWidthA *= *VScale;
3854 if (B.Width.isScalable())
3855 EstimatedWidthB *= *VScale;
3856 }
3857
3858 // When optimizing for size choose whichever is smallest, which will be the
3859 // one with the smallest cost for the whole loop. On a tie pick the larger
3860 // vector width, on the assumption that throughput will be greater.
3861 if (CM.CostKind == TTI::TCK_CodeSize)
3862 return CostA < CostB ||
3863 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3864
3865 // Assume vscale may be larger than 1 (or the value being tuned for),
3866 // so that scalable vectorization is slightly favorable over fixed-width
3867 // vectorization.
3868 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3869 A.Width.isScalable() && !B.Width.isScalable();
3870
3871 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3872 const InstructionCost &RHS) {
3873 return PreferScalable ? LHS <= RHS : LHS < RHS;
3874 };
3875
3876 // To avoid the need for FP division:
3877 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3878 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3879 if (!MaxTripCount)
3880 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3881
3882 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3883 InstructionCost VectorCost,
3884 InstructionCost ScalarCost) {
3885 // If the trip count is a known (possibly small) constant, the trip count
3886 // will be rounded up to an integer number of iterations under
3887 // FoldTailByMasking. The total cost in that case will be
3888 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3889 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3890 // some extra overheads, but for the purpose of comparing the costs of
3891 // different VFs we can use this to compare the total loop-body cost
3892 // expected after vectorization.
3893 if (HasTail)
3894 return VectorCost * (MaxTripCount / VF) +
3895 ScalarCost * (MaxTripCount % VF);
3896 return VectorCost * divideCeil(MaxTripCount, VF);
3897 };
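// Worked example (illustrative costs): MaxTripCount = 10, VF = 4,
// VectorCost = 8, ScalarCost = 3. With a scalar tail the estimate is
// 8 * (10 / 4) + 3 * (10 % 4) = 16 + 6 = 22; with the tail folded it is
// 8 * ceil(10 / 4) = 24.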
3898
3899 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3900 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3901 return CmpFn(RTCostA, RTCostB);
3902}
3903
3904bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3905 const VectorizationFactor &B,
3906 bool HasTail,
3907 bool IsEpilogue) const {
3908 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3909 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3910 IsEpilogue);
3911}
3912
3913 void LoopVectorizationPlanner::emitInvalidCostRemarks(
3914 OptimizationRemarkEmitter *ORE) {
3915 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3916 SmallVector<RecipeVFPair> InvalidCosts;
3917 for (const auto &Plan : VPlans) {
3918 for (ElementCount VF : Plan->vectorFactors()) {
3919 // The VPlan-based cost model is designed for computing vector cost.
3920 // Querying the VPlan-based cost model with a scalar VF will cause some
3921 // errors because we expect the VF to be vector for most of the widen
3922 // recipes.
3923 if (VF.isScalar())
3924 continue;
3925
3926 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
3927 precomputeCosts(*Plan, VF, CostCtx);
3928 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3929 for (auto *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3930 for (auto &R : *VPBB) {
3931 if (!R.cost(VF, CostCtx).isValid())
3932 InvalidCosts.emplace_back(&R, VF);
3933 }
3934 }
3935 }
3936 }
3937 if (InvalidCosts.empty())
3938 return;
3939
3940 // Emit a report of VFs with invalid costs in the loop.
3941
3942 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3943 DenseMap<VPRecipeBase *, unsigned> Numbering;
3944 unsigned I = 0;
3945 for (auto &Pair : InvalidCosts)
3946 if (Numbering.try_emplace(Pair.first, I).second)
3947 ++I;
3948
3949 // Sort the list, first on recipe(number) then on VF.
3950 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3951 unsigned NA = Numbering[A.first];
3952 unsigned NB = Numbering[B.first];
3953 if (NA != NB)
3954 return NA < NB;
3955 return ElementCount::isKnownLT(A.second, B.second);
3956 });
3957
3958 // For a list of ordered recipe-VF pairs:
3959 // [(load, VF1), (load, VF2), (store, VF1)]
3960 // group the recipes together to emit separate remarks for:
3961 // load (VF1, VF2)
3962 // store (VF1)
3963 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3964 auto Subset = ArrayRef<RecipeVFPair>();
3965 do {
3966 if (Subset.empty())
3967 Subset = Tail.take_front(1);
3968
3969 VPRecipeBase *R = Subset.front().first;
3970
3971 unsigned Opcode =
3972 TypeSwitch<const VPRecipeBase *, unsigned>(R)
3973 .Case<VPHeaderPHIRecipe>(
3974 [](const auto *R) { return Instruction::PHI; })
3975 .Case<VPWidenSelectRecipe>(
3976 [](const auto *R) { return Instruction::Select; })
3977 .Case<VPWidenStoreRecipe>(
3978 [](const auto *R) { return Instruction::Store; })
3979 .Case<VPWidenLoadRecipe>(
3980 [](const auto *R) { return Instruction::Load; })
3981 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3982 [](const auto *R) { return Instruction::Call; })
3985 [](const auto *R) { return R->getOpcode(); })
3986 .Case<VPInterleaveRecipe>([](const VPInterleaveRecipe *R) {
3987 return R->getStoredValues().empty() ? Instruction::Load
3988 : Instruction::Store;
3989 });
3990
3991 // If the next recipe is different, or if there are no other pairs,
3992 // emit a remark for the collated subset. e.g.
3993 // [(load, VF1), (load, VF2))]
3994 // to emit:
3995 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3996 if (Subset == Tail || Tail[Subset.size()].first != R) {
3997 std::string OutString;
3998 raw_string_ostream OS(OutString);
3999 assert(!Subset.empty() && "Unexpected empty range");
4000 OS << "Recipe with invalid costs prevented vectorization at VF=(";
4001 for (const auto &Pair : Subset)
4002 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
4003 OS << "):";
4004 if (Opcode == Instruction::Call) {
4005 StringRef Name = "";
4006 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
4007 Name = Int->getIntrinsicName();
4008 } else {
4009 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
4010 Function *CalledFn =
4011 WidenCall ? WidenCall->getCalledScalarFunction()
4012 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
4013 ->getLiveInIRValue());
4014 Name = CalledFn->getName();
4015 }
4016 OS << " call to " << Name;
4017 } else
4018 OS << " " << Instruction::getOpcodeName(Opcode);
4019 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
4020 R->getDebugLoc());
4021 Tail = Tail.drop_front(Subset.size());
4022 Subset = {};
4023 } else
4024 // Grow the subset by one element
4025 Subset = Tail.take_front(Subset.size() + 1);
4026 } while (!Tail.empty());
4027}
4028
4029/// Check if any recipe of \p Plan will generate a vector value, which will be
4030/// assigned a vector register.
4031 static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
4032 const TargetTransformInfo &TTI) {
4033 assert(VF.isVector() && "Checking a scalar VF?");
4034 VPTypeAnalysis TypeInfo(Plan);
4035 DenseSet<VPRecipeBase *> EphemeralRecipes;
4036 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4037 // Set of already visited types.
4038 DenseSet<Type *> Visited;
4039 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4040 vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
4041 for (VPRecipeBase &R : *VPBB) {
4042 if (EphemeralRecipes.contains(&R))
4043 continue;
4044 // Continue early if the recipe is considered to not produce a vector
4045 // result. Note that this includes VPInstruction where some opcodes may
4046 // produce a vector, to preserve existing behavior as VPInstructions model
4047 // aspects not directly mapped to existing IR instructions.
4048 switch (R.getVPDefID()) {
4049 case VPDef::VPDerivedIVSC:
4050 case VPDef::VPScalarIVStepsSC:
4051 case VPDef::VPReplicateSC:
4052 case VPDef::VPInstructionSC:
4053 case VPDef::VPCanonicalIVPHISC:
4054 case VPDef::VPVectorPointerSC:
4055 case VPDef::VPVectorEndPointerSC:
4056 case VPDef::VPExpandSCEVSC:
4057 case VPDef::VPEVLBasedIVPHISC:
4058 case VPDef::VPPredInstPHISC:
4059 case VPDef::VPBranchOnMaskSC:
4060 continue;
4061 case VPDef::VPReductionSC:
4062 case VPDef::VPActiveLaneMaskPHISC:
4063 case VPDef::VPWidenCallSC:
4064 case VPDef::VPWidenCanonicalIVSC:
4065 case VPDef::VPWidenCastSC:
4066 case VPDef::VPWidenGEPSC:
4067 case VPDef::VPWidenIntrinsicSC:
4068 case VPDef::VPWidenSC:
4069 case VPDef::VPWidenSelectSC:
4070 case VPDef::VPBlendSC:
4071 case VPDef::VPFirstOrderRecurrencePHISC:
4072 case VPDef::VPHistogramSC:
4073 case VPDef::VPWidenPHISC:
4074 case VPDef::VPWidenIntOrFpInductionSC:
4075 case VPDef::VPWidenPointerInductionSC:
4076 case VPDef::VPReductionPHISC:
4077 case VPDef::VPInterleaveEVLSC:
4078 case VPDef::VPInterleaveSC:
4079 case VPDef::VPWidenLoadEVLSC:
4080 case VPDef::VPWidenLoadSC:
4081 case VPDef::VPWidenStoreEVLSC:
4082 case VPDef::VPWidenStoreSC:
4083 break;
4084 default:
4085 llvm_unreachable("unhandled recipe");
4086 }
4087
4088 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4089 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4090 if (!NumLegalParts)
4091 return false;
4092 if (VF.isScalable()) {
4093 // <vscale x 1 x iN> is assumed to be profitable over iN because
4094 // scalable registers are a distinct register class from scalar
4095 // ones. If we ever find a target which wants to lower scalable
4096 // vectors back to scalars, we'll need to update this code to
4097 // explicitly ask TTI about the register class uses for each part.
4098 return NumLegalParts <= VF.getKnownMinValue();
4099 }
4100 // Two or more elements that share a register are vectorized.
4101 return NumLegalParts < VF.getFixedValue();
4102 };
4103
4104 // If the recipe has no defs and is not a store (e.g., a branch), continue - no value to check.
4105 if (R.getNumDefinedValues() == 0 &&
4107 continue;
4108 // For multi-def recipes, currently only interleaved loads, suffice to
4109 // check first def only.
4110 // For stores check their stored value; for interleaved stores suffice
4111 // the check first stored value only. In all cases this is the second
4112 // operand.
4113 VPValue *ToCheck =
4114 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4115 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4116 if (!Visited.insert({ScalarTy}).second)
4117 continue;
4118 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4119 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4120 return true;
4121 }
4122 }
4123
4124 return false;
4125}
4126
4127static bool hasReplicatorRegion(VPlan &Plan) {
4128 return any_of(VPBlockUtils::blocksOnly<VPRegionBlock>(vp_depth_first_deep(
4129 Plan.getVectorLoopRegion()->getEntry())),
4130 [](auto *VPRB) { return VPRB->isReplicator(); });
4131}
4132
4133#ifndef NDEBUG
4134VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
4135 InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1));
4136 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
4137 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
4138 assert(
4139 any_of(VPlans,
4140 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }) &&
4141 "Expected Scalar VF to be a candidate");
4142
4143 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
4144 ExpectedCost);
4145 VectorizationFactor ChosenFactor = ScalarCost;
4146
4147 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
4148 if (ForceVectorization &&
4149 (VPlans.size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4150 // Ignore scalar width, because the user explicitly wants vectorization.
4151 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4152 // evaluation.
4153 ChosenFactor.Cost = InstructionCost::getMax();
4154 }
4155
4156 for (auto &P : VPlans) {
4157 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
4158 P->vectorFactors().end());
4159
4160 SmallVector<VPRegisterUsage, 8> RUs;
4161 if (any_of(VFs, [this](ElementCount VF) {
4162 return CM.shouldConsiderRegPressureForVF(VF);
4163 }))
4164 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4165
4166 for (unsigned I = 0; I < VFs.size(); I++) {
4167 ElementCount VF = VFs[I];
4168 // The cost for scalar VF=1 is already calculated, so ignore it.
4169 if (VF.isScalar())
4170 continue;
4171
4172 /// If the register pressure needs to be considered for VF,
4173 /// don't consider the VF as valid if it exceeds the number
4174 /// of registers for the target.
4175 if (CM.shouldConsiderRegPressureForVF(VF) &&
4176 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs))
4177 continue;
4178
4179 InstructionCost C = CM.expectedCost(VF);
4180
4181 // Add on other costs that are modelled in VPlan, but not in the legacy
4182 // cost model.
4183 VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind);
4184 VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
4185 assert(VectorRegion && "Expected to have a vector region!");
4186 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4187 vp_depth_first_shallow(VectorRegion->getEntry()))) {
4188 for (VPRecipeBase &R : *VPBB) {
4189 auto *VPI = dyn_cast<VPInstruction>(&R);
4190 if (!VPI)
4191 continue;
4192 switch (VPI->getOpcode()) {
4193 // Selects are only modelled in the legacy cost model for safe
4194 // divisors.
4195 case Instruction::Select: {
4196 VPValue *VPV = VPI->getVPSingleValue();
4197 if (VPV->getNumUsers() == 1) {
4198 if (auto *WR = dyn_cast<VPWidenRecipe>(*VPV->user_begin())) {
4199 switch (WR->getOpcode()) {
4200 case Instruction::UDiv:
4201 case Instruction::SDiv:
4202 case Instruction::URem:
4203 case Instruction::SRem:
4204 continue;
4205 default:
4206 break;
4207 }
4208 }
4209 }
4210 C += VPI->cost(VF, CostCtx);
4211 break;
4212 }
4214 unsigned Multiplier =
4215 cast<ConstantInt>(VPI->getOperand(2)->getLiveInIRValue())
4216 ->getZExtValue();
4217 C += VPI->cost(VF * Multiplier, CostCtx);
4218 break;
4219 }
4221 C += VPI->cost(VF, CostCtx);
4222 break;
4223 default:
4224 break;
4225 }
4226 }
4227 }
4228
4229 VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
4230 unsigned Width =
4231 estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
4232 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
4233 << " costs: " << (Candidate.Cost / Width));
4234 if (VF.isScalable())
4235 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
4236 << CM.getVScaleForTuning().value_or(1) << ")");
4237 LLVM_DEBUG(dbgs() << ".\n");
4238
4239 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
4240 LLVM_DEBUG(
4241 dbgs()
4242 << "LV: Not considering vector loop of width " << VF
4243 << " because it will not generate any vector instructions.\n");
4244 continue;
4245 }
4246
4247 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
4248 LLVM_DEBUG(
4249 dbgs()
4250 << "LV: Not considering vector loop of width " << VF
4251 << " because it would cause replicated blocks to be generated,"
4252 << " which isn't allowed when optimizing for size.\n");
4253 continue;
4254 }
4255
4256 if (isMoreProfitable(Candidate, ChosenFactor, P->hasScalarTail()))
4257 ChosenFactor = Candidate;
4258 }
4259 }
4260
4261 if (!EnableCondStoresVectorization && CM.hasPredStores()) {
4263 "There are conditional stores.",
4264 "store that is conditionally executed prevents vectorization",
4265 "ConditionalStore", ORE, OrigLoop);
4266 ChosenFactor = ScalarCost;
4267 }
4268
4269 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
4270 !isMoreProfitable(ChosenFactor, ScalarCost,
4271 !CM.foldTailByMasking())) dbgs()
4272 << "LV: Vectorization seems to be not beneficial, "
4273 << "but was forced by a user.\n");
4274 return ChosenFactor;
4275}
4276#endif
4277
4278bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4279 ElementCount VF) const {
4280 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4281 // reductions need special handling and are currently unsupported.
4282 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4283 if (!Legal->isReductionVariable(&Phi))
4284 return Legal->isFixedOrderRecurrence(&Phi);
4285 RecurKind RK = Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
4286 return RK == RecurKind::FMinNum || RK == RecurKind::FMaxNum;
4287 }))
4288 return false;
4289
4290 // Phis with uses outside of the loop require special handling and are
4291 // currently unsupported.
4292 for (const auto &Entry : Legal->getInductionVars()) {
4293 // Look for uses of the value of the induction at the last iteration.
4294 Value *PostInc =
4295 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4296 for (User *U : PostInc->users())
4297 if (!OrigLoop->contains(cast<Instruction>(U)))
4298 return false;
4299 // Look for uses of penultimate value of the induction.
4300 for (User *U : Entry.first->users())
4301 if (!OrigLoop->contains(cast<Instruction>(U)))
4302 return false;
4303 }
4304
4305 // Epilogue vectorization code has not been audited to ensure it handles
4306 // non-latch exits properly. It may be fine, but it needs to be audited and
4307 // tested.
4308 // TODO: Add support for loops with an early exit.
4309 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4310 return false;
4311
4312 return true;
4313}
4314
4315 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
4316 const ElementCount VF, const unsigned IC) const {
4317 // FIXME: We need a much better cost-model to take different parameters such
4318 // as register pressure, code size increase and cost of extra branches into
4319 // account. For now we apply a very crude heuristic and only consider loops
4320 // with vectorization factors larger than a certain value.
4321
4322 // Allow the target to opt out entirely.
4323 if (!TTI.preferEpilogueVectorization())
4324 return false;
4325
4326 // We also consider epilogue vectorization unprofitable for targets that don't
4327 // consider interleaving beneficial (eg. MVE).
4328 if (TTI.getMaxInterleaveFactor(VF) <= 1)
4329 return false;
4330
4331 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4333 : TTI.getEpilogueVectorizationMinVF();
4334 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4335}
4336
4337 VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
4338 const ElementCount MainLoopVF, unsigned IC) {
4339 VectorizationFactor Result = VectorizationFactor::Disabled();
4340 if (!EnableEpilogueVectorization) {
4341 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4342 return Result;
4343 }
4344
4345 if (!CM.isScalarEpilogueAllowed()) {
4346 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4347 "epilogue is allowed.\n");
4348 return Result;
4349 }
4350
4351 // Not really a cost consideration, but check for unsupported cases here to
4352 // simplify the logic.
4353 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4354 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4355 "is not a supported candidate.\n");
4356 return Result;
4357 }
4358
4359 if (EpilogueVectorizationForceVF > 1) {
4360 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4361 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
4362 if (hasPlanWithVF(ForcedEC))
4363 return {ForcedEC, 0, 0};
4364
4365 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4366 "viable.\n");
4367 return Result;
4368 }
4369
4370 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4371 LLVM_DEBUG(
4372 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4373 return Result;
4374 }
4375
4376 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4377 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4378 "this loop\n");
4379 return Result;
4380 }
4381
4382 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4383 // the main loop handles 8 lanes per iteration. We could still benefit from
4384 // vectorizing the epilogue loop with VF=4.
4385 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4386 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4387
4388 ScalarEvolution &SE = *PSE.getSE();
4389 Type *TCType = Legal->getWidestInductionType();
4390 const SCEV *RemainingIterations = nullptr;
4391 unsigned MaxTripCount = 0;
4392 const SCEV *TC =
4393 vputils::getSCEVExprForVPValue(getPlanFor(MainLoopVF).getTripCount(), SE);
4394 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4395 RemainingIterations =
4396 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4397
4398 // No iterations left to process in the epilogue.
4399 if (RemainingIterations->isZero())
4400 return Result;
4401
4402 if (MainLoopVF.isFixed()) {
4403 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4404 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4405 SE.getConstant(TCType, MaxTripCount))) {
4406 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4407 }
4408 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4409 << MaxTripCount << "\n");
4410 }
4411
4412 for (auto &NextVF : ProfitableVFs) {
4413 // Skip candidate VFs without a corresponding VPlan.
4414 if (!hasPlanWithVF(NextVF.Width))
4415 continue;
4416
4417 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4418 // vectors) or > the VF of the main loop (fixed vectors).
4419 if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
4420 ElementCount::isKnownGE(NextVF.Width, EstimatedRuntimeVF)) ||
4421 (NextVF.Width.isScalable() &&
4422 ElementCount::isKnownGE(NextVF.Width, MainLoopVF)) ||
4423 (!NextVF.Width.isScalable() && !MainLoopVF.isScalable() &&
4424 ElementCount::isKnownGT(NextVF.Width, MainLoopVF)))
4425 continue;
4426
4427 // If NextVF is greater than the number of remaining iterations, the
4428 // epilogue loop would be dead. Skip such factors.
4429 if (RemainingIterations && !NextVF.Width.isScalable()) {
4430 if (SE.isKnownPredicate(
4431 CmpInst::ICMP_UGT,
4432 SE.getConstant(TCType, NextVF.Width.getFixedValue()),
4433 RemainingIterations))
4434 continue;
4435 }
4436
4437 if (Result.Width.isScalar() ||
4438 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4439 /*IsEpilogue*/ true))
4440 Result = NextVF;
4441 }
4442
4443 if (Result != VectorizationFactor::Disabled())
4444 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4445 << Result.Width << "\n");
4446 return Result;
4447}
4448
4449std::pair<unsigned, unsigned>
4451 unsigned MinWidth = -1U;
4452 unsigned MaxWidth = 8;
4453 const DataLayout &DL = TheFunction->getDataLayout();
4454 // For in-loop reductions, no element types are added to ElementTypesInLoop
4455 // if there are no loads/stores in the loop. In this case, check through the
4456 // reduction variables to determine the maximum width.
4457 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4458 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4459 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4460 // When finding the min width used by the recurrence we need to account
4461 // for casts on the input operands of the recurrence.
4462 MinWidth = std::min(
4463 MinWidth,
4464 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4465 RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
4466 MaxWidth = std::max(MaxWidth,
4467 RdxDesc.getRecurrenceType()->getScalarSizeInBits());
4468 }
4469 } else {
4470 for (Type *T : ElementTypesInLoop) {
4471 MinWidth = std::min<unsigned>(
4472 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4473 MaxWidth = std::max<unsigned>(
4474 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4475 }
4476 }
4477 return {MinWidth, MaxWidth};
4478}
4479
4480 void LoopVectorizationCostModel::collectElementTypesForWidening() {
4481 ElementTypesInLoop.clear();
4482 // For each block.
4483 for (BasicBlock *BB : TheLoop->blocks()) {
4484 // For each instruction in the loop.
4485 for (Instruction &I : BB->instructionsWithoutDebug()) {
4486 Type *T = I.getType();
4487
4488 // Skip ignored values.
4489 if (ValuesToIgnore.count(&I))
4490 continue;
4491
4492 // Only examine Loads, Stores and PHINodes.
4493 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4494 continue;
4495
4496 // Examine PHI nodes that are reduction variables. Update the type to
4497 // account for the recurrence type.
4498 if (auto *PN = dyn_cast<PHINode>(&I)) {
4499 if (!Legal->isReductionVariable(PN))
4500 continue;
4501 const RecurrenceDescriptor &RdxDesc =
4502 Legal->getRecurrenceDescriptor(PN);
4503 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
4504 TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4505 RdxDesc.getRecurrenceType()))
4506 continue;
4507 T = RdxDesc.getRecurrenceType();
4508 }
4509
4510 // Examine the stored values.
4511 if (auto *ST = dyn_cast<StoreInst>(&I))
4512 T = ST->getValueOperand()->getType();
4513
4514 assert(T->isSized() &&
4515 "Expected the load/store/recurrence type to be sized");
4516
4517 ElementTypesInLoop.insert(T);
4518 }
4519 }
4520}
4521
4522unsigned
4523 LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4524 InstructionCost LoopCost) {
4525 // -- The interleave heuristics --
4526 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4527 // There are many micro-architectural considerations that we can't predict
4528 // at this level. For example, frontend pressure (on decode or fetch) due to
4529 // code size, or the number and capabilities of the execution ports.
4530 //
4531 // We use the following heuristics to select the interleave count:
4532 // 1. If the code has reductions, then we interleave to break the cross
4533 // iteration dependency.
4534 // 2. If the loop is really small, then we interleave to reduce the loop
4535 // overhead.
4536 // 3. We don't interleave if we think that we will spill registers to memory
4537 // due to the increased register pressure.
4538
4539 if (!CM.isScalarEpilogueAllowed())
4540 return 1;
4541
4543 if (CM.foldTailWithEVL()) {
4544 LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
4545 "Unroll factor forced to be 1.\n");
4546 return 1;
4547 }
4548
4549 // We used the distance for the interleave count.
4550 if (!Legal->isSafeForAnyVectorWidth())
4551 return 1;
4552
4553 // We don't attempt to perform interleaving for loops with uncountable early
4554 // exits because the VPInstruction::AnyOf code cannot currently handle
4555 // multiple parts.
4556 if (Plan.hasEarlyExit())
4557 return 1;
4558
4559 const bool HasReductions =
4562
4563 // If we did not calculate the cost for VF (because the user selected the VF)
4564 // then we calculate the cost of VF here.
4565 if (LoopCost == 0) {
4566 if (VF.isScalar())
4567 LoopCost = CM.expectedCost(VF);
4568 else
4569 LoopCost = cost(Plan, VF);
4570 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4571
4572 // Loop body is free and there is no need for interleaving.
4573 if (LoopCost == 0)
4574 return 1;
4575 }
4576
4577 VPRegisterUsage R =
4578 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4579 // We divide by these constants so assume that we have at least one
4580 // instruction that uses at least one register.
4581 for (auto &Pair : R.MaxLocalUsers) {
4582 Pair.second = std::max(Pair.second, 1U);
4583 }
4584
4585 // We calculate the interleave count using the following formula.
4586 // Subtract the number of loop invariants from the number of available
4587 // registers. These registers are used by all of the interleaved instances.
4588 // Next, divide the remaining registers by the number of registers that is
4589 // required by the loop, in order to estimate how many parallel instances
4590 // fit without causing spills. All of this is rounded down if necessary to be
4591 // a power of two. We want power of two interleave count to simplify any
4592 // addressing operations or alignment considerations.
4593 // We also want power of two interleave counts to ensure that the induction
4594 // variable of the vector loop wraps to zero, when tail is folded by masking;
4595 // this currently happens when OptForSize, in which case IC is set to 1 above.
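// Worked example (illustrative values): 32 registers in a class, 2 of them
// tied up by loop-invariant values, and at most 5 registers live at once give
// bit_floor((32 - 2) / 5) = bit_floor(6) = 4 interleaved instances.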
4596 unsigned IC = UINT_MAX;
4597
4598 for (const auto &Pair : R.MaxLocalUsers) {
4599 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4600 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4601 << " registers of "
4602 << TTI.getRegisterClassName(Pair.first)
4603 << " register class\n");
4604 if (VF.isScalar()) {
4605 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4606 TargetNumRegisters = ForceTargetNumScalarRegs;
4607 } else {
4608 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4609 TargetNumRegisters = ForceTargetNumVectorRegs;
4610 }
4611 unsigned MaxLocalUsers = Pair.second;
4612 unsigned LoopInvariantRegs = 0;
4613 if (R.LoopInvariantRegs.contains(Pair.first))
4614 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4615
4616 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4617 MaxLocalUsers);
4618 // Don't count the induction variable as interleaved.
4619 if (EnableIndVarRegisterHeur) {
4620 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4621 std::max(1U, (MaxLocalUsers - 1)));
4622 }
4623
4624 IC = std::min(IC, TmpIC);
4625 }
4626
4627 // Clamp the interleave ranges to reasonable counts.
4628 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4629
4630 // Check if the user has overridden the max.
4631 if (VF.isScalar()) {
4632 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4633 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4634 } else {
4635 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4636 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4637 }
4638
4639 // Try to get the exact trip count or, failing that, an estimate based on
4640 // profiling data or the ConstantMax from PSE.
4641 auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop);
4642
4643 // For fixed length VFs treat a scalable trip count as unknown.
4644 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4645 // Re-evaluate trip counts and VFs to be in the same numerical space.
4646 unsigned AvailableTC =
4647 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4648 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4649
4650 // At least one iteration must be scalar when this constraint holds. So the
4651 // maximum available iterations for interleaving is one less.
4652 if (CM.requiresScalarEpilogue(VF.isVector()))
4653 --AvailableTC;
4654
4655 unsigned InterleaveCountLB = bit_floor(std::max(
4656 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4657
4658 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4659 // If the best known trip count is exact, we select between two
4660 // prospective ICs, where
4661 //
4662 // 1) the aggressive IC is capped by the trip count divided by VF
4663 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4664 //
4665 // The final IC is selected in a way that the epilogue loop trip count is
4666 // minimized while maximizing the IC itself, so that we either run the
4667 // vector loop at least once if it generates a small epilogue loop, or
4668 // else we run the vector loop at least twice.
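// For example (illustrative numbers): with AvailableTC = 32, an estimated VF
// of 4 and a target maximum of 8, the conservative count is
// bit_floor(32 / (4 * 2)) = 4 and the aggressive count is bit_floor(32 / 4) = 8;
// both leave a scalar tail of 0 iterations, so the larger count is kept. With
// AvailableTC = 24 instead, the aggressive count of 4 leaves a tail of 8 while
// the conservative count of 2 leaves none, so the conservative count is kept.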
4669
4670 unsigned InterleaveCountUB = bit_floor(std::max(
4671 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4672 MaxInterleaveCount = InterleaveCountLB;
4673
4674 if (InterleaveCountUB != InterleaveCountLB) {
4675 unsigned TailTripCountUB =
4676 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4677 unsigned TailTripCountLB =
4678 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4679 // If both produce the same scalar tail, maximize the IC to do the same work
4680 // in fewer vector loop iterations.
4681 if (TailTripCountUB == TailTripCountLB)
4682 MaxInterleaveCount = InterleaveCountUB;
4683 }
4684 } else {
4685 // If the trip count is only an estimated compile-time constant, cap the
4686 // IC by the trip count divided by VF * 2, so that the vector loop runs
4687 // at least twice and interleaving remains profitable when an epilogue
4688 // loop is present. Since the exact trip count is not known, we choose to
4689 // be conservative in our IC estimate.
4690 MaxInterleaveCount = InterleaveCountLB;
4691 }
4692 }
4693
4694 assert(MaxInterleaveCount > 0 &&
4695 "Maximum interleave count must be greater than 0");
4696
4697 // Clamp the calculated IC to be between 1 and the maximum interleave count
4698 // that the target and trip count allow.
4699 if (IC > MaxInterleaveCount)
4700 IC = MaxInterleaveCount;
4701 else
4702 // Make sure IC is greater than 0.
4703 IC = std::max(1u, IC);
4704
4705 assert(IC > 0 && "Interleave count must be greater than 0.");
4706
4707 // Interleave if we vectorized this loop and there is a reduction that could
4708 // benefit from interleaving.
4709 if (VF.isVector() && HasReductions) {
4710 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4711 return IC;
4712 }
4713
4714 // For any scalar loop that requires either runtime checks or predication, we
4715 // are better off leaving this to the unroller. Note that if we've already
4716 // vectorized the loop we will have done the runtime check and so interleaving
4717 // won't require further checks.
4718 bool ScalarInterleavingRequiresPredication =
4719 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4720 return Legal->blockNeedsPredication(BB);
4721 }));
4722 bool ScalarInterleavingRequiresRuntimePointerCheck =
4723 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4724
4725 // We want to interleave small loops in order to reduce the loop overhead and
4726 // potentially expose ILP opportunities.
4727 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4728 << "LV: IC is " << IC << '\n'
4729 << "LV: VF is " << VF << '\n');
4730 const bool AggressivelyInterleaveReductions =
4731 TTI.enableAggressiveInterleaving(HasReductions);
4732 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4733 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4734 // We assume the loop overhead cost is 1 and use the cost model to
4735 // estimate the cost of the loop, interleaving until that overhead is
4736 // about 5% of the cost of the loop.
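// For example (illustrative numbers): with SmallLoopCost = 20 and a loop body
// costing 6, interleaving is capped at bit_floor(20 / 6) = 2 copies of the
// body.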
4737 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4738 SmallLoopCost / LoopCost.getValue()));
4739
4740 // Interleave until store/load ports (estimated by max interleave count) are
4741 // saturated.
4742 unsigned NumStores = 0;
4743 unsigned NumLoads = 0;
4746 for (VPRecipeBase &R : *VPBB) {
4748 NumLoads++;
4749 continue;
4750 }
4752 NumStores++;
4753 continue;
4754 }
4755
4756 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4757 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4758 NumStores += StoreOps;
4759 else
4760 NumLoads += InterleaveR->getNumDefinedValues();
4761 continue;
4762 }
4763 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4764 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4765 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4766 continue;
4767 }
4768 if (isa<VPHistogramRecipe>(&R)) {
4769 NumLoads++;
4770 NumStores++;
4771 continue;
4772 }
4773 }
4774 }
4775 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4776 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
4777
4778 // There is little point in interleaving for reductions containing selects
4779 // and compares when VF=1 since it may just create more overhead than it's
4780 // worth for loops with small trip counts. This is because we still have to
4781 // do the final reduction after the loop.
4782 bool HasSelectCmpReductions =
4783 HasReductions &&
4785 [](VPRecipeBase &R) {
4786 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4787 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4788 RedR->getRecurrenceKind()) ||
4789 RecurrenceDescriptor::isFindIVRecurrenceKind(
4790 RedR->getRecurrenceKind()));
4791 });
4792 if (HasSelectCmpReductions) {
4793 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4794 return 1;
4795 }
4796
4797 // If we have a scalar reduction (vector reductions are already dealt with
4798 // by this point), we can increase the critical path length if the loop
4799 // we're interleaving is inside another loop. For tree-wise reductions
4800 // set the limit to 2, and for ordered reductions it's best to disable
4801 // interleaving entirely.
4802 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4803 bool HasOrderedReductions =
4805 [](VPRecipeBase &R) {
4806 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4807
4808 return RedR && RedR->isOrdered();
4809 });
4810 if (HasOrderedReductions) {
4811 LLVM_DEBUG(
4812 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4813 return 1;
4814 }
4815
4816 unsigned F = MaxNestedScalarReductionIC;
4817 SmallIC = std::min(SmallIC, F);
4818 StoresIC = std::min(StoresIC, F);
4819 LoadsIC = std::min(LoadsIC, F);
4820 }
4821
4822 if (EnableLoadStoreRuntimeInterleave &&
4823 std::max(StoresIC, LoadsIC) > SmallIC) {
4824 LLVM_DEBUG(
4825 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4826 return std::max(StoresIC, LoadsIC);
4827 }
4828
4829 // If there are scalar reductions and TTI has enabled aggressive
4830 // interleaving for reductions, we will interleave to expose ILP.
4831 if (VF.isScalar() && AggressivelyInterleaveReductions) {
4832 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4833 // Interleave no less than SmallIC but not as aggressive as the normal IC
4834 // to satisfy the rare situation when resources are too limited.
4835 return std::max(IC / 2, SmallIC);
4836 }
4837
4838 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4839 return SmallIC;
4840 }
4841
4842 // Interleave if this is a large loop (small loops are already dealt with by
4843 // this point) that could benefit from interleaving.
4844 if (AggressivelyInterleaveReductions) {
4845 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4846 return IC;
4847 }
4848
4849 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4850 return 1;
4851}
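// The following is an illustrative, self-contained sketch (not part of this
// file) of the register-pressure estimate and clamping applied above, restated
// with plain integers. The function and parameter names are hypothetical; the
// real code works on VPlan register usage, InstructionCost and
// TargetTransformInfo queries.
#include <algorithm>
#include <bit>

static unsigned interleaveCountSketch(unsigned TargetNumRegisters,
                                      unsigned LoopInvariantRegs,
                                      unsigned MaxLocalUsers,
                                      unsigned MaxInterleaveCount) {
  // Registers left after loop-invariant values (shared by all interleaved
  // instances) are divided by the per-instance register demand, rounded down
  // to a power of two.
  unsigned Avail = TargetNumRegisters > LoopInvariantRegs
                       ? TargetNumRegisters - LoopInvariantRegs
                       : 0;
  unsigned IC = std::bit_floor(Avail / std::max(1u, MaxLocalUsers));
  // Clamp to the target's maximum interleave factor and keep at least 1.
  return std::max(1u, std::min(IC, MaxInterleaveCount));
}
// For example, interleaveCountSketch(32, 2, 6, 8) returns 4.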
4852
4853bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4854 ElementCount VF) {
4855 // TODO: The cost model for emulated masked load/store is completely
4856 // broken. This hack guides the cost model to use an artificially
4857 // high enough value to practically disable vectorization with such
4858 // operations, except where the previously deployed legality hack allowed
4859 // using very low cost values. This is to avoid regressions coming simply
4860 // from moving the "masked load/store" check from legality to the cost model.
4861 // Masked load/gather emulation was previously never allowed.
4862 // A limited number of masked store/scatter emulations was allowed.
4864 "Expecting a scalar emulated instruction");
4865 return isa<LoadInst>(I) ||
4866 (isa<StoreInst>(I) &&
4867 NumPredStores > NumberOfStoresToPredicate);
4868}
4869
4870void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
4871 assert(VF.isVector() && "Expected VF >= 2");
4872
4873 // If we've already collected the instructions to scalarize or the predicated
4874 // BBs after vectorization, there's nothing to do. Collection may already have
4875 // occurred if we have a user-selected VF and are now computing the expected
4876 // cost for interleaving.
4877 if (InstsToScalarize.contains(VF) ||
4878 PredicatedBBsAfterVectorization.contains(VF))
4879 return;
4880
4881 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
4882 // not profitable to scalarize any instructions, the presence of VF in the
4883 // map will indicate that we've analyzed it already.
4884 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4885
4886 // Find all the instructions that are scalar with predication in the loop and
4887 // determine if it would be better not to if-convert the blocks they are in.
4888 // If so, we also record the instructions to scalarize.
4889 for (BasicBlock *BB : TheLoop->blocks()) {
4891 continue;
4892 for (Instruction &I : *BB)
4893 if (isScalarWithPredication(&I, VF)) {
4894 ScalarCostsTy ScalarCosts;
4895 // Do not apply discount logic for:
4896 // 1. Scalars after vectorization, as there will only be a single copy
4897 // of the instruction.
4898 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4899 // 3. Emulated masked memrefs, if a hacked cost is needed.
4900 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4901 !useEmulatedMaskMemRefHack(&I, VF) &&
4902 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4903 for (const auto &[I, IC] : ScalarCosts)
4904 ScalarCostsVF.insert({I, IC});
4905 // Check if we decided to scalarize a call. If so, update the widening
4906 // decision of the call to CM_Scalarize with the computed scalar cost.
4907 for (const auto &[I, Cost] : ScalarCosts) {
4908 auto *CI = dyn_cast<CallInst>(I);
4909 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4910 continue;
4911 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4912 CallWideningDecisions[{CI, VF}].Cost = Cost;
4913 }
4914 }
4915 // Remember that BB will remain after vectorization.
4916 PredicatedBBsAfterVectorization[VF].insert(BB);
4917 for (auto *Pred : predecessors(BB)) {
4918 if (Pred->getSingleSuccessor() == BB)
4919 PredicatedBBsAfterVectorization[VF].insert(Pred);
4920 }
4921 }
4922 }
4923}
4924
4925InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4926 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4927 assert(!isUniformAfterVectorization(PredInst, VF) &&
4928 "Instruction marked uniform-after-vectorization will be predicated");
4929
4930 // Initialize the discount to zero, meaning that the scalar version and the
4931 // vector version cost the same.
4932 InstructionCost Discount = 0;
4933
4934 // Holds instructions to analyze. The instructions we visit are mapped in
4935 // ScalarCosts. Those instructions are the ones that would be scalarized if
4936 // we find that the scalar version costs less.
4937 SmallVector<Instruction *, 8> Worklist;
4938
4939 // Returns true if the given instruction can be scalarized.
4940 auto CanBeScalarized = [&](Instruction *I) -> bool {
4941 // We only attempt to scalarize instructions forming a single-use chain
4942 // from the original predicated block that would otherwise be vectorized.
4943 // Although not strictly necessary, we give up on instructions we know will
4944 // already be scalar to avoid traversing chains that are unlikely to be
4945 // beneficial.
4946 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4947 isScalarAfterVectorization(I, VF))
4948 return false;
4949
4950 // If the instruction is scalar with predication, it will be analyzed
4951 // separately. We ignore it within the context of PredInst.
4952 if (isScalarWithPredication(I, VF))
4953 return false;
4954
4955 // If any of the instruction's operands are uniform after vectorization,
4956 // the instruction cannot be scalarized. This prevents, for example, a
4957 // masked load from being scalarized.
4958 //
4959 // We assume we will only emit a value for lane zero of an instruction
4960 // marked uniform after vectorization, rather than VF identical values.
4961 // Thus, if we scalarize an instruction that uses a uniform, we would
4962 // create uses of values corresponding to the lanes we aren't emitting code
4963 // for. This behavior can be changed by allowing getScalarValue to clone
4964 // the lane zero values for uniforms rather than asserting.
4965 for (Use &U : I->operands())
4966 if (auto *J = dyn_cast<Instruction>(U.get()))
4967 if (isUniformAfterVectorization(J, VF))
4968 return false;
4969
4970 // Otherwise, we can scalarize the instruction.
4971 return true;
4972 };
4973
4974 // Compute the expected cost discount from scalarizing the entire expression
4975 // feeding the predicated instruction. We currently only consider expressions
4976 // that are single-use instruction chains.
4977 Worklist.push_back(PredInst);
4978 while (!Worklist.empty()) {
4979 Instruction *I = Worklist.pop_back_val();
4980
4981 // If we've already analyzed the instruction, there's nothing to do.
4982 if (ScalarCosts.contains(I))
4983 continue;
4984
4985 // Cannot scalarize fixed-order recurrence phis at the moment.
4986 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4987 continue;
4988
4989 // Compute the cost of the vector instruction. Note that this cost already
4990 // includes the scalarization overhead of the predicated instruction.
4991 InstructionCost VectorCost = getInstructionCost(I, VF);
4992
4993 // Compute the cost of the scalarized instruction. This cost is the cost of
4994 // the instruction as if it wasn't if-converted and instead remained in the
4995 // predicated block. We will scale this cost by block probability after
4996 // computing the scalarization overhead.
4997 InstructionCost ScalarCost =
4998 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
4999
5000 // Compute the scalarization overhead of needed insertelement instructions
5001 // and phi nodes.
5002 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
5003 Type *WideTy = toVectorizedTy(I->getType(), VF);
5004 for (Type *VectorTy : getContainedTypes(WideTy)) {
5005 ScalarCost += TTI.getScalarizationOverhead(
5006 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
5007 /*Insert=*/true,
5008 /*Extract=*/false, CostKind);
5009 }
5010 ScalarCost +=
5011 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
5012 }
5013
5014 // Compute the scalarization overhead of needed extractelement
5015 // instructions. For each of the instruction's operands, if the operand can
5016 // be scalarized, add it to the worklist; otherwise, account for the
5017 // overhead.
5018 for (Use &U : I->operands())
5019 if (auto *J = dyn_cast<Instruction>(U.get())) {
5020 assert(canVectorizeTy(J->getType()) &&
5021 "Instruction has non-scalar type");
5022 if (CanBeScalarized(J))
5023 Worklist.push_back(J);
5024 else if (needsExtract(J, VF)) {
5025 Type *WideTy = toVectorizedTy(J->getType(), VF);
5026 for (Type *VectorTy : getContainedTypes(WideTy)) {
5027 ScalarCost += TTI.getScalarizationOverhead(
5028 cast<VectorType>(VectorTy),
5029 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
5030 /*Extract*/ true, CostKind);
5031 }
5032 }
5033 }
5034
5035 // Scale the total scalar cost by block probability.
5036 ScalarCost /= getPredBlockCostDivisor(CostKind);
5037
5038 // Compute the discount. A non-negative discount means the vector version
5039 // of the instruction costs more, and scalarizing would be beneficial.
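// For example (illustrative numbers): with a vector cost of 10, VF = 4 and a
// per-lane scalar cost of 2, the scalar estimate is 4 * 2 = 8 before scaling
// and, with a divisor of e.g. 2, becomes 4 after scaling, so this instruction
// contributes a positive discount of 10 - 4 = 6 in favour of scalarization.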
5040 Discount += VectorCost - ScalarCost;
5041 ScalarCosts[I] = ScalarCost;
5042 }
5043
5044 return Discount;
5045}
5046
5047InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
5048 InstructionCost Cost;
5049
5050 // If the vector loop gets executed exactly once with the given VF, ignore the
5051 // costs of comparison and induction instructions, as they'll get simplified
5052 // away.
5053 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5054 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5055 if (TC == VF && !foldTailByMasking())
5057 ValuesToIgnoreForVF);
5058
5059 // For each block.
5060 for (BasicBlock *BB : TheLoop->blocks()) {
5061 InstructionCost BlockCost;
5062
5063 // For each instruction in the old loop.
5064 for (Instruction &I : BB->instructionsWithoutDebug()) {
5065 // Skip ignored values.
5066 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5067 (VF.isVector() && VecValuesToIgnore.count(&I)))
5068 continue;
5069
5071
5072 // Check if we should override the cost.
5073 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
5075
5076 BlockCost += C;
5077 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5078 << VF << " For instruction: " << I << '\n');
5079 }
5080
5081 // If we are vectorizing a predicated block, it will have been
5082 // if-converted. This means that the block's instructions (aside from
5083 // stores and instructions that may divide by zero) will now be
5084 // unconditionally executed. For the scalar case, we may not always execute
5085 // the predicated block, if it is an if-else block. Thus, scale the block's
5086 // cost by the probability of executing it. blockNeedsPredication from
5087 // Legal is used so as not to include all blocks in tail-folded loops.
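// For example (illustrative numbers): a predicated block whose instructions
// sum to a scalar cost of 12 contributes only 12 / 2 = 6 to the loop cost
// when the divisor is 2, reflecting that the block is not executed on every
// iteration.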
5088 if (VF.isScalar() && Legal->blockNeedsPredication(BB))
5089 BlockCost /= getPredBlockCostDivisor(CostKind);
5090
5091 Cost += BlockCost;
5092 }
5093
5094 return Cost;
5095}
5096
5097/// Gets the address access SCEV after verifying that the access pattern
5098/// is loop invariant except for the induction variable dependence.
5099///
5100/// This SCEV can be sent to the Target in order to estimate the address
5101/// calculation cost.
5102static const SCEV *getAddressAccessSCEV(
5103 Value *Ptr,
5104 LoopVectorizationLegality *Legal,
5105 PredicatedScalarEvolution &PSE,
5106 const Loop *TheLoop) {
5107
5108 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5109 if (!Gep)
5110 return nullptr;
5111
5112 // We are looking for a GEP with all loop-invariant indices except for one,
5113 // which should be an induction variable.
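// For example, 'getelementptr [64 x i32], ptr %base, i64 %inv, i64 %iv'
// qualifies when %inv is loop invariant and %iv is an induction variable;
// any other loop-varying index disqualifies the GEP.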
5114 auto *SE = PSE.getSE();
5115 unsigned NumOperands = Gep->getNumOperands();
5116 for (unsigned Idx = 1; Idx < NumOperands; ++Idx) {
5117 Value *Opd = Gep->getOperand(Idx);
5118 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5119 !Legal->isInductionVariable(Opd))
5120 return nullptr;
5121 }
5122
5123 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
5124 return PSE.getSCEV(Ptr);
5125}
5126
5128LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5129 ElementCount VF) {
5130 assert(VF.isVector() &&
5131 "Scalarization cost of instruction implies vectorization.");
5132 if (VF.isScalable())
5133 return InstructionCost::getInvalid();
5134
5135 Type *ValTy = getLoadStoreType(I);
5136 auto *SE = PSE.getSE();
5137
5138 unsigned AS = getLoadStoreAddressSpace(I);
5139 Value *Ptr = getLoadStorePointerOperand(I);
5140 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5141 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5142 // that it is being called from this specific place.
5143
5144 // Figure out whether the access is strided and get the stride value
5145 // if it's known at compile time.
5146 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5147
5148 // Get the cost of the scalar memory instruction and address computation.
5149 InstructionCost Cost = VF.getFixedValue() * TTI.getAddressComputationCost(
5150 PtrTy, SE, PtrSCEV, CostKind);
5151
5152 // Don't pass *I here, since it is scalar but will actually be part of a
5153 // vectorized loop where the user of it is a vectorized instruction.
5154 const Align Alignment = getLoadStoreAlignment(I);
5155 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5156 Cost += VF.getFixedValue() *
5157 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5158 AS, CostKind, OpInfo);
5159
5160 // Get the overhead of the extractelement and insertelement instructions
5161 // we might create due to scalarization.
5162 Cost += getScalarizationOverhead(I, VF);
5163
5164 // If we have a predicated load/store, it will need extra i1 extracts and
5165 // conditional branches, but may not be executed for each vector lane. Scale
5166 // the cost by the probability of executing the predicated block.
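// For example (illustrative numbers): with VF = 4, a predicated scalarized
// access pays 4 i1 extracts and 4 conditional branches on top of the 4 scalar
// memory operations, with the cost scaled by the predicated-block divisor
// since not every lane is executed.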
5167 if (isPredicatedInst(I)) {
5169
5170 // Add the cost of an i1 extract and a branch
5171 auto *VecI1Ty =
5173 Cost += TTI.getScalarizationOverhead(
5174 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5175 /*Insert=*/false, /*Extract=*/true, CostKind);
5176 Cost += TTI.getCFInstrCost(Instruction::Br, CostKind);
5177
5178 if (useEmulatedMaskMemRefHack(I, VF))
5179 // Artificially setting to a high enough value to practically disable
5180 // vectorization with such operations.
5181 Cost = 3000000;
5182 }
5183
5184 return Cost;
5185}
5186
5188LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5189 ElementCount VF) {
5190 Type *ValTy = getLoadStoreType(I);
5191 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5193 unsigned AS = getLoadStoreAddressSpace(I);
5194 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5195
5196 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5197 "Stride should be 1 or -1 for consecutive memory access");
5198 const Align Alignment = getLoadStoreAlignment(I);
5200 if (Legal->isMaskRequired(I)) {
5201 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5202 CostKind);
5203 } else {
5204 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5205 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5206 CostKind, OpInfo, I);
5207 }
5208
5209 bool Reverse = ConsecutiveStride < 0;
5210 if (Reverse)
5211 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5212 VectorTy, {}, CostKind, 0);
5213 return Cost;
5214}
5215
5217LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5218 ElementCount VF) {
5219 assert(Legal->isUniformMemOp(*I, VF));
5220
5221 Type *ValTy = getLoadStoreType(I);
5223 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5224 const Align Alignment = getLoadStoreAlignment(I);
5225 unsigned AS = getLoadStoreAddressSpace(I);
5226 if (isa<LoadInst>(I)) {
5227 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5228 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5229 CostKind) +
5230 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
5231 VectorTy, {}, CostKind);
5232 }
5233 StoreInst *SI = cast<StoreInst>(I);
5234
5235 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5236 // TODO: We have existing tests that request the cost of extracting element
5237 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5238 // the actual generated code, which involves extracting the last element of
5239 // a scalable vector where the lane to extract is unknown at compile time.
5241 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5242 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5243 if (!IsLoopInvariantStoreValue)
5244 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5245 VectorTy, CostKind, 0);
5246 return Cost;
5247}
5248
5250LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5251 ElementCount VF) {
5252 Type *ValTy = getLoadStoreType(I);
5253 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5254 const Align Alignment = getLoadStoreAlignment(I);
5256 Type *PtrTy = Ptr->getType();
5257
5258 if (!Legal->isUniform(Ptr, VF))
5259 PtrTy = toVectorTy(PtrTy, VF);
5260
5261 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5262 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5263 Legal->isMaskRequired(I), Alignment,
5264 CostKind, I);
5265}
5266
5268LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5269 ElementCount VF) {
5270 const auto *Group = getInterleavedAccessGroup(I);
5271 assert(Group && "Fail to get an interleaved access group.");
5272
5273 Instruction *InsertPos = Group->getInsertPos();
5274 Type *ValTy = getLoadStoreType(InsertPos);
5275 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5276 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5277
5278 unsigned InterleaveFactor = Group->getFactor();
5279 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5280
5281 // Holds the indices of existing members in the interleaved group.
5282 SmallVector<unsigned, 4> Indices;
5283 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5284 if (Group->getMember(IF))
5285 Indices.push_back(IF);
5286
5287 // Calculate the cost of the whole interleaved group.
5288 bool UseMaskForGaps =
5289 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5290 (isa<StoreInst>(I) && !Group->isFull());
5291 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
5292 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5293 Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
5294 UseMaskForGaps);
5295
5296 if (Group->isReverse()) {
5297 // TODO: Add support for reversed masked interleaved access.
5298 assert(!Legal->isMaskRequired(I) &&
5299 "Reverse masked interleaved access not supported.");
5300 Cost += Group->getNumMembers() *
5301 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5302 VectorTy, {}, CostKind, 0);
5303 }
5304 return Cost;
5305}
5306
5307std::optional<InstructionCost>
5309 ElementCount VF,
5310 Type *Ty) const {
5311 using namespace llvm::PatternMatch;
5312 // Early exit if there are no in-loop reductions.
5313 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5314 return std::nullopt;
5315 auto *VectorTy = cast<VectorType>(Ty);
5316
5317 // We are looking for a pattern of, and finding the minimal acceptable cost:
5318 // reduce(mul(ext(A), ext(B))) or
5319 // reduce(mul(A, B)) or
5320 // reduce(ext(A)) or
5321 // reduce(A).
5322 // The basic idea is that we walk down the tree to do that, finding the root
5323 // reduction instruction in InLoopReductionImmediateChains. From there we find
5324 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5325 // of the components. If the reduction cost is lower, we return it for the
5326 // reduction instruction and 0 for the other instructions in the pattern. If
5327 // it is not, we return an invalid cost specifying that the original cost
5328 // method should be used.
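// For example (types for illustration): reduce.add(mul(sext(A to i32),
// sext(B to i32))) with <16 x i8> inputs is costed as a single
// multiply-accumulate reduction via getMulAccReductionCost and compared
// against the summed costs of the two extends, the multiply and the plain add
// reduction; the cheaper alternative is chosen.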
5329 Instruction *RetI = I;
5330 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5331 if (!RetI->hasOneUser())
5332 return std::nullopt;
5333 RetI = RetI->user_back();
5334 }
5335
5336 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5337 RetI->user_back()->getOpcode() == Instruction::Add) {
5338 RetI = RetI->user_back();
5339 }
5340
5341 // Test if the found instruction is a reduction. If not, return an invalid
5342 // cost, specifying that the parent should use the original cost modelling.
5343 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5344 if (!LastChain)
5345 return std::nullopt;
5346
5347 // Find the reduction this chain is a part of and calculate the basic cost of
5348 // the reduction on its own.
5349 Instruction *ReductionPhi = LastChain;
5350 while (!isa<PHINode>(ReductionPhi))
5351 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5352
5353 const RecurrenceDescriptor &RdxDesc =
5354 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5355
5356 InstructionCost BaseCost;
5357 RecurKind RK = RdxDesc.getRecurrenceKind();
5360 BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5361 RdxDesc.getFastMathFlags(), CostKind);
5362 } else {
5363 BaseCost = TTI.getArithmeticReductionCost(
5364 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5365 }
5366
5367 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5368 // normal fmul instruction to the cost of the fadd reduction.
5369 if (RK == RecurKind::FMulAdd)
5370 BaseCost +=
5371 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5372
5373 // If we're using ordered reductions then we can just return the base cost
5374 // here, since getArithmeticReductionCost calculates the full ordered
5375 // reduction cost when FP reassociation is not allowed.
5376 if (useOrderedReductions(RdxDesc))
5377 return BaseCost;
5378
5379 // Get the operand that was not the reduction chain and match it to one of the
5380 // patterns, returning the better cost if it is found.
5381 Instruction *RedOp = RetI->getOperand(1) == LastChain
5384
5385 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5386
5387 Instruction *Op0, *Op1;
5388 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5389 match(RedOp,
5391 match(Op0, m_ZExtOrSExt(m_Value())) &&
5392 Op0->getOpcode() == Op1->getOpcode() &&
5393 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5394 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5395 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5396
5397 // Matched reduce.add(ext(mul(ext(A), ext(B)))
5398 // Note that the extend opcodes need to all match, or if A==B they will have
5399 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5400 // which is equally fine.
5401 bool IsUnsigned = isa<ZExtInst>(Op0);
5402 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5403 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5404
5405 InstructionCost ExtCost =
5406 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5408 InstructionCost MulCost =
5409 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5410 InstructionCost Ext2Cost =
5411 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5413
5414 InstructionCost RedCost = TTI.getMulAccReductionCost(
5415 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5416 CostKind);
5417
5418 if (RedCost.isValid() &&
5419 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5420 return I == RetI ? RedCost : 0;
5421 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5422 !TheLoop->isLoopInvariant(RedOp)) {
5423 // Matched reduce(ext(A))
5424 bool IsUnsigned = isa<ZExtInst>(RedOp);
5425 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5426 InstructionCost RedCost = TTI.getExtendedReductionCost(
5427 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5428 RdxDesc.getFastMathFlags(), CostKind);
5429
5430 InstructionCost ExtCost =
5431 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5433 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5434 return I == RetI ? RedCost : 0;
5435 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5436 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5437 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5438 Op0->getOpcode() == Op1->getOpcode() &&
5439 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5440 bool IsUnsigned = isa<ZExtInst>(Op0);
5441 Type *Op0Ty = Op0->getOperand(0)->getType();
5442 Type *Op1Ty = Op1->getOperand(0)->getType();
5443 Type *LargestOpTy =
5444 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5445 : Op0Ty;
5446 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5447
5448 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5449 // different sizes. We take the largest type as the ext to reduce, and add
5450 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
5451 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5452 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5454 InstructionCost ExtCost1 = TTI.getCastInstrCost(
5455 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5457 InstructionCost MulCost =
5458 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5459
5460 InstructionCost RedCost = TTI.getMulAccReductionCost(
5461 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5462 CostKind);
5463 InstructionCost ExtraExtCost = 0;
5464 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5465 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5466 ExtraExtCost = TTI.getCastInstrCost(
5467 ExtraExtOp->getOpcode(), ExtType,
5468 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5470 }
5471
5472 if (RedCost.isValid() &&
5473 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5474 return I == RetI ? RedCost : 0;
5475 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5476 // Matched reduce.add(mul())
5477 InstructionCost MulCost =
5478 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5479
5480 InstructionCost RedCost = TTI.getMulAccReductionCost(
5481 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5482 CostKind);
5483
5484 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5485 return I == RetI ? RedCost : 0;
5486 }
5487 }
5488
5489 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5490}
5491
5493LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5494 ElementCount VF) {
5495 // Calculate scalar cost only. Vectorization cost should be ready at this
5496 // moment.
5497 if (VF.isScalar()) {
5498 Type *ValTy = getLoadStoreType(I);
5500 const Align Alignment = getLoadStoreAlignment(I);
5501 unsigned AS = getLoadStoreAddressSpace(I);
5502
5503 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5504 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5505 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5506 OpInfo, I);
5507 }
5508 return getWideningCost(I, VF);
5509}
5510
5512LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5513 ElementCount VF) const {
5514
5515 // There is no mechanism yet to create a scalable scalarization loop,
5516 // so this is currently Invalid.
5517 if (VF.isScalable())
5519
5520 if (VF.isScalar())
5521 return 0;
5522
5524 Type *RetTy = toVectorizedTy(I->getType(), VF);
5525 if (!RetTy->isVoidTy() &&
5526 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
5527
5528 for (Type *VectorTy : getContainedTypes(RetTy)) {
5529 Cost += TTI.getScalarizationOverhead(
5531 /*Insert=*/true,
5532 /*Extract=*/false, CostKind);
5533 }
5534 }
5535
5536 // Some targets keep addresses scalar.
5537 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
5538 return Cost;
5539
5540 // Some targets support efficient element stores.
5541 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
5542 return Cost;
5543
5544 // Collect operands to consider.
5545 CallInst *CI = dyn_cast<CallInst>(I);
5546 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5547
5548 // Skip operands that do not require extraction/scalarization and do not incur
5549 // any overhead.
5551 for (auto *V : filterExtractingOperands(Ops, VF))
5552 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5553 return Cost + TTI.getOperandsScalarizationOverhead(Tys, CostKind);
5554}
5555
5557 if (VF.isScalar())
5558 return;
5559 NumPredStores = 0;
5560 for (BasicBlock *BB : TheLoop->blocks()) {
5561 // For each instruction in the old loop.
5562 for (Instruction &I : *BB) {
5564 if (!Ptr)
5565 continue;
5566
5567 // TODO: We should generate better code and update the cost model for
5568 // predicated uniform stores. Today they are treated as any other
5569 // predicated store (see added test cases in
5570 // invariant-store-vectorization.ll).
5572 NumPredStores++;
5573
5574 if (Legal->isUniformMemOp(I, VF)) {
5575 auto IsLegalToScalarize = [&]() {
5576 if (!VF.isScalable())
5577 // Scalarization of fixed length vectors "just works".
5578 return true;
5579
5580 // We have dedicated lowering for unpredicated uniform loads and
5581 // stores. Note that even with tail folding we know that at least
5582 // one lane is active (i.e. generalized predication is not possible
5583 // here), and the logic below depends on this fact.
5584 if (!foldTailByMasking())
5585 return true;
5586
5587 // For scalable vectors, a uniform memop load is always
5588 // uniform-by-parts and we know how to scalarize that.
5589 if (isa<LoadInst>(I))
5590 return true;
5591
5592 // A uniform store isn't necessarily uniform-by-parts
5593 // and we can't assume scalarization.
5594 auto &SI = cast<StoreInst>(I);
5595 return TheLoop->isLoopInvariant(SI.getValueOperand());
5596 };
5597
5598 const InstructionCost GatherScatterCost =
5600 getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5601
5602 // Load: Scalar load + broadcast
5603 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5604 // FIXME: This cost is a significant under-estimate for tail folded
5605 // memory ops.
5606 const InstructionCost ScalarizationCost =
5607 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5609
5610 // Choose the better solution for the current VF. Note that Invalid
5611 // costs compare as maximally large. If both are invalid, we get an
5612 // invalid cost, which signals a failure and a vectorization abort.
5613 if (GatherScatterCost < ScalarizationCost)
5614 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5615 else
5616 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5617 continue;
5618 }
5619
5620 // We assume that widening is the best solution when possible.
5621 if (memoryInstructionCanBeWidened(&I, VF)) {
5622 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5623 int ConsecutiveStride = Legal->isConsecutivePtr(
5625 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5626 "Expected consecutive stride.");
5627 InstWidening Decision =
5628 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5629 setWideningDecision(&I, VF, Decision, Cost);
5630 continue;
5631 }
5632
5633 // Choose between Interleaving, Gather/Scatter or Scalarization.
5635 unsigned NumAccesses = 1;
5636 if (isAccessInterleaved(&I)) {
5637 const auto *Group = getInterleavedAccessGroup(&I);
5638 assert(Group && "Fail to get an interleaved access group.");
5639
5640 // Make one decision for the whole group.
5641 if (getWideningDecision(&I, VF) != CM_Unknown)
5642 continue;
5643
5644 NumAccesses = Group->getNumMembers();
5646 InterleaveCost = getInterleaveGroupCost(&I, VF);
5647 }
5648
5649 InstructionCost GatherScatterCost =
5651 ? getGatherScatterCost(&I, VF) * NumAccesses
5653
5654 InstructionCost ScalarizationCost =
5655 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5656
5657 // Choose the better solution for the current VF, record the decision,
5658 // and use it during vectorization.
5660 InstWidening Decision;
5661 if (InterleaveCost <= GatherScatterCost &&
5662 InterleaveCost < ScalarizationCost) {
5663 Decision = CM_Interleave;
5664 Cost = InterleaveCost;
5665 } else if (GatherScatterCost < ScalarizationCost) {
5666 Decision = CM_GatherScatter;
5667 Cost = GatherScatterCost;
5668 } else {
5669 Decision = CM_Scalarize;
5670 Cost = ScalarizationCost;
5671 }
5672 // If the instruction belongs to an interleave group, the whole group
5673 // receives the same decision. The whole group receives the cost, but
5674 // the cost will actually be assigned to one instruction.
5675 if (const auto *Group = getInterleavedAccessGroup(&I))
5676 setWideningDecision(Group, VF, Decision, Cost);
5677 else
5678 setWideningDecision(&I, VF, Decision, Cost);
5679 }
5680 }
5681
5682 // Make sure that any load of an address and any other address computation
5683 // remains scalar unless there is gather/scatter support. This avoids
5684 // inevitable extracts into address registers, and also has the benefit of
5685 // activating LSR more, since that pass can't optimize vectorized
5686 // addresses.
5687 if (TTI.prefersVectorizedAddressing())
5688 return;
5689
5690 // Start with all scalar pointer uses.
5692 for (BasicBlock *BB : TheLoop->blocks())
5693 for (Instruction &I : *BB) {
5694 Instruction *PtrDef =
5696 if (PtrDef && TheLoop->contains(PtrDef) &&
5698 AddrDefs.insert(PtrDef);
5699 }
5700
5701 // Add all instructions used to generate the addresses.
5703 append_range(Worklist, AddrDefs);
5704 while (!Worklist.empty()) {
5705 Instruction *I = Worklist.pop_back_val();
5706 for (auto &Op : I->operands())
5707 if (auto *InstOp = dyn_cast<Instruction>(Op))
5708 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
5709 AddrDefs.insert(InstOp).second)
5710 Worklist.push_back(InstOp);
5711 }
5712
5713 for (auto *I : AddrDefs) {
5714 if (isa<LoadInst>(I)) {
5715 // Setting the desired widening decision should ideally be handled by
5716 // cost functions, but since this involves the task of finding out
5717 // if the loaded register is involved in an address computation, it is
5718 // instead changed here when we know this is the case.
5719 InstWidening Decision = getWideningDecision(I, VF);
5720 if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
5721 // Scalarize a widened load of address.
5723 I, VF, CM_Scalarize,
5724 (VF.getKnownMinValue() *
5725 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5726 else if (const auto *Group = getInterleavedAccessGroup(I)) {
5727 // Scalarize an interleave group of address loads.
5728 for (unsigned I = 0; I < Group->getFactor(); ++I) {
5729 if (Instruction *Member = Group->getMember(I))
5731 Member, VF, CM_Scalarize,
5732 (VF.getKnownMinValue() *
5733 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
5734 }
5735 }
5736 } else {
5737 // Cannot scalarize fixed-order recurrence phis at the moment.
5738 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5739 continue;
5740
5741 // Make sure I gets scalarized and a cost estimate without
5742 // scalarization overhead.
5743 ForcedScalars[VF].insert(I);
5744 }
5745 }
5746}
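// Illustrative, self-contained sketch (not part of this file) of how the
// memory widening decision above breaks ties between the candidate costs. The
// enum and function names are hypothetical; the real code compares
// InstructionCost values, where invalid costs behave as maximally large.
#include <cstdint>

enum class MemDecisionSketch { Interleave, GatherScatter, Scalarize };

static MemDecisionSketch pickMemDecisionSketch(uint64_t InterleaveCost,
                                               uint64_t GatherScatterCost,
                                               uint64_t ScalarizationCost) {
  // Interleaving wins ties against gather/scatter but must strictly beat
  // scalarization; otherwise gather/scatter is preferred over scalarization.
  if (InterleaveCost <= GatherScatterCost &&
      InterleaveCost < ScalarizationCost)
    return MemDecisionSketch::Interleave;
  if (GatherScatterCost < ScalarizationCost)
    return MemDecisionSketch::GatherScatter;
  return MemDecisionSketch::Scalarize;
}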
5747
5749 assert(!VF.isScalar() &&
5750 "Trying to set a vectorization decision for a scalar VF");
5751
5752 auto ForcedScalar = ForcedScalars.find(VF);
5753 for (BasicBlock *BB : TheLoop->blocks()) {
5754 // For each instruction in the old loop.
5755 for (Instruction &I : *BB) {
5757
5758 if (!CI)
5759 continue;
5760
5764 Function *ScalarFunc = CI->getCalledFunction();
5765 Type *ScalarRetTy = CI->getType();
5766 SmallVector<Type *, 4> Tys, ScalarTys;
5767 for (auto &ArgOp : CI->args())
5768 ScalarTys.push_back(ArgOp->getType());
5769
5770 // Estimate cost of scalarized vector call. The source operands are
5771 // assumed to be vectors, so we need to extract individual elements from
5772 // there, execute VF scalar calls, and then gather the result into the
5773 // vector return value.
5774 if (VF.isFixed()) {
5775 InstructionCost ScalarCallCost =
5776 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5777
5778 // Compute costs of unpacking argument values for the scalar calls and
5779 // packing the return values to a vector.
5780 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5781 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5782 } else {
5783 // There is no point attempting to calculate the scalar cost for a
5784 // scalable VF as we know it will be Invalid.
5785 assert(!getScalarizationOverhead(CI, VF).isValid() &&
5786 "Unexpected valid cost for scalarizing scalable vectors");
5787 ScalarCost = InstructionCost::getInvalid();
5788 }
5789
5790 // Honor ForcedScalars and UniformAfterVectorization decisions.
5791 // TODO: For calls, it might still be more profitable to widen. Use
5792 // VPlan-based cost model to compare different options.
5793 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5794 ForcedScalar->second.contains(CI)) ||
5795 isUniformAfterVectorization(CI, VF))) {
5796 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5797 Intrinsic::not_intrinsic, std::nullopt,
5798 ScalarCost);
5799 continue;
5800 }
5801
5802 bool MaskRequired = Legal->isMaskRequired(CI);
5803 // Compute corresponding vector type for return value and arguments.
5804 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5805 for (Type *ScalarTy : ScalarTys)
5806 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5807
5808 // An in-loop reduction using an fmuladd intrinsic is a special case;
5809 // we don't want the normal cost for that intrinsic.
5811 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5814 std::nullopt, *RedCost);
5815 continue;
5816 }
5817
5818 // Find the cost of vectorizing the call, if we can find a suitable
5819 // vector variant of the function.
5820 VFInfo FuncInfo;
5821 Function *VecFunc = nullptr;
5822 // Search through any available variants for one we can use at this VF.
5823 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5824 // Must match requested VF.
5825 if (Info.Shape.VF != VF)
5826 continue;
5827
5828 // Must take a mask argument if one is required
5829 if (MaskRequired && !Info.isMasked())
5830 continue;
5831
5832 // Check that all parameter kinds are supported
5833 bool ParamsOk = true;
5834 for (VFParameter Param : Info.Shape.Parameters) {
5835 switch (Param.ParamKind) {
5837 break;
5839 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5840 // Make sure the scalar parameter in the loop is invariant.
5841 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5842 TheLoop))
5843 ParamsOk = false;
5844 break;
5845 }
5847 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5848 // Find the stride for the scalar parameter in this loop and see if
5849 // it matches the stride for the variant.
5850 // TODO: do we need to figure out the cost of an extract to get the
5851 // first lane? Or do we hope that it will be folded away?
5852 ScalarEvolution *SE = PSE.getSE();
5853 if (!match(SE->getSCEV(ScalarParam),
5855 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5857 ParamsOk = false;
5858 break;
5859 }
5861 break;
5862 default:
5863 ParamsOk = false;
5864 break;
5865 }
5866 }
5867
5868 if (!ParamsOk)
5869 continue;
5870
5871 // Found a suitable candidate, stop here.
5872 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5873 FuncInfo = Info;
5874 break;
5875 }
5876
5877 if (TLI && VecFunc && !CI->isNoBuiltin())
5878 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
5879
5880 // Find the cost of an intrinsic; some targets may have instructions that
5881 // perform the operation without needing an actual call.
5883 if (IID != Intrinsic::not_intrinsic)
5885
5886 InstructionCost Cost = ScalarCost;
5887 InstWidening Decision = CM_Scalarize;
5888
5889 if (VectorCost <= Cost) {
5890 Cost = VectorCost;
5891 Decision = CM_VectorCall;
5892 }
5893
5894 if (IntrinsicCost <= Cost) {
5896 Decision = CM_IntrinsicCall;
5897 }
5898
5899 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5901 }
5902 }
5903}
5904
5906 if (!Legal->isInvariant(Op))
5907 return false;
5908 // Consider Op invariant if neither it nor its operands are predicated
5909 // instructions in the loop; otherwise it is not trivially hoistable.
5910 auto *OpI = dyn_cast<Instruction>(Op);
5911 return !OpI || !TheLoop->contains(OpI) ||
5912 (!isPredicatedInst(OpI) &&
5913 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5914 all_of(OpI->operands(),
5915 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5916}
5917
5920 ElementCount VF) {
5921 // If we know that this instruction will remain uniform, check the cost of
5922 // the scalar version.
5924 VF = ElementCount::getFixed(1);
5925
5926 if (VF.isVector() && isProfitableToScalarize(I, VF))
5927 return InstsToScalarize[VF][I];
5928
5929 // Forced scalars do not have any scalarization overhead.
5930 auto ForcedScalar = ForcedScalars.find(VF);
5931 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5932 auto InstSet = ForcedScalar->second;
5933 if (InstSet.count(I))
5935 VF.getKnownMinValue();
5936 }
5937
5938 Type *RetTy = I->getType();
5940 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5941 auto *SE = PSE.getSE();
5942
5943 Type *VectorTy;
5944 if (isScalarAfterVectorization(I, VF)) {
5945 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5946 [this](Instruction *I, ElementCount VF) -> bool {
5947 if (VF.isScalar())
5948 return true;
5949
5950 auto Scalarized = InstsToScalarize.find(VF);
5951 assert(Scalarized != InstsToScalarize.end() &&
5952 "VF not yet analyzed for scalarization profitability");
5953 return !Scalarized->second.count(I) &&
5954 llvm::all_of(I->users(), [&](User *U) {
5955 auto *UI = cast<Instruction>(U);
5956 return !Scalarized->second.count(UI);
5957 });
5958 };
5959
5960 // With the exception of GEPs and PHIs, after scalarization there should
5961 // only be one copy of the instruction generated in the loop. This is
5962 // because the VF is either 1, or any instructions that need scalarizing
5963 // have already been dealt with by the time we get here. As a result,
5964 // it means we don't have to multiply the instruction cost by VF.
5965 assert(I->getOpcode() == Instruction::GetElementPtr ||
5966 I->getOpcode() == Instruction::PHI ||
5967 (I->getOpcode() == Instruction::BitCast &&
5968 I->getType()->isPointerTy()) ||
5969 HasSingleCopyAfterVectorization(I, VF));
5970 VectorTy = RetTy;
5971 } else
5972 VectorTy = toVectorizedTy(RetTy, VF);
5973
5974 if (VF.isVector() && VectorTy->isVectorTy() &&
5975 !TTI.getNumberOfParts(VectorTy))
5977
5978 // TODO: We need to estimate the cost of intrinsic calls.
5979 switch (I->getOpcode()) {
5980 case Instruction::GetElementPtr:
5981 // We mark this instruction as zero-cost because the cost of GEPs in
5982 // vectorized code depends on whether the corresponding memory instruction
5983 // is scalarized or not. Therefore, we handle GEPs with the memory
5984 // instruction cost.
5985 return 0;
5986 case Instruction::Br: {
5987 // In cases of scalarized and predicated instructions, there will be VF
5988 // predicated blocks in the vectorized loop. Each branch around these
5989 // blocks also requires an extract of its vector compare i1 element.
5990 // Note that the conditional branch from the loop latch will be replaced by
5991 // a single branch controlling the loop, so there is no extra overhead from
5992 // scalarization.
5993 bool ScalarPredicatedBB = false;
5995 if (VF.isVector() && BI->isConditional() &&
5996 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
5997 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
5998 BI->getParent() != TheLoop->getLoopLatch())
5999 ScalarPredicatedBB = true;
6000
6001 if (ScalarPredicatedBB) {
6002 // Not possible to scalarize scalable vector with predicated instructions.
6003 if (VF.isScalable())
6005 // Return cost for branches around scalarized and predicated blocks.
6006 auto *VecI1Ty =
6008 return (
6009 TTI.getScalarizationOverhead(
6010 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6011 /*Insert*/ false, /*Extract*/ true, CostKind) +
6012 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
6013 }
6014
6015 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6016 // The back-edge branch will remain, as will all scalar branches.
6017 return TTI.getCFInstrCost(Instruction::Br, CostKind);
6018
6019 // This branch will be eliminated by if-conversion.
6020 return 0;
6021 // Note: We currently assume zero cost for an unconditional branch inside
6022 // a predicated block since it will become a fall-through, although we
6023 // may decide in the future to call TTI for all branches.
6024 }
6025 case Instruction::Switch: {
6026 if (VF.isScalar())
6027 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6028 auto *Switch = cast<SwitchInst>(I);
6029 return Switch->getNumCases() *
6030 TTI.getCmpSelInstrCost(
6031 Instruction::ICmp,
6032 toVectorTy(Switch->getCondition()->getType(), VF),
6033 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6035 }
6036 case Instruction::PHI: {
6037 auto *Phi = cast<PHINode>(I);
6038
6039 // First-order recurrences are replaced by vector shuffles inside the loop.
6040 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6042 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6043 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6044 cast<VectorType>(VectorTy),
6045 cast<VectorType>(VectorTy), Mask, CostKind,
6046 VF.getKnownMinValue() - 1);
6047 }
6048
6049 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6050 // converted into select instructions. We require N - 1 selects per phi
6051 // node, where N is the number of incoming values.
6052 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6053 Type *ResultTy = Phi->getType();
6054
6055 // All instructions in an Any-of reduction chain are narrowed to bool.
6056 // Check if that is the case for this phi node.
6057 auto *HeaderUser = cast_if_present<PHINode>(
6058 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6059 auto *Phi = dyn_cast<PHINode>(U);
6060 if (Phi && Phi->getParent() == TheLoop->getHeader())
6061 return Phi;
6062 return nullptr;
6063 }));
6064 if (HeaderUser) {
6065 auto &ReductionVars = Legal->getReductionVars();
6066 auto Iter = ReductionVars.find(HeaderUser);
6067 if (Iter != ReductionVars.end() &&
6069 Iter->second.getRecurrenceKind()))
6070 ResultTy = Type::getInt1Ty(Phi->getContext());
6071 }
6072 return (Phi->getNumIncomingValues() - 1) *
6073 TTI.getCmpSelInstrCost(
6074 Instruction::Select, toVectorTy(ResultTy, VF),
6075 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6077 }
6078
6079 // When tail folding with EVL, if the phi is part of an out of loop
6080 // reduction then it will be transformed into a wide vp_merge.
6081 if (VF.isVector() && foldTailWithEVL() &&
6082 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6084 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6085 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6086 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6087 }
6088
6089 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6090 }
6091 case Instruction::UDiv:
6092 case Instruction::SDiv:
6093 case Instruction::URem:
6094 case Instruction::SRem:
6095 if (VF.isVector() && isPredicatedInst(I)) {
6096 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6097 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6098 ScalarCost : SafeDivisorCost;
6099 }
6100 // We've proven all lanes safe to speculate, fall through.
6101 [[fallthrough]];
6102 case Instruction::Add:
6103 case Instruction::Sub: {
6104 auto Info = Legal->getHistogramInfo(I);
6105 if (Info && VF.isVector()) {
6106 const HistogramInfo *HGram = Info.value();
6107 // Assume that a non-constant update value (or a constant != 1) requires
6108 // a multiply, and add that into the cost.
6110 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6111 if (!RHS || RHS->getZExtValue() != 1)
6112 MulCost =
6113 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6114
6115 // Find the cost of the histogram operation itself.
6116 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6117 Type *ScalarTy = I->getType();
6118 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6119 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6120 Type::getVoidTy(I->getContext()),
6121 {PtrTy, ScalarTy, MaskTy});
6122
6123 // Add the costs together with the add/sub operation.
6124 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6125 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6126 }
6127 [[fallthrough]];
6128 }
6129 case Instruction::FAdd:
6130 case Instruction::FSub:
6131 case Instruction::Mul:
6132 case Instruction::FMul:
6133 case Instruction::FDiv:
6134 case Instruction::FRem:
6135 case Instruction::Shl:
6136 case Instruction::LShr:
6137 case Instruction::AShr:
6138 case Instruction::And:
6139 case Instruction::Or:
6140 case Instruction::Xor: {
6141 // If we're speculating on the stride being 1, the multiplication may
6142 // fold away. We can generalize this for all operations using the notion
6143 // of neutral elements. (TODO)
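 // For example, for an access A[i * Stride] vectorized under the runtime
 // assumption Stride == 1, PSE folds the SCEV of the stride operand to 1,
 // so the multiply is free after vectorization.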
6144 if (I->getOpcode() == Instruction::Mul &&
6145 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6146 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6147 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6148 PSE.getSCEV(I->getOperand(1))->isOne())))
6149 return 0;
6150
6151 // Detect reduction patterns
6152 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6153 return *RedCost;
6154
6155 // Certain instructions can be cheaper to vectorize if they have a constant
6156 // second vector operand. One example of this are shifts on x86.
6157 Value *Op2 = I->getOperand(1);
6158 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6159 PSE.getSE()->isSCEVable(Op2->getType()) &&
6160 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6161 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6162 }
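 // For example, for 'x << s' where 's' is loop-invariant and SCEV proves it
 // is a constant, passing the constant to TTI lets targets (e.g. x86) use the
 // cheaper vector-shift-by-uniform-constant cost.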
6163 auto Op2Info = TTI.getOperandInfo(Op2);
6164 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6165 Legal->isInvariant(Op2))
6166 Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
6167
6168 SmallVector<const Value *, 4> Operands(I->operand_values());
6169 return TTI.getArithmeticInstrCost(
6170 I->getOpcode(), VectorTy, CostKind,
6171 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6172 Op2Info, Operands, I, TLI);
6173 }
6174 case Instruction::FNeg: {
6175 return TTI.getArithmeticInstrCost(
6176 I->getOpcode(), VectorTy, CostKind,
6177 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6178 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6179 I->getOperand(0), I);
6180 }
6181 case Instruction::Select: {
6182 SelectInst *SI = cast<SelectInst>(I);
6183 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6184 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6185
6186 const Value *Op0, *Op1;
6187 using namespace llvm::PatternMatch;
6188 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6189 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6190 // select x, y, false --> x & y
6191 // select x, true, y --> x | y
6192 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6193 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6194 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6195 Op1->getType()->getScalarSizeInBits() == 1);
6196
6197 return TTI.getArithmeticInstrCost(
6198 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6199 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6200 }
6201
6202 Type *CondTy = SI->getCondition()->getType();
6203 if (!ScalarCond)
6204 CondTy = VectorType::get(CondTy, VF);
6205
6206 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
6207 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6208 Pred = Cmp->getPredicate();
6209 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6210 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6211 {TTI::OK_AnyValue, TTI::OP_None}, I);
6212 }
6213 case Instruction::ICmp:
6214 case Instruction::FCmp: {
6215 Type *ValTy = I->getOperand(0)->getType();
6216
6217 if (canTruncateToMinimalBitwidth(I, VF)) {
6218 [[maybe_unused]] Instruction *Op0AsInstruction =
6219 dyn_cast<Instruction>(I->getOperand(0));
6220 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6221 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6222 "if both the operand and the compare are marked for "
6223 "truncation, they must have the same bitwidth");
6224 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6225 }
6226
6227 VectorTy = toVectorTy(ValTy, VF);
6228 return TTI.getCmpSelInstrCost(
6229 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6230 cast<CmpInst>(I)->getPredicate(), CostKind,
6231 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6232 }
6233 case Instruction::Store:
6234 case Instruction::Load: {
6235 ElementCount Width = VF;
6236 if (Width.isVector()) {
6237 InstWidening Decision = getWideningDecision(I, Width);
6238 assert(Decision != CM_Unknown &&
6239 "CM decision should be taken at this point");
6242 if (Decision == CM_Scalarize)
6243 Width = ElementCount::getFixed(1);
6244 }
6245 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6246 return getMemoryInstructionCost(I, VF);
6247 }
6248 case Instruction::BitCast:
6249 if (I->getType()->isPointerTy())
6250 return 0;
6251 [[fallthrough]];
6252 case Instruction::ZExt:
6253 case Instruction::SExt:
6254 case Instruction::FPToUI:
6255 case Instruction::FPToSI:
6256 case Instruction::FPExt:
6257 case Instruction::PtrToInt:
6258 case Instruction::IntToPtr:
6259 case Instruction::SIToFP:
6260 case Instruction::UIToFP:
6261 case Instruction::Trunc:
6262 case Instruction::FPTrunc: {
6263 // Computes the CastContextHint from a Load/Store instruction.
6264 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6266 "Expected a load or a store!");
6267
6268 if (VF.isScalar() || !TheLoop->contains(I))
6269 return TTI::CastContextHint::Normal;
6270
6271 switch (getWideningDecision(I, VF)) {
6272 case CM_GatherScatter:
6273 return TTI::CastContextHint::GatherScatter;
6274 case CM_Interleave:
6275 return TTI::CastContextHint::Interleave;
6276 case CM_Scalarize:
6277 case CM_Widen:
6278 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
6279 : TTI::CastContextHint::Normal;
6280 case CM_Widen_Reverse:
6281 return TTI::CastContextHint::Reversed;
6282 case CM_Unknown:
6283 llvm_unreachable("Instr did not go through cost modelling?");
6284 case CM_VectorCall:
6285 case CM_IntrinsicCall:
6286 llvm_unreachable_internal("Instr has invalid widening decision");
6287 }
6288
6289 llvm_unreachable("Unhandled case!");
6290 };
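 // For example, in 'sum += (i32)(i16)a[i]' the sext's source is a load; if
 // that load is widened to a masked, reversed or gather/scatter access, the
 // extend may fold into the memory operation, and the hint computed by
 // ComputeCCH lets TTI price the cast accordingly.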
6291
6292 unsigned Opcode = I->getOpcode();
6293 TTI::CastContextHint CCH = TTI::CastContextHint::None;
6294 // For Trunc, the context is the only user, which must be a StoreInst.
6295 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6296 if (I->hasOneUse())
6297 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6298 CCH = ComputeCCH(Store);
6299 }
6300 // For Z/Sext, the context is the operand, which must be a LoadInst.
6301 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6302 Opcode == Instruction::FPExt) {
6303 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6304 CCH = ComputeCCH(Load);
6305 }
6306
6307 // We optimize the truncation of induction variables having constant
6308 // integer steps. The cost of these truncations is the same as the scalar
6309 // operation.
6310 if (isOptimizableIVTruncate(I, VF)) {
6311 auto *Trunc = cast<TruncInst>(I);
6312 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6313 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6314 }
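 // For example, 'trunc i64 %iv to i32' where %iv is an induction with a
 // constant step can be generated as a separate i32 induction instead of
 // truncating a wide vector, so it is costed like the scalar truncate here.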
6315
6316 // Detect reduction patterns
6317 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6318 return *RedCost;
6319
6320 Type *SrcScalarTy = I->getOperand(0)->getType();
6321 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6322 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6323 SrcScalarTy =
6324 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6325 Type *SrcVecTy =
6326 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6327
6328 if (canTruncateToMinimalBitwidth(I, VF)) {
6329 // If the result type is <= the source type, there will be no extend
6330 // after truncating the users to the minimal required bitwidth.
6331 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6332 (I->getOpcode() == Instruction::ZExt ||
6333 I->getOpcode() == Instruction::SExt))
6334 return 0;
6335 }
6336
6337 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6338 }
6339 case Instruction::Call:
6340 return getVectorCallCost(cast<CallInst>(I), VF);
6341 case Instruction::ExtractValue:
6342 return TTI.getInstructionCost(I, CostKind);
6343 case Instruction::Alloca:
6344 // We cannot easily widen alloca to a scalable alloca, as
6345 // the result would need to be a vector of pointers.
6346 if (VF.isScalable())
6347 return InstructionCost::getInvalid();
6348 [[fallthrough]];
6349 default:
6350 // This opcode is unknown. Assume that it is the same as 'mul'.
6351 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6352 } // end of switch.
6353}
6354
6355void LoopVectorizationCostModel::collectValuesToIgnore() {
6356 // Ignore ephemeral values.
6357 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6358
6359 SmallVector<Value *, 4> DeadInterleavePointerOps;
6360 SmallVector<Value *, 4> DeadOps;
6361
6362 // If a scalar epilogue is required, users outside the loop won't use
6363 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
6364 // that is the case.
6365 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6366 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6367 return RequiresScalarEpilogue &&
6368 !TheLoop->contains(cast<Instruction>(U)->getParent());
6369 };
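 // For example, if a scalar epilogue is required, a value used only after the
 // loop is taken from the epilogue's final iteration, so the vector loop's
 // copy of that live-out is effectively dead for costing purposes.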
6370
6371 LoopBlocksDFS DFS(TheLoop);
6372 DFS.perform(LI);
6373 MapVector<Value *, SmallVector<Value *>> DeadInvariantStoreOps;
6374 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6375 for (Instruction &I : reverse(*BB)) {
6376 // Find all stores to invariant variables. Since they are going to sink
6377 // outside the loop, we do not need to calculate the cost for them.
6378 StoreInst *SI;
6379 if ((SI = dyn_cast<StoreInst>(&I)) &&
6380 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
6381 ValuesToIgnore.insert(&I);
6382 DeadInvariantStoreOps[SI->getPointerOperand()].push_back(
6383 SI->getValueOperand());
6384 }
6385
6386 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6387 continue;
6388
6389 // Add instructions that would be trivially dead and are only used by
6390 // values already ignored to DeadOps to seed worklist.
6391 if (wouldInstructionBeTriviallyDead(&I, TLI) &&
6392 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6393 return VecValuesToIgnore.contains(U) ||
6394 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6395 }))
6396 DeadOps.push_back(&I);
6397
6398 // For interleave groups, we only create a pointer for the start of the
6399 // interleave group. Queue up addresses of group members except the insert
6400 // position for further processing.
6401 if (isAccessInterleaved(&I)) {
6402 auto *Group = getInterleavedAccessGroup(&I);
6403 if (Group->getInsertPos() == &I)
6404 continue;
6405 Value *PointerOp = getLoadStorePointerOperand(&I);
6406 DeadInterleavePointerOps.push_back(PointerOp);
6407 }
6408
6409 // Queue branches for analysis. They are dead, if their successors only
6410 // contain dead instructions.
6411 if (auto *Br = dyn_cast<BranchInst>(&I)) {
6412 if (Br->isConditional())
6413 DeadOps.push_back(&I);
6414 }
6415 }
6416
6417 // Mark ops feeding interleave group members as free, if they are only used
6418 // by other dead computations.
6419 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6420 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6421 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6422 Instruction *UI = cast<Instruction>(U);
6423 return !VecValuesToIgnore.contains(U) &&
6424 (!isAccessInterleaved(UI) ||
6425 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6426 }))
6427 continue;
6428 VecValuesToIgnore.insert(Op);
6429 append_range(DeadInterleavePointerOps, Op->operands());
6430 }
6431
6432 for (const auto &[_, Ops] : DeadInvariantStoreOps)
6433 llvm::append_range(DeadOps, drop_end(Ops));
6434
6435 // Mark ops that would be trivially dead and are only used by ignored
6436 // instructions as free.
6437 BasicBlock *Header = TheLoop->getHeader();
6438
6439 // Returns true if the block contains only dead instructions. Such blocks will
6440 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6441 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6442 auto IsEmptyBlock = [this](BasicBlock *BB) {
6443 return all_of(*BB, [this](Instruction &I) {
6444 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6445 (isa<BranchInst>(&I) && !cast<BranchInst>(&I)->isConditional());
6446 });
6447 };
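 // For example, 'if (c) { /* only already-ignored stores */ }' leaves a
 // conditional branch whose then-block is empty once the dead instructions
 // above are ignored; such branches (and their conditions) become free too.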
6448 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6449 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6450
6451 // Check if the branch should be considered dead.
6452 if (auto *Br = dyn_cast_or_null<BranchInst>(Op)) {
6453 BasicBlock *ThenBB = Br->getSuccessor(0);
6454 BasicBlock *ElseBB = Br->getSuccessor(1);
6455 // Don't consider branches leaving the loop for simplification.
6456 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6457 continue;
6458 bool ThenEmpty = IsEmptyBlock(ThenBB);
6459 bool ElseEmpty = IsEmptyBlock(ElseBB);
6460 if ((ThenEmpty && ElseEmpty) ||
6461 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6462 ElseBB->phis().empty()) ||
6463 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6464 ThenBB->phis().empty())) {
6465 VecValuesToIgnore.insert(Br);
6466 DeadOps.push_back(Br->getCondition());
6467 }
6468 continue;
6469 }
6470
6471 // Skip any op that shouldn't be considered dead.
6472 if (!Op || !TheLoop->contains(Op) ||
6473 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6474 !wouldInstructionBeTriviallyDead(Op, TLI) ||
6475 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6476 return !VecValuesToIgnore.contains(U) &&
6477 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6478 }))
6479 continue;
6480
6481 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6482 // which applies for both scalar and vector versions. Otherwise it is only
6483 // dead in vector versions, so only add it to VecValuesToIgnore.
6484 if (all_of(Op->users(),
6485 [this](User *U) { return ValuesToIgnore.contains(U); }))
6486 ValuesToIgnore.insert(Op);
6487
6488 VecValuesToIgnore.insert(Op);
6489 append_range(DeadOps, Op->operands());
6490 }
6491
6492 // Ignore type-promoting instructions we identified during reduction
6493 // detection.
6494 for (const auto &Reduction : Legal->getReductionVars()) {
6495 const RecurrenceDescriptor &RedDes = Reduction.second;
6496 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6497 VecValuesToIgnore.insert_range(Casts);
6498 }
6499 // Ignore type-casting instructions we identified during induction
6500 // detection.
6501 for (const auto &Induction : Legal->getInductionVars()) {
6502 const InductionDescriptor &IndDes = Induction.second;
6503 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6504 VecValuesToIgnore.insert_range(Casts);
6505 }
6506}
6507
6508void LoopVectorizationCostModel::collectInLoopReductions() {
6509 // Avoid duplicating work finding in-loop reductions.
6510 if (!InLoopReductions.empty())
6511 return;
6512
6513 for (const auto &Reduction : Legal->getReductionVars()) {
6514 PHINode *Phi = Reduction.first;
6515 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6516
6517 // We don't collect reductions that are type promoted (yet).
6518 if (RdxDesc.getRecurrenceType() != Phi->getType())
6519 continue;
6520
6521 // If the target would prefer this reduction to happen "in-loop", then we
6522 // want to record it as such.
6523 RecurKind Kind = RdxDesc.getRecurrenceKind();
6524 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6525 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6526 continue;
6527
6528 // Check that we can correctly put the reductions into the loop, by
6529 // finding the chain of operations that leads from the phi to the loop
6530 // exit value.
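 // For example, for 'sum += a[i]' the chain is the single add feeding the
 // header phi; an in-loop reduction keeps a scalar accumulator and reduces
 // each vector of loaded values inside the loop (roughly via
 // llvm.vector.reduce.add), instead of keeping a vector accumulator and
 // reducing once after the loop.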
6531 SmallVector<Instruction *, 4> ReductionOperations =
6532 RdxDesc.getReductionOpChain(Phi, TheLoop);
6533 bool InLoop = !ReductionOperations.empty();
6534
6535 if (InLoop) {
6536 InLoopReductions.insert(Phi);
6537 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6538 Instruction *LastChain = Phi;
6539 for (auto *I : ReductionOperations) {
6540 InLoopReductionImmediateChains[I] = LastChain;
6541 LastChain = I;
6542 }
6543 }
6544 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6545 << " reduction for phi: " << *Phi << "\n");
6546 }
6547}
6548
6549// This function will select a scalable VF if the target supports scalable
6550// vectors and a fixed one otherwise.
6551// TODO: we could return a pair of values that specify the max VF and
6552// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6553// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6554// doesn't have a cost model that can choose which plan to execute if
6555// more than one is generated.
6556static ElementCount determineVPlanVF(const TargetTransformInfo &TTI,
6557 LoopVectorizationCostModel &CM) {
6558 unsigned WidestType;
6559 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6560
6561 TargetTransformInfo::RegisterKind RegKind =
6562 TTI.enableScalableVectorization()
6563 ? TargetTransformInfo::RGK_ScalableVector
6564 : TargetTransformInfo::RGK_FixedWidthVector;
6565
6566 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6567 unsigned N = RegSize.getKnownMinValue() / WidestType;
6568 return ElementCount::get(N, RegSize.isScalable());
6569}
6570
6571VectorizationFactor
6572LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
6573 ElementCount VF = UserVF;
6574 // Outer loop handling: They may require CFG and instruction level
6575 // transformations before even evaluating whether vectorization is profitable.
6576 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6577 // the vectorization pipeline.
6578 if (!OrigLoop->isInnermost()) {
6579 // If the user doesn't provide a vectorization factor, determine a
6580 // reasonable one.
6581 if (UserVF.isZero()) {
6582 VF = determineVPlanVF(TTI, CM);
6583 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6584
6585 // Make sure we have a VF > 1 for stress testing.
6586 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6587 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6588 << "overriding computed VF.\n");
6589 VF = ElementCount::getFixed(4);
6590 }
6591 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6592 !ForceTargetSupportsScalableVectors) {
6593 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6594 << "not supported by the target.\n");
6596 "Scalable vectorization requested but not supported by the target",
6597 "the scalable user-specified vectorization width for outer-loop "
6598 "vectorization cannot be used because the target does not support "
6599 "scalable vectors.",
6600 "ScalableVFUnfeasible", ORE, OrigLoop);
6601 return VectorizationFactor::Disabled();
6602 }
6603 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6605 "VF needs to be a power of two");
6606 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6607 << "VF " << VF << " to build VPlans.\n");
6608 buildVPlans(VF, VF);
6609
6610 if (VPlans.empty())
6611 return VectorizationFactor::Disabled();
6612
6613 // For VPlan build stress testing, we bail out after VPlan construction.
6614 if (VPlanBuildStressTest)
6615 return VectorizationFactor::Disabled();
6616
6617 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6618 }
6619
6620 LLVM_DEBUG(
6621 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6622 "VPlan-native path.\n");
6623 return VectorizationFactor::Disabled();
6624}
6625
6626void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6627 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6628 CM.collectValuesToIgnore();
6629 CM.collectElementTypesForWidening();
6630
6631 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6632 if (!MaxFactors) // Cases that should not be vectorized or interleaved.
6633 return;
6634
6635 // Invalidate interleave groups if all blocks of loop will be predicated.
6636 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6637 !useMaskedInterleavedAccesses(TTI)) {
6638 LLVM_DEBUG(
6639 dbgs()
6640 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6641 "which requires masked-interleaved support.\n");
6642 if (CM.InterleaveInfo.invalidateGroups())
6643 // Invalidating interleave groups also requires invalidating all decisions
6644 // based on them, which includes widening decisions and uniform and scalar
6645 // values.
6646 CM.invalidateCostModelingDecisions();
6647 }
6648
6649 if (CM.foldTailByMasking())
6650 Legal->prepareToFoldTailByMasking();
6651
6652 ElementCount MaxUserVF =
6653 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6654 if (UserVF) {
6655 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6657 "UserVF ignored because it may be larger than the maximal safe VF",
6658 "InvalidUserVF", ORE, OrigLoop);
6659 } else {
6661 "VF needs to be a power of two");
6662 // Collect the instructions (and their associated costs) that will be more
6663 // profitable to scalarize.
6664 CM.collectInLoopReductions();
6665 if (CM.selectUserVectorizationFactor(UserVF)) {
6666 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6667 buildVPlansWithVPRecipes(UserVF, UserVF);
6668 LLVM_DEBUG(printPlans(dbgs()));
6669 return;
6670 }
6671 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6672 "InvalidCost", ORE, OrigLoop);
6673 }
6674 }
6675
6676 // Collect the Vectorization Factor Candidates.
6677 SmallVector<ElementCount> VFCandidates;
6678 for (auto VF = ElementCount::getFixed(1);
6679 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6680 VFCandidates.push_back(VF);
6681 for (auto VF = ElementCount::getScalable(1);
6682 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6683 VFCandidates.push_back(VF);
6684
6685 CM.collectInLoopReductions();
6686 for (const auto &VF : VFCandidates) {
6687 // Collect Uniform and Scalar instructions after vectorization with VF.
6688 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6689 }
6690
6691 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6692 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6693
6694 LLVM_DEBUG(printPlans(dbgs()));
6695}
6696
6697InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
6698 ElementCount VF) const {
6699 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6700 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6702 return Cost;
6703}
6704
6705bool VPCostContext::isLegacyUniformAfterVectorization(Instruction *I,
6706 ElementCount VF) const {
6707 return CM.isUniformAfterVectorization(I, VF);
6708}
6709
6710bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6711 return CM.ValuesToIgnore.contains(UI) ||
6712 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6713 SkipCostComputation.contains(UI);
6714}
6715
6716InstructionCost
6717LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6718 VPCostContext &CostCtx) const {
6719 InstructionCost Cost = 0;
6720 // Cost modeling for inductions is inaccurate in the legacy cost model
6721 // compared to the recipes that are generated. To match here initially during
6722 // VPlan cost model bring up directly use the induction costs from the legacy
6723 // cost model. Note that we do this as pre-processing; the VPlan may not have
6724 // any recipes associated with the original induction increment instruction
6725 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6726 // the cost of induction phis and increments (both that are represented by
6727 // recipes and those that are not), to avoid distinguishing between them here,
6728 // and skip all recipes that represent induction phis and increments (the
6729 // former case) later on, if they exist, to avoid counting them twice.
6730 // Similarly we pre-compute the cost of any optimized truncates.
6731 // TODO: Switch to more accurate costing based on VPlan.
6732 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6733 Instruction *IVInc = cast<Instruction>(
6734 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6735 SmallVector<Instruction *> IVInsts = {IVInc};
6736 for (unsigned I = 0; I != IVInsts.size(); I++) {
6737 for (Value *Op : IVInsts[I]->operands()) {
6738 auto *OpI = dyn_cast<Instruction>(Op);
6739 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6740 continue;
6741 IVInsts.push_back(OpI);
6742 }
6743 }
6744 IVInsts.push_back(IV);
6745 for (User *U : IV->users()) {
6746 auto *CI = cast<Instruction>(U);
6747 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6748 continue;
6749 IVInsts.push_back(CI);
6750 }
6751
6752 // If the vector loop gets executed exactly once with the given VF, ignore
6753 // the costs of comparison and induction instructions, as they'll get
6754 // simplified away.
6755 // TODO: Remove this code after stepping away from the legacy cost model and
6756 // adding code to simplify VPlans before calculating their costs.
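 // For example, with a constant trip count of 8 and VF = 8 the vector loop
 // body runs exactly once, so the induction update and the latch compare fold
 // away and should not be charged.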
6757 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6758 if (TC == VF && !CM.foldTailByMasking())
6759 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6760 CostCtx.SkipCostComputation);
6761
6762 for (Instruction *IVInst : IVInsts) {
6763 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6764 continue;
6765 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6766 LLVM_DEBUG({
6767 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6768 << ": induction instruction " << *IVInst << "\n";
6769 });
6770 Cost += InductionCost;
6771 CostCtx.SkipCostComputation.insert(IVInst);
6772 }
6773 }
6774
6775 /// Compute the cost of all exiting conditions of the loop using the legacy
6776 /// cost model. This is to match the legacy behavior, which adds the cost of
6777 /// all exit conditions. Note that this over-estimates the cost, as there will
6778 /// be a single condition to control the vector loop.
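 // For example, for a loop with an early exit such as
 //   for (i = 0; i < n; i++) if (a[i] == x) break;
 // both the latch compare and the early-exit compare are costed here, even
 // though the vectorized loop ends up with a single combined exit condition.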
6779 SmallVector<BasicBlock *> Exiting;
6780 CM.TheLoop->getExitingBlocks(Exiting);
6781 SetVector<Instruction *> ExitInstrs;
6782 // Collect all exit conditions.
6783 for (BasicBlock *EB : Exiting) {
6784 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
6785 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6786 continue;
6787 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6788 ExitInstrs.insert(CondI);
6789 }
6790 }
6791 // Compute the cost of all instructions only feeding the exit conditions.
6792 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6793 Instruction *CondI = ExitInstrs[I];
6794 if (!OrigLoop->contains(CondI) ||
6795 !CostCtx.SkipCostComputation.insert(CondI).second)
6796 continue;
6797 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6798 LLVM_DEBUG({
6799 dbgs() << "Cost of " << CondICost << " for VF " << VF
6800 << ": exit condition instruction " << *CondI << "\n";
6801 });
6802 Cost += CondICost;
6803 for (Value *Op : CondI->operands()) {
6804 auto *OpI = dyn_cast<Instruction>(Op);
6805 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6806 any_of(OpI->users(), [&ExitInstrs, this](User *U) {
6807 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
6808 !ExitInstrs.contains(cast<Instruction>(U));
6809 }))
6810 continue;
6811 ExitInstrs.insert(OpI);
6812 }
6813 }
6814
6815 // Pre-compute the costs for branches except for the backedge, as the number
6816 // of replicate regions in a VPlan may not directly match the number of
6817 // branches, which would lead to different decisions.
6818 // TODO: Compute cost of branches for each replicate region in the VPlan,
6819 // which is more accurate than the legacy cost model.
6820 for (BasicBlock *BB : OrigLoop->blocks()) {
6821 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6822 continue;
6823 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6824 if (BB == OrigLoop->getLoopLatch())
6825 continue;
6826 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6827 Cost += BranchCost;
6828 }
6829
6830 // Pre-compute costs for instructions that are forced-scalar or profitable to
6831 // scalarize. Their costs will be computed separately in the legacy cost
6832 // model.
6833 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
6834 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
6835 continue;
6836 CostCtx.SkipCostComputation.insert(ForcedScalar);
6837 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
6838 LLVM_DEBUG({
6839 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
6840 << ": forced scalar " << *ForcedScalar << "\n";
6841 });
6842 Cost += ForcedCost;
6843 }
6844 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6845 if (CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6846 continue;
6847 CostCtx.SkipCostComputation.insert(Scalarized);
6848 LLVM_DEBUG({
6849 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6850 << ": profitable to scalarize " << *Scalarized << "\n";
6851 });
6852 Cost += ScalarCost;
6853 }
6854
6855 return Cost;
6856}
6857
6858InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
6859 ElementCount VF) const {
6860 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind);
6861 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6862
6863 // Now compute and add the VPlan-based cost.
6864 Cost += Plan.cost(VF, CostCtx);
6865#ifndef NDEBUG
6866 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
6867 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6868 << " (Estimated cost per lane: ");
6869 if (Cost.isValid()) {
6870 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6871 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6872 } else /* No point dividing an invalid cost - it will still be invalid */
6873 LLVM_DEBUG(dbgs() << "Invalid");
6874 LLVM_DEBUG(dbgs() << ")\n");
6875#endif
6876 return Cost;
6877}
6878
6879#ifndef NDEBUG
6880/// Return true if the original loop \p TheLoop contains any instructions that do
6881/// not have corresponding recipes in \p Plan and are not marked to be ignored
6882/// in \p CostCtx. This means the VPlan contains simplification that the legacy
6883/// cost-model did not account for.
6884static bool planContainsAdditionalSimplifications(VPlan &Plan,
6885 VPCostContext &CostCtx,
6886 Loop *TheLoop,
6887 ElementCount VF) {
6888 // First collect all instructions for the recipes in Plan.
6889 auto GetInstructionForCost = [](const VPRecipeBase *R) -> Instruction * {
6890 if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
6891 return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
6892 if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
6893 return &WidenMem->getIngredient();
6894 return nullptr;
6895 };
6896
6897 DenseSet<Instruction *> SeenInstrs;
6898 auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry());
6899 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
6900 for (VPRecipeBase &R : *VPBB) {
6901 if (auto *IR = dyn_cast<VPInterleaveRecipe>(&R)) {
6902 auto *IG = IR->getInterleaveGroup();
6903 unsigned NumMembers = IG->getNumMembers();
6904 for (unsigned I = 0; I != NumMembers; ++I) {
6905 if (Instruction *M = IG->getMember(I))
6906 SeenInstrs.insert(M);
6907 }
6908 continue;
6909 }
6910 // Unused FOR splices are removed by VPlan transforms, so the VPlan-based
6911 // cost model won't cost it whilst the legacy will.
6912 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) {
6913 using namespace VPlanPatternMatch;
6914 if (none_of(FOR->users(),
6915 match_fn(m_VPInstruction<
6916 VPInstruction::FirstOrderRecurrenceSplice>())))
6917 return true;
6918 }
6919 // The VPlan-based cost model is more accurate for partial reduction and
6920 // comparing against the legacy cost isn't desirable.
6921 if (isa<VPPartialReductionRecipe>(&R))
6922 return true;
6923
6924 // The VPlan-based cost model can analyze if recipes are scalar
6925 // recursively, but the legacy cost model cannot.
6926 if (auto *WidenMemR = dyn_cast<VPWidenMemoryRecipe>(&R)) {
6927 auto *AddrI = dyn_cast<Instruction>(
6928 getLoadStorePointerOperand(&WidenMemR->getIngredient()));
6929 if (AddrI && vputils::isSingleScalar(WidenMemR->getAddr()) !=
6930 CostCtx.isLegacyUniformAfterVectorization(AddrI, VF))
6931 return true;
6932 }
6933
6934 /// If a VPlan transform folded a recipe to one producing a single-scalar,
6935 /// but the original instruction wasn't uniform-after-vectorization in the
6936 /// legacy cost model, the legacy cost overestimates the actual cost.
6937 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
6938 if (RepR->isSingleScalar() &&
6939 !CostCtx.isLegacyUniformAfterVectorization(
6940 RepR->getUnderlyingInstr(), VF))
6941 return true;
6942 }
6943 if (Instruction *UI = GetInstructionForCost(&R)) {
6944 // If we adjusted the predicate of the recipe, the cost in the legacy
6945 // cost model may be different.
6946 using namespace VPlanPatternMatch;
6947 CmpPredicate Pred;
6948 if (match(&R, m_Cmp(Pred, m_VPValue(), m_VPValue())) &&
6949 cast<VPRecipeWithIRFlags>(R).getPredicate() !=
6950 cast<CmpInst>(UI)->getPredicate())
6951 return true;
6952 SeenInstrs.insert(UI);
6953 }
6954 }
6955 }
6956
6957 // Return true if the loop contains any instructions that are not also part of
6958 // the VPlan or are skipped for VPlan-based cost computations. This indicates
6959 // that the VPlan contains extra simplifications.
6960 return any_of(TheLoop->blocks(), [&SeenInstrs, &CostCtx,
6961 TheLoop](BasicBlock *BB) {
6962 return any_of(*BB, [&SeenInstrs, &CostCtx, TheLoop, BB](Instruction &I) {
6963 // Skip induction phis when checking for simplifications, as they may not
6964 // be lowered directly to a corresponding PHI recipe.
6965 if (isa<PHINode>(&I) && BB == TheLoop->getHeader() &&
6966 CostCtx.CM.Legal->isInductionPhi(cast<PHINode>(&I)))
6967 return false;
6968 return !SeenInstrs.contains(&I) && !CostCtx.skipCostComputation(&I, true);
6969 });
6970 });
6971}
6972#endif
6973
6974VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
6975 if (VPlans.empty())
6976 return VectorizationFactor::Disabled();
6977 // If there is a single VPlan with a single VF, return it directly.
6978 VPlan &FirstPlan = *VPlans[0];
6979 if (VPlans.size() == 1 && size(FirstPlan.vectorFactors()) == 1)
6980 return {*FirstPlan.vectorFactors().begin(), 0, 0};
6981
6982 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
6983 << (CM.CostKind == TTI::TCK_RecipThroughput
6984 ? "Reciprocal Throughput\n"
6985 : CM.CostKind == TTI::TCK_Latency
6986 ? "Instruction Latency\n"
6987 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
6988 : CM.CostKind == TTI::TCK_SizeAndLatency
6989 ? "Code Size and Latency\n"
6990 : "Unknown\n"));
6991
6992 ElementCount ScalarVF = ElementCount::getFixed(1);
6993 assert(hasPlanWithVF(ScalarVF) &&
6994 "More than a single plan/VF w/o any plan having scalar VF");
6995
6996 // TODO: Compute scalar cost using VPlan-based cost model.
6997 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
6998 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
6999 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
7000 VectorizationFactor BestFactor = ScalarFactor;
7001
7002 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
7003 if (ForceVectorization) {
7004 // Ignore scalar width, because the user explicitly wants vectorization.
7005 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
7006 // evaluation.
7007 BestFactor.Cost = InstructionCost::getMax();
7008 }
7009
7010 for (auto &P : VPlans) {
7011 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
7012 P->vectorFactors().end());
7013
7015 if (any_of(VFs, [this](ElementCount VF) {
7016 return CM.shouldConsiderRegPressureForVF(VF);
7017 }))
7018 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7019
7020 for (unsigned I = 0; I < VFs.size(); I++) {
7021 ElementCount VF = VFs[I];
7022 if (VF.isScalar())
7023 continue;
7024 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
7025 LLVM_DEBUG(
7026 dbgs()
7027 << "LV: Not considering vector loop of width " << VF
7028 << " because it will not generate any vector instructions.\n");
7029 continue;
7030 }
7031 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
7032 LLVM_DEBUG(
7033 dbgs()
7034 << "LV: Not considering vector loop of width " << VF
7035 << " because it would cause replicated blocks to be generated,"
7036 << " which isn't allowed when optimizing for size.\n");
7037 continue;
7038 }
7039
7040 InstructionCost Cost = cost(*P, VF);
7041 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
7042
7043 if (CM.shouldConsiderRegPressureForVF(VF) &&
7044 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs)) {
7045 LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
7046 << VF << " because it uses too many registers\n");
7047 continue;
7048 }
7049
7050 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail()))
7051 BestFactor = CurrentFactor;
7052
7053 // If profitable add it to ProfitableVF list.
7054 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
7055 ProfitableVFs.push_back(CurrentFactor);
7056 }
7057 }
7058
7059#ifndef NDEBUG
7060 // Select the optimal vectorization factor according to the legacy cost-model.
7061 // This is now only used to verify the decisions by the new VPlan-based
7062 // cost-model and will be retired once the VPlan-based cost-model is
7063 // stabilized.
7064 VectorizationFactor LegacyVF = selectVectorizationFactor();
7065 VPlan &BestPlan = getPlanFor(BestFactor.Width);
7066
7067 // Pre-compute the cost and use it to check if BestPlan contains any
7068 // simplifications not accounted for in the legacy cost model. If that's the
7069 // case, don't trigger the assertion, as the extra simplifications may cause a
7070 // different VF to be picked by the VPlan-based cost model.
7071 VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind);
7072 precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
7073 // Verify that the VPlan-based and legacy cost models agree, except for VPlans
7074 // with early exits and plans with additional VPlan simplifications. The
7075 // legacy cost model doesn't properly model costs for such loops.
7076 assert((BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() ||
7077 planContainsAdditionalSimplifications(getPlanFor(BestFactor.Width),
7078 CostCtx, OrigLoop,
7079 BestFactor.Width) ||
7080 planContainsAdditionalSimplifications(
7081 getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) &&
7082 " VPlan cost model and legacy cost model disagreed");
7083 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7084 "when vectorizing, the scalar cost must be computed.");
7085#endif
7086
7087 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7088 return BestFactor;
7089}
7090
7091static Value *getStartValueFromReductionResult(VPInstruction *RdxResult) {
7092 using namespace VPlanPatternMatch;
7094 "RdxResult must be ComputeFindIVResult");
7095 VPValue *StartVPV = RdxResult->getOperand(1);
7096 match(StartVPV, m_Freeze(m_VPValue(StartVPV)));
7097 return StartVPV->getLiveInIRValue();
7098}
7099
7100 // If \p EpiResumePhiR is the resume VPPhi for a reduction when vectorizing the
7101// epilog loop, fix the reduction's scalar PHI node by adding the incoming value
7102// from the main vector loop.
7103static void fixReductionScalarResumeWhenVectorizingEpilog(
7104 VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock) {
7105 // Get the VPInstruction computing the reduction result in the middle block.
7106 // The first operand may not be from the middle block if it is not connected
7107 // to the scalar preheader. In that case, there's nothing to fix.
7108 VPValue *Incoming = EpiResumePhiR->getOperand(0);
7111 auto *EpiRedResult = dyn_cast<VPInstruction>(Incoming);
7112 if (!EpiRedResult ||
7113 (EpiRedResult->getOpcode() != VPInstruction::ComputeAnyOfResult &&
7114 EpiRedResult->getOpcode() != VPInstruction::ComputeReductionResult &&
7115 EpiRedResult->getOpcode() != VPInstruction::ComputeFindIVResult))
7116 return;
7117
7118 auto *EpiRedHeaderPhi =
7119 cast<VPReductionPHIRecipe>(EpiRedResult->getOperand(0));
7120 RecurKind Kind = EpiRedHeaderPhi->getRecurrenceKind();
7121 Value *MainResumeValue;
7122 if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
7123 assert((VPI->getOpcode() == VPInstruction::Broadcast ||
7124 VPI->getOpcode() == VPInstruction::ReductionStartVector) &&
7125 "unexpected start recipe");
7126 MainResumeValue = VPI->getOperand(0)->getUnderlyingValue();
7127 } else
7128 MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
7129 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind)) {
7130 [[maybe_unused]] Value *StartV =
7131 EpiRedResult->getOperand(1)->getLiveInIRValue();
7132 auto *Cmp = cast<ICmpInst>(MainResumeValue);
7133 assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
7134 "AnyOf expected to start with ICMP_NE");
7135 assert(Cmp->getOperand(1) == StartV &&
7136 "AnyOf expected to start by comparing main resume value to original "
7137 "start value");
7138 MainResumeValue = Cmp->getOperand(0);
7140 Value *StartV = getStartValueFromReductionResult(EpiRedResult);
7141 Value *SentinelV = EpiRedResult->getOperand(2)->getLiveInIRValue();
7142 using namespace llvm::PatternMatch;
7143 Value *Cmp, *OrigResumeV, *CmpOp;
7144 [[maybe_unused]] bool IsExpectedPattern =
7145 match(MainResumeValue,
7146 m_Select(m_OneUse(m_Value(Cmp)), m_Specific(SentinelV),
7147 m_Value(OrigResumeV))) &&
7149 m_Value(CmpOp))) &&
7150 ((CmpOp == StartV && isGuaranteedNotToBeUndefOrPoison(CmpOp))));
7151 assert(IsExpectedPattern && "Unexpected reduction resume pattern");
7152 MainResumeValue = OrigResumeV;
7153 }
7154 PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
7155
7156 // When fixing reductions in the epilogue loop we should already have
7157 // created a bc.merge.rdx Phi after the main vector body. Ensure that we carry
7158 // over the incoming values correctly.
7159 EpiResumePhi.setIncomingValueForBlock(
7160 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7161}
7162
7163DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
7164 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7165 InnerLoopVectorizer &ILV, DominatorTree *DT, bool VectorizingEpilogue) {
7166 assert(BestVPlan.hasVF(BestVF) &&
7167 "Trying to execute plan with unsupported VF");
7168 assert(BestVPlan.hasUF(BestUF) &&
7169 "Trying to execute plan with unsupported UF");
7170 if (BestVPlan.hasEarlyExit())
7171 ++LoopsEarlyExitVectorized;
7172 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7173 // cost model is complete for better cost estimates.
7178 bool HasBranchWeights =
7179 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7180 if (HasBranchWeights) {
7181 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7183 BestVPlan, BestVF, VScale);
7184 }
7185
7186 // Checks are the same for all VPlans, added to BestVPlan only for
7187 // compactness.
7188 attachRuntimeChecks(BestVPlan, ILV.RTChecks, HasBranchWeights);
7189
7190 // Retrieving VectorPH now when it's easier while VPlan still has Regions.
7191 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7192
7193 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7196 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7197 BestVPlan.getScalarPreheader()) {
7198 // TODO: The vector loop would be dead, should not even try to vectorize.
7199 ORE->emit([&]() {
7200 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7201 OrigLoop->getStartLoc(),
7202 OrigLoop->getHeader())
7203 << "Created vector loop never executes due to insufficient trip "
7204 "count.";
7205 });
7206 });
7207 }
7208
7210 BestVPlan, BestVF,
7211 TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector));
7212 VPlanTransforms::cse(BestVPlan);
7214
7216 // Regions are dissolved after optimizing for VF and UF, which completely
7217 // removes unneeded loop regions first.
7219 // Canonicalize EVL loops after regions are dissolved.
7223 BestVPlan, VectorPH, CM.foldTailByMasking(),
7224 CM.requiresScalarEpilogue(BestVF.isVector()));
7225 VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF);
7227
7228 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7229 // making any changes to the CFG.
7230 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7231 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7232 if (!ILV.getTripCount())
7233 ILV.setTripCount(BestVPlan.getTripCount()->getLiveInIRValue());
7234 else
7235 assert(VectorizingEpilogue && "should only re-use the existing trip "
7236 "count during epilogue vectorization");
7237
7238 // Perform the actual loop transformation.
7239 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7240 OrigLoop->getParentLoop(),
7241 Legal->getWidestInductionType());
7242
7243#ifdef EXPENSIVE_CHECKS
7244 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7245#endif
7246
7247 // 1. Set up the skeleton for vectorization, including vector pre-header and
7248 // middle block. The vector loop is created during VPlan execution.
7249 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7251 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7253
7254 assert(verifyVPlanIsValid(BestVPlan, true /*VerifyLate*/) &&
7255 "final VPlan is invalid");
7256
7257 // After vectorization, the exit blocks of the original loop will have
7258 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7259 // looked through single-entry phis.
7260 ScalarEvolution &SE = *PSE.getSE();
7261 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7262 if (!Exit->hasPredecessors())
7263 continue;
7264 for (VPRecipeBase &PhiR : Exit->phis())
7265 SE.forgetLcssaPhiWithNewPredecessor(
7266 OrigLoop, cast<PHINode>(&cast<VPIRPhi>(PhiR).getInstruction()));
7267 }
7268 // Forget the original loop and block dispositions.
7269 SE.forgetLoop(OrigLoop);
7270 SE.forgetBlockAndLoopDispositions();
7271
7273
7274 //===------------------------------------------------===//
7275 //
7276 // Notice: any optimization or new instruction that go
7277 // into the code below should also be implemented in
7278 // the cost-model.
7279 //
7280 //===------------------------------------------------===//
7281
7282 // Retrieve loop information before executing the plan, which may remove the
7283 // original loop, if it becomes unreachable.
7284 MDNode *LID = OrigLoop->getLoopID();
7285 unsigned OrigLoopInvocationWeight = 0;
7286 std::optional<unsigned> OrigAverageTripCount =
7287 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7288
7289 BestVPlan.execute(&State);
7290
7291 // 2.6. Maintain Loop Hints
7292 // Keep all loop hints from the original loop on the vector loop (we'll
7293 // replace the vectorizer-specific hints below).
7294 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7295 // Add metadata to disable runtime unrolling a scalar loop when there
7296 // are no runtime checks about strides and memory. A scalar loop that is
7297 // rarely used is not worth unrolling.
7298 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7300 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7301 : nullptr,
7302 HeaderVPBB, BestVPlan, VectorizingEpilogue, LID, OrigAverageTripCount,
7303 OrigLoopInvocationWeight,
7304 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7305 DisableRuntimeUnroll);
7306
7307 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7308 // predication, updating analyses.
7309 ILV.fixVectorizedLoop(State);
7310
7312
7313 return ExpandedSCEVs;
7314}
7315
7316//===--------------------------------------------------------------------===//
7317// EpilogueVectorizerMainLoop
7318//===--------------------------------------------------------------------===//
7319
7320/// This function is partially responsible for generating the control flow
7321/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7322BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7323 BasicBlock *ScalarPH = createScalarPreheader("");
7324 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7325
7326 // Generate the code to check the minimum iteration count of the vector
7327 // epilogue (see below).
7328 EPI.EpilogueIterationCountCheck =
7329 emitIterationCountCheck(VectorPH, ScalarPH, true);
7330 EPI.EpilogueIterationCountCheck->setName("iter.check");
7331
7332 VectorPH = cast<BranchInst>(EPI.EpilogueIterationCountCheck->getTerminator())
7333 ->getSuccessor(1);
7334 // Generate the iteration count check for the main loop, *after* the check
7335 // for the epilogue loop, so that the path-length is shorter for the case
7336 // that goes directly through the vector epilogue. The longer-path length for
7337 // the main loop is compensated for, by the gain from vectorizing the larger
7338 // trip count. Note: the branch will get updated later on when we vectorize
7339 // the epilogue.
7340 EPI.MainLoopIterationCountCheck =
7341 emitIterationCountCheck(VectorPH, ScalarPH, false);
7342
7343 return cast<BranchInst>(EPI.MainLoopIterationCountCheck->getTerminator())
7344 ->getSuccessor(1);
7345}
7346
7347void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7348 LLVM_DEBUG({
7349 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7350 << "Main Loop VF:" << EPI.MainLoopVF
7351 << ", Main Loop UF:" << EPI.MainLoopUF
7352 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7353 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7354 });
7355}
7356
7357void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7358 DEBUG_WITH_TYPE(VerboseDebug, {
7359 dbgs() << "intermediate fn:\n"
7360 << *OrigLoop->getHeader()->getParent() << "\n";
7361 });
7362}
7363
7364BasicBlock *EpilogueVectorizerMainLoop::emitIterationCountCheck(
7365 BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue) {
7366 assert(Bypass && "Expected valid bypass basic block.");
7369 Value *CheckMinIters = createIterationCountCheck(
7370 VectorPH, ForEpilogue ? EPI.EpilogueVF : EPI.MainLoopVF,
7371 ForEpilogue ? EPI.EpilogueUF : EPI.MainLoopUF);
7372
7373 BasicBlock *const TCCheckBlock = VectorPH;
7374 if (!ForEpilogue)
7375 TCCheckBlock->setName("vector.main.loop.iter.check");
7376
7377 // Create new preheader for vector loop.
7378 VectorPH = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7379 static_cast<DominatorTree *>(nullptr), LI, nullptr,
7380 "vector.ph");
7381 if (ForEpilogue) {
7382 // Save the trip count so we don't have to regenerate it in the
7383 // vec.epilog.iter.check. This is safe to do because the trip count
7384 // generated here dominates the vector epilog iter check.
7385 EPI.TripCount = Count;
7386 } else {
7388 }
7389
7390 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7391 if (hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator()))
7392 setBranchWeights(BI, MinItersBypassWeights, /*IsExpected=*/false);
7393 ReplaceInstWithInst(TCCheckBlock->getTerminator(), &BI);
7394
7395 // When vectorizing the main loop, its trip-count check is placed in a new
7396 // block, whereas the overall trip-count check is placed in the VPlan entry
7397 // block. When vectorizing the epilogue loop, its trip-count check is placed
7398 // in the VPlan entry block.
7399 if (!ForEpilogue)
7400 introduceCheckBlockInVPlan(TCCheckBlock);
7401 return TCCheckBlock;
7402}
7403
7404//===--------------------------------------------------------------------===//
7405// EpilogueVectorizerEpilogueLoop
7406//===--------------------------------------------------------------------===//
7407
7408/// This function is partially responsible for generating the control flow
7409/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7411 BasicBlock *ScalarPH = createScalarPreheader("vec.epilog.");
7412 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7413 // Now, compare the remaining count and if there aren't enough iterations to
7414 // execute the vectorized epilogue skip to the scalar part.
7415 VectorPH->setName("vec.epilog.ph");
7416 BasicBlock *VecEpilogueIterationCountCheck =
7417 SplitBlock(VectorPH, VectorPH->begin(), DT, LI, nullptr,
7418 "vec.epilog.iter.check", true);
7420
7421 emitMinimumVectorEpilogueIterCountCheck(VectorPH, ScalarPH,
7422 VecEpilogueIterationCountCheck);
7423 AdditionalBypassBlock = VecEpilogueIterationCountCheck;
7424
7425 // Adjust the control flow taking the state info from the main loop
7426 // vectorization into account.
7427 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7428 "expected this to be saved from the previous pass.");
7429 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
7430 VecEpilogueIterationCountCheck, VectorPH);
7431
7432 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
7433 VecEpilogueIterationCountCheck, ScalarPH);
7434
7435 // Adjust the terminators of runtime check blocks and phis using them.
7436 BasicBlock *SCEVCheckBlock = RTChecks.getSCEVChecks().second;
7437 BasicBlock *MemCheckBlock = RTChecks.getMemRuntimeChecks().second;
7438 if (SCEVCheckBlock)
7439 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
7440 VecEpilogueIterationCountCheck, ScalarPH);
7441 if (MemCheckBlock)
7442 MemCheckBlock->getTerminator()->replaceUsesOfWith(
7443 VecEpilogueIterationCountCheck, ScalarPH);
7444
7445 DT->changeImmediateDominator(ScalarPH, EPI.EpilogueIterationCountCheck);
7446
7447 // The vec.epilog.iter.check block may contain Phi nodes from inductions or
7448 // reductions which merge control-flow from the latch block and the middle
7449 // block. Update the incoming values here and move the Phi into the preheader.
7450 SmallVector<PHINode *, 4> PhisInBlock(
7451 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
7452
7453 for (PHINode *Phi : PhisInBlock) {
7454 Phi->moveBefore(VectorPH->getFirstNonPHIIt());
7455 Phi->replaceIncomingBlockWith(
7456 VecEpilogueIterationCountCheck->getSinglePredecessor(),
7457 VecEpilogueIterationCountCheck);
7458
7459 // If the phi doesn't have an incoming value from the
7460 // EpilogueIterationCountCheck, we are done. Otherwise remove the incoming
7461 // value and also those from other check blocks. This is needed for
7462 // reduction phis only.
7463 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
7464 return EPI.EpilogueIterationCountCheck == IncB;
7465 }))
7466 continue;
7467 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7468 if (SCEVCheckBlock)
7469 Phi->removeIncomingValue(SCEVCheckBlock);
7470 if (MemCheckBlock)
7471 Phi->removeIncomingValue(MemCheckBlock);
7472 }
7473
7474 return VectorPH;
7475}
7476
7477BasicBlock *
7478EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7479 BasicBlock *VectorPH, BasicBlock *Bypass, BasicBlock *Insert) {
7480
7481 assert(EPI.TripCount &&
7482 "Expected trip count to have been saved in the first pass.");
7483 Value *TC = EPI.TripCount;
7484 IRBuilder<> Builder(Insert->getTerminator());
7485 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7486
7487 // Generate code to check if the loop's trip count is less than VF * UF of the
7488 // vector epilogue loop.
7489 auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF.isVector())
7490 ? ICmpInst::ICMP_ULE
7491 : ICmpInst::ICMP_ULT;
7492
7493 Value *CheckMinIters =
7494 Builder.CreateICmp(P, Count,
7495 createStepForVF(Builder, Count->getType(),
7496 EPI.EpilogueVF, EPI.EpilogueUF),
7497 "min.epilog.iters.check");
7498
7499 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7500 auto VScale = Cost->getVScaleForTuning();
7501 unsigned MainLoopStep =
7502 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
7503 unsigned EpilogueLoopStep =
7504 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
7505 // We assume the remaining `Count` is equally distributed in
7506 // [0, MainLoopStep)
7507 // So the probability for `Count < EpilogueLoopStep` should be
7508 // min(MainLoopStep, EpilogueLoopStep) / MainLoopStep
7509 // TODO: Improve the estimate by taking the estimated trip count into
7510 // consideration.
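 // For example, with MainLoopStep = 8 and EpilogueLoopStep = 4 the epilogue is
 // skipped with estimated probability min(8, 4) / 8 = 1/2, i.e. branch weights
 // {4, 4} on the bypass branch.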
7511 unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
7512 const uint32_t Weights[] = {EstimatedSkipCount,
7513 MainLoopStep - EstimatedSkipCount};
7514 setBranchWeights(BI, Weights, /*IsExpected=*/false);
7515 ReplaceInstWithInst(Insert->getTerminator(), &BI);
7516
7517 // A new entry block has been created for the epilogue VPlan. Hook it in, as
7518 // otherwise we would try to modify the entry to the main vector loop.
7519 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(Insert);
7520 VPBasicBlock *OldEntry = Plan.getEntry();
7521 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7522 Plan.setEntry(NewEntry);
7523 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7524
7525 return Insert;
7526}
7527
7528void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7529 LLVM_DEBUG({
7530 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7531 << "Epilogue Loop VF:" << EPI.EpilogueVF
7532 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7533 });
7534}
7535
7538 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7539 });
7540}
7541
7542VPRecipeBase *
7543VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
7544 VFRange &Range) {
7546 "Must be called with either a load or store");
7547
7548 auto WillWiden = [&](ElementCount VF) -> bool {
7549 LoopVectorizationCostModel::InstWidening Decision =
7550 CM.getWideningDecision(I, VF);
7552 "CM decision should be taken at this point.");
7553 if (Decision == LoopVectorizationCostModel::CM_Interleave)
7554 return true;
7555 if (CM.isScalarAfterVectorization(I, VF) ||
7556 CM.isProfitableToScalarize(I, VF))
7557 return false;
7558 return Decision != LoopVectorizationCostModel::CM_Scalarize;
7559 };
7560
7561 if (!LoopVectorizationPlanner::getDecisionAndClampRange(WillWiden, Range))
7562 return nullptr;
7563
7564 VPValue *Mask = nullptr;
7565 if (Legal->isMaskRequired(I))
7566 Mask = getBlockInMask(Builder.getInsertBlock());
7567
7568 // Determine if the pointer operand of the access is either consecutive or
7569 // reverse consecutive.
7570 LoopVectorizationCostModel::InstWidening Decision =
7571 CM.getWideningDecision(I, Range.Start);
7572 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
7573 bool Consecutive =
7574 Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
7575
7576 VPValue *Ptr = isa<LoadInst>(I) ? Operands[0] : Operands[1];
7577 if (Consecutive) {
7578 auto *GEP = dyn_cast<GetElementPtrInst>(
7579 Ptr->getUnderlyingValue()->stripPointerCasts());
7580 VPSingleDefRecipe *VectorPtr;
7581 if (Reverse) {
7582 // When folding the tail, we may compute an address that we don't in the
7583 // original scalar loop and it may not be inbounds. Drop Inbounds in that
7584 // case.
7585 GEPNoWrapFlags Flags =
7586 (CM.foldTailByMasking() || !GEP || !GEP->isInBounds())
7588 : GEPNoWrapFlags::inBounds();
7589 VectorPtr =
7591 /*Stride*/ -1, Flags, I->getDebugLoc());
7592 } else {
7593 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7594 GEP ? GEP->getNoWrapFlags()
7596 I->getDebugLoc());
7597 }
7598 Builder.insert(VectorPtr);
7599 Ptr = VectorPtr;
7600 }
7601 if (LoadInst *Load = dyn_cast<LoadInst>(I))
7602 return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
7603 VPIRMetadata(*Load, LVer), I->getDebugLoc());
7604
7605 StoreInst *Store = cast<StoreInst>(I);
7606 return new VPWidenStoreRecipe(*Store, Ptr, Operands[0], Mask, Consecutive,
7607 Reverse, VPIRMetadata(*Store, LVer),
7608 I->getDebugLoc());
7609}
7610
7611/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
7612/// insert a recipe to expand the step for the induction recipe.
7613static VPWidenIntOrFpInductionRecipe *
7614createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc,
7615 VPValue *Start, const InductionDescriptor &IndDesc,
7616 VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop) {
7617 assert(IndDesc.getStartValue() ==
7618 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
7619 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
7620 "step must be loop invariant");
7621
7622  VPValue *Step =
7623      vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep());
7624 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
7625 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7626 IndDesc, TruncI,
7627 TruncI->getDebugLoc());
7628 }
7629 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
7630 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7631 IndDesc, Phi->getDebugLoc());
7632}
7633
7634VPHeaderPHIRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
7635    PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) {
7636
7637 // Check if this is an integer or fp induction. If so, build the recipe that
7638 // produces its scalar and vector values.
7639 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
7640 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, Plan,
7641 *PSE.getSE(), *OrigLoop);
7642
7643 // Check if this is pointer induction. If so, build the recipe for it.
7644 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) {
7645 VPValue *Step = vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep());
7646 return new VPWidenPointerInductionRecipe(
7647 Phi, Operands[0], Step, &Plan.getVFxUF(), *II,
7648        LoopVectorizationPlanner::getDecisionAndClampRange(
7649            [&](ElementCount VF) {
7650 return CM.isScalarAfterVectorization(Phi, VF);
7651 },
7652 Range),
7653 Phi->getDebugLoc());
7654 }
7655 return nullptr;
7656}
7657
7658VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
7659    TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range) {
7660  // Optimize the special case where the source is a constant integer
7661 // induction variable. Notice that we can only optimize the 'trunc' case
7662 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7663 // (c) other casts depend on pointer size.
7664
7665 // Determine whether \p K is a truncation based on an induction variable that
7666 // can be optimized.
7667 auto IsOptimizableIVTruncate =
7668 [&](Instruction *K) -> std::function<bool(ElementCount)> {
7669 return [=](ElementCount VF) -> bool {
7670 return CM.isOptimizableIVTruncate(K, VF);
7671 };
7672 };
7673
7674  if (LoopVectorizationPlanner::getDecisionAndClampRange(
7675          IsOptimizableIVTruncate(I), Range)) {
7676
7677 auto *Phi = cast<PHINode>(I->getOperand(0));
7678 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
7679 VPValue *Start = Plan.getOrAddLiveIn(II.getStartValue());
7680 return createWidenInductionRecipes(Phi, I, Start, II, Plan, *PSE.getSE(),
7681 *OrigLoop);
7682 }
7683 return nullptr;
7684}
7685
7686VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
7687                                                   ArrayRef<VPValue *> Operands,
7688                                                   VFRange &Range) {
7689  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7690      [this, CI](ElementCount VF) {
7691 return CM.isScalarWithPredication(CI, VF);
7692 },
7693 Range);
7694
7695 if (IsPredicated)
7696 return nullptr;
7697
7698  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7699  if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7700 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7701 ID == Intrinsic::pseudoprobe ||
7702 ID == Intrinsic::experimental_noalias_scope_decl))
7703 return nullptr;
7704
7705  SmallVector<VPValue *, 4> Ops(Operands.take_front(CI->arg_size()));
7706
7707 // Is it beneficial to perform intrinsic call compared to lib call?
7708  bool ShouldUseVectorIntrinsic =
7709      ID && LoopVectorizationPlanner::getDecisionAndClampRange(
7710                [&](ElementCount VF) -> bool {
7711                  return CM.getCallWideningDecision(CI, VF).Kind ==
7712                         LoopVectorizationCostModel::CM_IntrinsicCall;
7713                },
7714 Range);
7715 if (ShouldUseVectorIntrinsic)
7716 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(),
7717 CI->getDebugLoc());
7718
7719 Function *Variant = nullptr;
7720 std::optional<unsigned> MaskPos;
7721  // Is it better to call a vectorized version of the function than to
7722  // scalarize the call?
7723 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7724 [&](ElementCount VF) -> bool {
7725 // The following case may be scalarized depending on the VF.
7726 // The flag shows whether we can use a usual Call for vectorized
7727 // version of the instruction.
7728
7729 // If we've found a variant at a previous VF, then stop looking. A
7730 // vectorized variant of a function expects input in a certain shape
7731 // -- basically the number of input registers, the number of lanes
7732 // per register, and whether there's a mask required.
7733 // We store a pointer to the variant in the VPWidenCallRecipe, so
7734 // once we have an appropriate variant it's only valid for that VF.
7735 // This will force a different vplan to be generated for each VF that
7736 // finds a valid variant.
7737 if (Variant)
7738 return false;
7739 LoopVectorizationCostModel::CallWideningDecision Decision =
7740 CM.getCallWideningDecision(CI, VF);
7741        if (Decision.Kind == LoopVectorizationCostModel::CM_VectorCall) {
7742          Variant = Decision.Variant;
7743 MaskPos = Decision.MaskPos;
7744 return true;
7745 }
7746
7747 return false;
7748 },
7749 Range);
7750 if (ShouldUseVectorCall) {
7751 if (MaskPos.has_value()) {
7752 // We have 2 cases that would require a mask:
7753 // 1) The block needs to be predicated, either due to a conditional
7754 // in the scalar loop or use of an active lane mask with
7755 // tail-folding, and we use the appropriate mask for the block.
7756 // 2) No mask is required for the block, but the only available
7757 // vector variant at this VF requires a mask, so we synthesize an
7758 // all-true mask.
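        // For example, at VF 4 an unpredicated block calling a masked-only
        // vector variant would receive the synthesized all-true mask
        // <i1 true, true, true, true> (VF chosen for illustration only).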
7759 VPValue *Mask = nullptr;
7760 if (Legal->isMaskRequired(CI))
7761 Mask = getBlockInMask(Builder.getInsertBlock());
7762 else
7763 Mask = Plan.getOrAddLiveIn(
7764            ConstantInt::getTrue(IntegerType::getInt1Ty(CI->getContext())));
7765
7766 Ops.insert(Ops.begin() + *MaskPos, Mask);
7767 }
7768
7769 Ops.push_back(Operands.back());
7770 return new VPWidenCallRecipe(CI, Variant, Ops, CI->getDebugLoc());
7771 }
7772
7773 return nullptr;
7774}
7775
7776bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7777  assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
7778         !isa<StoreInst>(I) && "Instruction should have been handled earlier");
7779 // Instruction should be widened, unless it is scalar after vectorization,
7780 // scalarization is profitable or it is predicated.
7781 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7782 return CM.isScalarAfterVectorization(I, VF) ||
7783 CM.isProfitableToScalarize(I, VF) ||
7784 CM.isScalarWithPredication(I, VF);
7785 };
7786  return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7787                                                             Range);
7788}
7789
7790VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
7791                                           ArrayRef<VPValue *> Operands) {
7792  switch (I->getOpcode()) {
7793 default:
7794 return nullptr;
7795 case Instruction::SDiv:
7796 case Instruction::UDiv:
7797 case Instruction::SRem:
7798 case Instruction::URem: {
7799 // If not provably safe, use a select to form a safe divisor before widening the
7800 // div/rem operation itself. Otherwise fall through to general handling below.
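    // For example, a udiv whose divisor might be zero in masked-off lanes is
    // rewritten as udiv %x, (select %mask, %d, 1), so inactive lanes divide
    // by 1 and cannot trap (operand names are illustrative).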
7801 if (CM.isPredicatedInst(I)) {
7802      SmallVector<VPValue *> Ops(Operands);
7803      VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
7804 VPValue *One =
7805 Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
7806 auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
7807 Ops[1] = SafeRHS;
7808 return new VPWidenRecipe(*I, Ops);
7809 }
7810 [[fallthrough]];
7811 }
7812 case Instruction::Add:
7813 case Instruction::And:
7814 case Instruction::AShr:
7815 case Instruction::FAdd:
7816 case Instruction::FCmp:
7817 case Instruction::FDiv:
7818 case Instruction::FMul:
7819 case Instruction::FNeg:
7820 case Instruction::FRem:
7821 case Instruction::FSub:
7822 case Instruction::ICmp:
7823 case Instruction::LShr:
7824 case Instruction::Mul:
7825 case Instruction::Or:
7826 case Instruction::Select:
7827 case Instruction::Shl:
7828 case Instruction::Sub:
7829 case Instruction::Xor:
7830 case Instruction::Freeze: {
7831    SmallVector<VPValue *> NewOps(Operands);
7832    if (Instruction::isBinaryOp(I->getOpcode())) {
7833 // The legacy cost model uses SCEV to check if some of the operands are
7834 // constants. To match the legacy cost model's behavior, use SCEV to try
7835 // to replace operands with constants.
7836 ScalarEvolution &SE = *PSE.getSE();
7837 auto GetConstantViaSCEV = [this, &SE](VPValue *Op) {
7838 if (!Op->isLiveIn())
7839 return Op;
7840 Value *V = Op->getUnderlyingValue();
7841 if (isa<Constant>(V) || !SE.isSCEVable(V->getType()))
7842 return Op;
7843 auto *C = dyn_cast<SCEVConstant>(SE.getSCEV(V));
7844 if (!C)
7845 return Op;
7846 return Plan.getOrAddLiveIn(C->getValue());
7847 };
7848 // For Mul, the legacy cost model checks both operands.
7849 if (I->getOpcode() == Instruction::Mul)
7850 NewOps[0] = GetConstantViaSCEV(NewOps[0]);
7851 // For other binops, the legacy cost model only checks the second operand.
7852 NewOps[1] = GetConstantViaSCEV(NewOps[1]);
7853 }
7854 return new VPWidenRecipe(*I, NewOps);
7855 }
7856 case Instruction::ExtractValue: {
7857    SmallVector<VPValue *> NewOps(Operands);
7858    Type *I32Ty = IntegerType::getInt32Ty(I->getContext());
7859 auto *EVI = cast<ExtractValueInst>(I);
7860 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7861 unsigned Idx = EVI->getIndices()[0];
7862 NewOps.push_back(Plan.getOrAddLiveIn(ConstantInt::get(I32Ty, Idx, false)));
7863 return new VPWidenRecipe(*I, NewOps);
7864 }
7865 };
7866}
7867
7868VPHistogramRecipe *
7869VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
7870                                     ArrayRef<VPValue *> Operands) {
7871  // FIXME: Support other operations.
7872 unsigned Opcode = HI->Update->getOpcode();
7873 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7874 "Histogram update operation must be an Add or Sub");
7875
7876  SmallVector<VPValue *, 3> HGramOps;
7877  // Bucket address.
7878 HGramOps.push_back(Operands[1]);
7879 // Increment value.
7880 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7881
7882 // In case of predicated execution (due to tail-folding, or conditional
7883 // execution, or both), pass the relevant mask.
7884 if (Legal->isMaskRequired(HI->Store))
7885 HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
7886
7887 return new VPHistogramRecipe(Opcode, HGramOps, HI->Store->getDebugLoc());
7888}
7889
7890VPReplicateRecipe *VPRecipeBuilder::handleReplication(Instruction *I,
7891                                                      ArrayRef<VPValue *> Operands,
7892                                                      VFRange &Range) {
7893  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7894      [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7895 Range);
7896
7897 bool IsPredicated = CM.isPredicatedInst(I);
7898
7899 // Even if the instruction is not marked as uniform, there are certain
7900 // intrinsic calls that can be effectively treated as such, so we check for
7901 // them here. Conservatively, we only do this for scalable vectors, since
7902 // for fixed-width VFs we can always fall back on full scalarization.
7903 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
7904 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
7905 case Intrinsic::assume:
7906 case Intrinsic::lifetime_start:
7907 case Intrinsic::lifetime_end:
7908 // For scalable vectors if one of the operands is variant then we still
7909 // want to mark as uniform, which will generate one instruction for just
7910 // the first lane of the vector. We can't scalarize the call in the same
7911 // way as for fixed-width vectors because we don't know how many lanes
7912 // there are.
7913 //
7914 // The reasons for doing it this way for scalable vectors are:
7915 // 1. For the assume intrinsic generating the instruction for the first
7916    //    lane is still better than not generating any at all. For
7917 // example, the input may be a splat across all lanes.
7918 // 2. For the lifetime start/end intrinsics the pointer operand only
7919 // does anything useful when the input comes from a stack object,
7920 // which suggests it should always be uniform. For non-stack objects
7921 // the effect is to poison the object, which still allows us to
7922 // remove the call.
7923 IsUniform = true;
7924 break;
7925 default:
7926 break;
7927 }
7928 }
7929 VPValue *BlockInMask = nullptr;
7930 if (!IsPredicated) {
7931 // Finalize the recipe for Instr, first if it is not predicated.
7932 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7933 } else {
7934 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7935 // Instructions marked for predication are replicated and a mask operand is
7936 // added initially. Masked replicate recipes will later be placed under an
7937 // if-then construct to prevent side-effects. Generate recipes to compute
7938 // the block mask for this region.
7939 BlockInMask = getBlockInMask(Builder.getInsertBlock());
7940 }
7941
7942 // Note that there is some custom logic to mark some intrinsics as uniform
7943 // manually above for scalable vectors, which this assert needs to account for
7944 // as well.
7945 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
7946 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
7947 "Should not predicate a uniform recipe");
7948 auto *Recipe = new VPReplicateRecipe(I, Operands, IsUniform, BlockInMask,
7949 VPIRMetadata(*I, LVer));
7950 return Recipe;
7951}
7952
7953/// Find all possible partial reductions in the loop and track all of those that
7954/// are valid so recipes can be formed later.
7955void VPRecipeBuilder::collectScaledReductions(VFRange &Range) {
7956  // Find all possible partial reductions.
7957  SmallVector<std::pair<PartialReductionChain, unsigned>>
7958      PartialReductionChains;
7959 for (const auto &[Phi, RdxDesc] : Legal->getReductionVars()) {
7960 getScaledReductions(Phi, RdxDesc.getLoopExitInstr(), Range,
7961 PartialReductionChains);
7962 }
7963
7964 // A partial reduction is invalid if any of its extends are used by
7965 // something that isn't another partial reduction. This is because the
7966 // extends are intended to be lowered along with the reduction itself.
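  // For example, if a zext feeding a partial reduction is also consumed by an
  // unrelated instruction, the extend would still have to be materialized for
  // that user, invalidating the costing assumption, so such chains are
  // rejected (illustrative scenario).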
7967
7968 // Build up a set of partial reduction ops for efficient use checking.
7969 SmallPtrSet<User *, 4> PartialReductionOps;
7970 for (const auto &[PartialRdx, _] : PartialReductionChains)
7971 PartialReductionOps.insert(PartialRdx.ExtendUser);
7972
7973 auto ExtendIsOnlyUsedByPartialReductions =
7974 [&PartialReductionOps](Instruction *Extend) {
7975 return all_of(Extend->users(), [&](const User *U) {
7976 return PartialReductionOps.contains(U);
7977 });
7978 };
7979
7980 // Check if each use of a chain's two extends is a partial reduction
7981 // and only add those that don't have non-partial reduction users.
7982 for (auto Pair : PartialReductionChains) {
7983 PartialReductionChain Chain = Pair.first;
7984 if (ExtendIsOnlyUsedByPartialReductions(Chain.ExtendA) &&
7985 (!Chain.ExtendB || ExtendIsOnlyUsedByPartialReductions(Chain.ExtendB)))
7986 ScaledReductionMap.try_emplace(Chain.Reduction, Pair.second);
7987 }
7988}
7989
7990bool VPRecipeBuilder::getScaledReductions(
7991 Instruction *PHI, Instruction *RdxExitInstr, VFRange &Range,
7992 SmallVectorImpl<std::pair<PartialReductionChain, unsigned>> &Chains) {
7993 if (!CM.TheLoop->contains(RdxExitInstr))
7994 return false;
7995
7996 auto *Update = dyn_cast<BinaryOperator>(RdxExitInstr);
7997 if (!Update)
7998 return false;
7999
8000 Value *Op = Update->getOperand(0);
8001 Value *PhiOp = Update->getOperand(1);
8002 if (Op == PHI)
8003 std::swap(Op, PhiOp);
8004
8005 // Try and get a scaled reduction from the first non-phi operand.
8006 // If one is found, we use the discovered reduction instruction in
8007 // place of the accumulator for costing.
8008 if (auto *OpInst = dyn_cast<Instruction>(Op)) {
8009 if (getScaledReductions(PHI, OpInst, Range, Chains)) {
8010 PHI = Chains.rbegin()->first.Reduction;
8011
8012 Op = Update->getOperand(0);
8013 PhiOp = Update->getOperand(1);
8014 if (Op == PHI)
8015 std::swap(Op, PhiOp);
8016 }
8017 }
8018 if (PhiOp != PHI)
8019 return false;
8020
8021 using namespace llvm::PatternMatch;
8022
8023 // If the update is a binary operator, check both of its operands to see if
8024 // they are extends. Otherwise, see if the update comes directly from an
8025 // extend.
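  // For example, a dot-product style update such as
  //   %mul = mul i32 (sext i8 %a to i32), (sext i8 %b to i32)
  //   %acc = add i32 %phi, %mul
  // has a binary-operator update whose operands are both extends, yielding a
  // scale factor of 4 for an i32 accumulator over i8 inputs (illustrative IR).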
8026 Instruction *Exts[2] = {nullptr};
8027 BinaryOperator *ExtendUser = dyn_cast<BinaryOperator>(Op);
8028 std::optional<unsigned> BinOpc;
8029 Type *ExtOpTypes[2] = {nullptr};
8030
8031 auto CollectExtInfo = [this, &Exts,
8032 &ExtOpTypes](SmallVectorImpl<Value *> &Ops) -> bool {
8033 unsigned I = 0;
8034 for (Value *OpI : Ops) {
8035 Value *ExtOp;
8036 if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))))
8037 return false;
8038 Exts[I] = cast<Instruction>(OpI);
8039
8040 // TODO: We should be able to support live-ins.
8041 if (!CM.TheLoop->contains(Exts[I]))
8042 return false;
8043
8044 ExtOpTypes[I] = ExtOp->getType();
8045 I++;
8046 }
8047 return true;
8048 };
8049
8050 if (ExtendUser) {
8051 if (!ExtendUser->hasOneUse())
8052 return false;
8053
8054 // Use the side-effect of match to replace BinOp only if the pattern is
8055 // matched, we don't care at this point whether it actually matched.
8056 match(ExtendUser, m_Neg(m_BinOp(ExtendUser)));
8057
8058 SmallVector<Value *> Ops(ExtendUser->operands());
8059 if (!CollectExtInfo(Ops))
8060 return false;
8061
8062 BinOpc = std::make_optional(ExtendUser->getOpcode());
8063 } else if (match(Update, m_Add(m_Value(), m_Value()))) {
8064 // We already know the operands for Update are Op and PhiOp.
8065    SmallVector<Value *> Ops({Op});
8066    if (!CollectExtInfo(Ops))
8067 return false;
8068
8069 ExtendUser = Update;
8070 BinOpc = std::nullopt;
8071 } else
8072 return false;
8073
8074  TTI::PartialReductionExtendKind OpAExtend =
8075      TTI::getPartialReductionExtendKind(Exts[0]);
8076  TTI::PartialReductionExtendKind OpBExtend =
8077      Exts[1] ? TTI::getPartialReductionExtendKind(Exts[1]) : TTI::PR_None;
8078 PartialReductionChain Chain(RdxExitInstr, Exts[0], Exts[1], ExtendUser);
8079
8080 TypeSize PHISize = PHI->getType()->getPrimitiveSizeInBits();
8081 TypeSize ASize = ExtOpTypes[0]->getPrimitiveSizeInBits();
8082 if (!PHISize.hasKnownScalarFactor(ASize))
8083 return false;
8084 unsigned TargetScaleFactor = PHISize.getKnownScalarFactor(ASize);
8085
8087 [&](ElementCount VF) {
8088 InstructionCost Cost = TTI->getPartialReductionCost(
8089 Update->getOpcode(), ExtOpTypes[0], ExtOpTypes[1],
8090 PHI->getType(), VF, OpAExtend, OpBExtend, BinOpc, CM.CostKind);
8091 return Cost.isValid();
8092 },
8093 Range)) {
8094 Chains.emplace_back(Chain, TargetScaleFactor);
8095 return true;
8096 }
8097
8098 return false;
8099}
8100
8101VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R,
8102                                                        VFRange &Range) {
8103 // First, check for specific widening recipes that deal with inductions, Phi
8104 // nodes, calls and memory operations.
8105 VPRecipeBase *Recipe;
8106 Instruction *Instr = R->getUnderlyingInstr();
8107 SmallVector<VPValue *, 4> Operands(R->operands());
8108 if (auto *PhiR = dyn_cast<VPPhi>(R)) {
8109 VPBasicBlock *Parent = PhiR->getParent();
8110 [[maybe_unused]] VPRegionBlock *LoopRegionOf =
8111 Parent->getEnclosingLoopRegion();
8112 assert(LoopRegionOf && LoopRegionOf->getEntry() == Parent &&
8113 "Non-header phis should have been handled during predication");
8114 auto *Phi = cast<PHINode>(R->getUnderlyingInstr());
8115 assert(Operands.size() == 2 && "Must have 2 operands for header phis");
8116 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8117 return Recipe;
8118
8119 VPHeaderPHIRecipe *PhiRecipe = nullptr;
8120 assert((Legal->isReductionVariable(Phi) ||
8121 Legal->isFixedOrderRecurrence(Phi)) &&
8122 "can only widen reductions and fixed-order recurrences here");
8123 VPValue *StartV = Operands[0];
8124 if (Legal->isReductionVariable(Phi)) {
8125 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(Phi);
8126 assert(RdxDesc.getRecurrenceStartValue() ==
8127 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8128
8129 // If the PHI is used by a partial reduction, set the scale factor.
8130 unsigned ScaleFactor =
8131 getScalingForReduction(RdxDesc.getLoopExitInstr()).value_or(1);
8132 PhiRecipe = new VPReductionPHIRecipe(
8133 Phi, RdxDesc.getRecurrenceKind(), *StartV, CM.isInLoopReduction(Phi),
8134 CM.useOrderedReductions(RdxDesc), ScaleFactor);
8135 } else {
8136 // TODO: Currently fixed-order recurrences are modeled as chains of
8137 // first-order recurrences. If there are no users of the intermediate
8138 // recurrences in the chain, the fixed order recurrence should be modeled
8139 // directly, enabling more efficient codegen.
8140 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8141 }
8142 // Add backedge value.
8143 PhiRecipe->addOperand(Operands[1]);
8144 return PhiRecipe;
8145 }
8146 assert(!R->isPhi() && "only VPPhi nodes expected at this point");
8147
8148 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8149 cast<TruncInst>(Instr), Operands, Range)))
8150 return Recipe;
8151
8152 // All widen recipes below deal only with VF > 1.
8153  if (LoopVectorizationPlanner::getDecisionAndClampRange(
8154      [&](ElementCount VF) { return VF.isScalar(); }, Range))
8155 return nullptr;
8156
8157 if (auto *CI = dyn_cast<CallInst>(Instr))
8158 return tryToWidenCall(CI, Operands, Range);
8159
8160 if (StoreInst *SI = dyn_cast<StoreInst>(Instr))
8161 if (auto HistInfo = Legal->getHistogramInfo(SI))
8162 return tryToWidenHistogram(*HistInfo, Operands);
8163
8164 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8165 return tryToWidenMemory(Instr, Operands, Range);
8166
8167 if (std::optional<unsigned> ScaleFactor = getScalingForReduction(Instr))
8168 return tryToCreatePartialReduction(Instr, Operands, ScaleFactor.value());
8169
8170 if (!shouldWiden(Instr, Range))
8171 return nullptr;
8172
8173 if (auto *GEP = dyn_cast<GetElementPtrInst>(Instr))
8174 return new VPWidenGEPRecipe(GEP, Operands);
8175
8176 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8177 return new VPWidenSelectRecipe(*SI, Operands);
8178 }
8179
8180 if (auto *CI = dyn_cast<CastInst>(Instr)) {
8181 return new VPWidenCastRecipe(CI->getOpcode(), Operands[0], CI->getType(),
8182 *CI);
8183 }
8184
8185 return tryToWiden(Instr, Operands);
8186}
8187
8188VPRecipeBase *
8189VPRecipeBuilder::tryToCreatePartialReduction(
8190    Instruction *Reduction, ArrayRef<VPValue *> Operands,
8191    unsigned ScaleFactor) {
8192 assert(Operands.size() == 2 &&
8193 "Unexpected number of operands for partial reduction");
8194
8195 VPValue *BinOp = Operands[0];
8196  VPValue *Accumulator = Operands[1];
8197  VPRecipeBase *BinOpRecipe = BinOp->getDefiningRecipe();
8198 if (isa<VPReductionPHIRecipe>(BinOpRecipe) ||
8199 isa<VPPartialReductionRecipe>(BinOpRecipe))
8200 std::swap(BinOp, Accumulator);
8201
8202 unsigned ReductionOpcode = Reduction->getOpcode();
8203 if (ReductionOpcode == Instruction::Sub) {
8204 auto *const Zero = ConstantInt::get(Reduction->getType(), 0);
8205    SmallVector<VPValue *, 2> Ops;
8206    Ops.push_back(Plan.getOrAddLiveIn(Zero));
8207 Ops.push_back(BinOp);
8208 BinOp = new VPWidenRecipe(*Reduction, Ops);
8209 Builder.insert(BinOp->getDefiningRecipe());
8210 ReductionOpcode = Instruction::Add;
8211 }
8212
8213 VPValue *Cond = nullptr;
8214 if (CM.blockNeedsPredicationForAnyReason(Reduction->getParent())) {
8215 assert((ReductionOpcode == Instruction::Add ||
8216 ReductionOpcode == Instruction::Sub) &&
8217 "Expected an ADD or SUB operation for predicated partial "
8218 "reductions (because the neutral element in the mask is zero)!");
8219 Cond = getBlockInMask(Builder.getInsertBlock());
8220 VPValue *Zero =
8221 Plan.getOrAddLiveIn(ConstantInt::get(Reduction->getType(), 0));
8222 BinOp = Builder.createSelect(Cond, BinOp, Zero, Reduction->getDebugLoc());
8223 }
8224 return new VPPartialReductionRecipe(ReductionOpcode, Accumulator, BinOp, Cond,
8225 ScaleFactor, Reduction);
8226}
8227
8228void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8229 ElementCount MaxVF) {
8230 if (ElementCount::isKnownGT(MinVF, MaxVF))
8231 return;
8232
8233 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8234
8235 const LoopAccessInfo *LAI = Legal->getLAI();
8236  LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
8237                      OrigLoop, LI, DT, PSE.getSE());
8238 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
8239      !LAI->getRuntimePointerChecking()->getDiffChecks()) {
8240    // Only use noalias metadata when using memory checks guaranteeing no
8241 // overlap across all iterations.
8242 LVer.prepareNoAliasMetadata();
8243 }
8244
8245 // Create initial base VPlan0, to serve as common starting point for all
8246 // candidates built later for specific VF ranges.
8247 auto VPlan0 = VPlanTransforms::buildVPlan0(
8248 OrigLoop, *LI, Legal->getWidestInductionType(),
8249 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8250
8251 auto MaxVFTimes2 = MaxVF * 2;
8252 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
8253 VFRange SubRange = {VF, MaxVFTimes2};
8254 if (auto Plan = tryToBuildVPlanWithVPRecipes(
8255 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
8256 bool HasScalarVF = Plan->hasScalarVFOnly();
8257 // Now optimize the initial VPlan.
8258      if (!HasScalarVF)
8259        VPlanTransforms::truncateToMinimalBitwidths(
8260            *Plan, CM.getMinimalBitwidths());
8261      VPlanTransforms::optimize(*Plan);
8262      // TODO: try to put it close to addActiveLaneMask().
8263      if (CM.foldTailWithEVL() && !HasScalarVF)
8264        VPlanTransforms::addExplicitVectorLength(
8265            *Plan, CM.getMaxSafeElements());
8266 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8267 VPlans.push_back(std::move(Plan));
8268 }
8269 VF = SubRange.End;
8270 }
8271}
8272
8273/// Create and return a ResumePhi for \p WideIV, unless it is truncated. If the
8274/// induction recipe is not canonical, creates a VPDerivedIVRecipe to compute
8275/// the end value of the induction.
8276static VPInstruction *addResumePhiRecipeForInduction(
8277    VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder,
8278 VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC) {
8279 auto *WideIntOrFp = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV);
8280 // Truncated wide inductions resume from the last lane of their vector value
8281 // in the last vector iteration which is handled elsewhere.
8282 if (WideIntOrFp && WideIntOrFp->getTruncInst())
8283 return nullptr;
8284
8285 VPValue *Start = WideIV->getStartValue();
8286 VPValue *Step = WideIV->getStepValue();
8287  const InductionDescriptor &ID = WideIV->getInductionDescriptor();
8288  VPValue *EndValue = VectorTC;
8289 if (!WideIntOrFp || !WideIntOrFp->isCanonical()) {
8290 EndValue = VectorPHBuilder.createDerivedIV(
8291 ID.getKind(), dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
8292 Start, VectorTC, Step);
8293 }
8294
8295 // EndValue is derived from the vector trip count (which has the same type as
8296 // the widest induction) and thus may be wider than the induction here.
8297 Type *ScalarTypeOfWideIV = TypeInfo.inferScalarType(WideIV);
8298 if (ScalarTypeOfWideIV != TypeInfo.inferScalarType(EndValue)) {
8299 EndValue = VectorPHBuilder.createScalarCast(Instruction::Trunc, EndValue,
8300 ScalarTypeOfWideIV,
8301 WideIV->getDebugLoc());
8302 }
8303
8304 auto *ResumePhiRecipe = ScalarPHBuilder.createScalarPhi(
8305 {EndValue, Start}, WideIV->getDebugLoc(), "bc.resume.val");
8306 return ResumePhiRecipe;
8307}
8308
8309/// Create resume phis in the scalar preheader for first-order recurrences,
8310/// reductions and inductions, and update the VPIRInstructions wrapping the
8311/// original phis in the scalar header. End values for inductions are added to
8312/// \p IVEndValues.
8313static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan,
8314 DenseMap<VPValue *, VPValue *> &IVEndValues) {
8315 VPTypeAnalysis TypeInfo(Plan);
8316 auto *ScalarPH = Plan.getScalarPreheader();
8317 auto *MiddleVPBB = cast<VPBasicBlock>(ScalarPH->getPredecessors()[0]);
8318 VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
8319 VPBuilder VectorPHBuilder(
8320 cast<VPBasicBlock>(VectorRegion->getSinglePredecessor()));
8321 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
8322 VPBuilder ScalarPHBuilder(ScalarPH);
8323 for (VPRecipeBase &ScalarPhiR : Plan.getScalarHeader()->phis()) {
8324 auto *ScalarPhiIRI = cast<VPIRPhi>(&ScalarPhiR);
8325
8326 // TODO: Extract final value from induction recipe initially, optimize to
8327 // pre-computed end value together in optimizeInductionExitUsers.
8328 auto *VectorPhiR =
8329 cast<VPHeaderPHIRecipe>(Builder.getRecipe(&ScalarPhiIRI->getIRPhi()));
8330 if (auto *WideIVR = dyn_cast<VPWidenInductionRecipe>(VectorPhiR)) {
8331      if (VPInstruction *ResumePhi = addResumePhiRecipeForInduction(
8332              WideIVR, VectorPHBuilder, ScalarPHBuilder, TypeInfo,
8333 &Plan.getVectorTripCount())) {
8334 assert(isa<VPPhi>(ResumePhi) && "Expected a phi");
8335 IVEndValues[WideIVR] = ResumePhi->getOperand(0);
8336 ScalarPhiIRI->addOperand(ResumePhi);
8337 continue;
8338 }
8339 // TODO: Also handle truncated inductions here. Computing end-values
8340 // separately should be done as VPlan-to-VPlan optimization, after
8341 // legalizing all resume values to use the last lane from the loop.
8342 assert(cast<VPWidenIntOrFpInductionRecipe>(VectorPhiR)->getTruncInst() &&
8343 "should only skip truncated wide inductions");
8344 continue;
8345 }
8346
8347 // The backedge value provides the value to resume coming out of a loop,
8348 // which for FORs is a vector whose last element needs to be extracted. The
8349 // start value provides the value if the loop is bypassed.
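    // For example, if the recurrence's vector value in the final vector
    // iteration is <s0, s1, s2, s3>, the scalar loop resumes with s3; if the
    // vector loop is bypassed, it resumes with the original start value
    // (lane values are illustrative).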
8350 bool IsFOR = isa<VPFirstOrderRecurrencePHIRecipe>(VectorPhiR);
8351 auto *ResumeFromVectorLoop = VectorPhiR->getBackedgeValue();
8352 assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
8353 "Cannot handle loops with uncountable early exits");
8354 if (IsFOR)
8355 ResumeFromVectorLoop = MiddleBuilder.createNaryOp(
8356 VPInstruction::ExtractLastElement, {ResumeFromVectorLoop}, {},
8357 "vector.recur.extract");
8358 StringRef Name = IsFOR ? "scalar.recur.init" : "bc.merge.rdx";
8359 auto *ResumePhiR = ScalarPHBuilder.createScalarPhi(
8360 {ResumeFromVectorLoop, VectorPhiR->getStartValue()}, {}, Name);
8361 ScalarPhiIRI->addOperand(ResumePhiR);
8362 }
8363}
8364
8365/// Handle users in the exit block for first-order recurrences in the original
8366/// exit block. The penultimate value of recurrences is fed to their LCSSA phi
8367/// users in the original exit block using the VPIRInstruction wrapping the
8368/// LCSSA phi.
8369static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range) {
8370  VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
8371 auto *ScalarPHVPBB = Plan.getScalarPreheader();
8372 auto *MiddleVPBB = Plan.getMiddleBlock();
8373 VPBuilder ScalarPHBuilder(ScalarPHVPBB);
8374 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
8375
8376 auto IsScalableOne = [](ElementCount VF) -> bool {
8377 return VF == ElementCount::getScalable(1);
8378 };
8379
8380 for (auto &HeaderPhi : VectorRegion->getEntryBasicBlock()->phis()) {
8381 auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
8382 if (!FOR)
8383 continue;
8384
8385 assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
8386 "Cannot handle loops with uncountable early exits");
8387
8388 // This is the second phase of vectorizing first-order recurrences, creating
8389    // extracts for users outside the loop. An overview of the transformation is
8390 // described below. Suppose we have the following loop with some use after
8391 // the loop of the last a[i-1],
8392 //
8393 // for (int i = 0; i < n; ++i) {
8394 // t = a[i - 1];
8395 // b[i] = a[i] - t;
8396 // }
8397 // use t;
8398 //
8399 // There is a first-order recurrence on "a". For this loop, the shorthand
8400 // scalar IR looks like:
8401 //
8402 // scalar.ph:
8403 // s.init = a[-1]
8404 // br scalar.body
8405 //
8406 // scalar.body:
8407 // i = phi [0, scalar.ph], [i+1, scalar.body]
8408 // s1 = phi [s.init, scalar.ph], [s2, scalar.body]
8409 // s2 = a[i]
8410 // b[i] = s2 - s1
8411 // br cond, scalar.body, exit.block
8412 //
8413 // exit.block:
8414 // use = lcssa.phi [s1, scalar.body]
8415 //
8416    // In this example, s1 is a recurrence because its value depends on the
8417 // previous iteration. In the first phase of vectorization, we created a
8418 // VPFirstOrderRecurrencePHIRecipe v1 for s1. Now we create the extracts
8419 // for users in the scalar preheader and exit block.
8420 //
8421 // vector.ph:
8422 // v_init = vector(..., ..., ..., a[-1])
8423 // br vector.body
8424 //
8425 // vector.body
8426 // i = phi [0, vector.ph], [i+4, vector.body]
8427 // v1 = phi [v_init, vector.ph], [v2, vector.body]
8428 // v2 = a[i, i+1, i+2, i+3]
8429 // b[i] = v2 - v1
8430 // // Next, third phase will introduce v1' = splice(v1(3), v2(0, 1, 2))
8431 // b[i, i+1, i+2, i+3] = v2 - v1
8432 // br cond, vector.body, middle.block
8433 //
8434 // middle.block:
8435 // vector.recur.extract.for.phi = v2(2)
8436 // vector.recur.extract = v2(3)
8437 // br cond, scalar.ph, exit.block
8438 //
8439 // scalar.ph:
8440 // scalar.recur.init = phi [vector.recur.extract, middle.block],
8441 // [s.init, otherwise]
8442 // br scalar.body
8443 //
8444 // scalar.body:
8445 // i = phi [0, scalar.ph], [i+1, scalar.body]
8446 // s1 = phi [scalar.recur.init, scalar.ph], [s2, scalar.body]
8447 // s2 = a[i]
8448 // b[i] = s2 - s1
8449 // br cond, scalar.body, exit.block
8450 //
8451 // exit.block:
8452 // lo = lcssa.phi [s1, scalar.body],
8453 // [vector.recur.extract.for.phi, middle.block]
8454 //
8455 // Now update VPIRInstructions modeling LCSSA phis in the exit block.
8456 // Extract the penultimate value of the recurrence and use it as operand for
8457 // the VPIRInstruction modeling the phi.
8458 for (VPUser *U : FOR->users()) {
8459 using namespace llvm::VPlanPatternMatch;
8460 if (!match(U, m_ExtractLastElement(m_Specific(FOR))))
8461 continue;
8462 // For VF vscale x 1, if vscale = 1, we are unable to extract the
8463 // penultimate value of the recurrence. Instead we rely on the existing
8464 // extract of the last element from the result of
8465 // VPInstruction::FirstOrderRecurrenceSplice.
8466 // TODO: Consider vscale_range info and UF.
8467      if (LoopVectorizationPlanner::getDecisionAndClampRange(IsScalableOne,
8468                                                             Range))
8469 return;
8470 VPValue *PenultimateElement = MiddleBuilder.createNaryOp(
8471 VPInstruction::ExtractPenultimateElement, {FOR->getBackedgeValue()},
8472 {}, "vector.recur.extract.for.phi");
8473 cast<VPInstruction>(U)->replaceAllUsesWith(PenultimateElement);
8474 }
8475 }
8476}
8477
8478VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
8479 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
8480
8481 using namespace llvm::VPlanPatternMatch;
8482 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8483
8484 // ---------------------------------------------------------------------------
8485 // Build initial VPlan: Scan the body of the loop in a topological order to
8486 // visit each basic block after having visited its predecessor basic blocks.
8487 // ---------------------------------------------------------------------------
8488
8489 bool RequiresScalarEpilogueCheck =
8490      LoopVectorizationPlanner::getDecisionAndClampRange(
8491          [this](ElementCount VF) {
8492 return !CM.requiresScalarEpilogue(VF.isVector());
8493 },
8494 Range);
8495 VPlanTransforms::handleEarlyExits(*Plan, Legal->hasUncountableEarlyExit());
8496 VPlanTransforms::addMiddleCheck(*Plan, RequiresScalarEpilogueCheck,
8497 CM.foldTailByMasking());
8498
8499  VPlanTransforms::createLoopRegions(*Plan);
8500
8501 // Don't use getDecisionAndClampRange here, because we don't know the UF
8502 // so this function is better to be conservative, rather than to split
8503 // it up into different VPlans.
8504 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
8505 bool IVUpdateMayOverflow = false;
8506 for (ElementCount VF : Range)
8507 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
8508
8509 TailFoldingStyle Style = CM.getTailFoldingStyle(IVUpdateMayOverflow);
8510 // Use NUW for the induction increment if we proved that it won't overflow in
8511  // the vector loop or when not folding the tail. In the latter case, we know
8512 // that the canonical induction increment will not overflow as the vector trip
8513 // count is >= increment and a multiple of the increment.
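  // For example, with VF 4 and UF 2 the vector trip count is a multiple of 8,
  // so incrementing the canonical IV by 8 each iteration cannot overflow
  // before the loop exits (VF/UF chosen for illustration only).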
8514 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
8515 if (!HasNUW) {
8516 auto *IVInc = Plan->getVectorLoopRegion()
8517 ->getExitingBasicBlock()
8518 ->getTerminator()
8519 ->getOperand(0);
8520 assert(match(IVInc, m_VPInstruction<Instruction::Add>(
8521 m_Specific(Plan->getCanonicalIV()), m_VPValue())) &&
8522 "Did not find the canonical IV increment");
8523 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
8524 }
8525
8526 // ---------------------------------------------------------------------------
8527 // Pre-construction: record ingredients whose recipes we'll need to further
8528 // process after constructing the initial VPlan.
8529 // ---------------------------------------------------------------------------
8530
8531 // For each interleave group which is relevant for this (possibly trimmed)
8532 // Range, add it to the set of groups to be later applied to the VPlan and add
8533 // placeholders for its members' Recipes which we'll be replacing with a
8534 // single VPInterleaveRecipe.
8535 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8536 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
8537 bool Result = (VF.isVector() && // Query is illegal for VF == 1
8538 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8539                         LoopVectorizationCostModel::CM_Interleave);
8540      // For scalable vectors, the interleave factors must be <= 8 since we
8541 // require the (de)interleaveN intrinsics instead of shufflevectors.
8542 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
8543 "Unsupported interleave factor for scalable vectors");
8544 return Result;
8545 };
8546 if (!getDecisionAndClampRange(ApplyIG, Range))
8547 continue;
8548 InterleaveGroups.insert(IG);
8549 }
8550
8551 // ---------------------------------------------------------------------------
8552 // Predicate and linearize the top-level loop region.
8553 // ---------------------------------------------------------------------------
8554 auto BlockMaskCache = VPlanTransforms::introduceMasksAndLinearize(
8555 *Plan, CM.foldTailByMasking());
8556
8557 // ---------------------------------------------------------------------------
8558 // Construct wide recipes and apply predication for original scalar
8559 // VPInstructions in the loop.
8560 // ---------------------------------------------------------------------------
8561 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8562 Builder, BlockMaskCache, LVer);
8563 RecipeBuilder.collectScaledReductions(Range);
8564
8565 // Scan the body of the loop in a topological order to visit each basic block
8566 // after having visited its predecessor basic blocks.
8567 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
8568 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
8569 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
8570 HeaderVPBB);
8571
8572 auto *MiddleVPBB = Plan->getMiddleBlock();
8573 VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
8574 // Mapping from VPValues in the initial plan to their widened VPValues. Needed
8575 // temporarily to update created block masks.
8576 DenseMap<VPValue *, VPValue *> Old2New;
8577 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
8578 // Convert input VPInstructions to widened recipes.
8579 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
8580 auto *SingleDef = cast<VPSingleDefRecipe>(&R);
8581 auto *UnderlyingValue = SingleDef->getUnderlyingValue();
8582 // Skip recipes that do not need transforming, including canonical IV,
8583 // wide canonical IV and VPInstructions without underlying values. The
8584 // latter are added above for masking.
8585 // FIXME: Migrate code relying on the underlying instruction from VPlan0
8586 // to construct recipes below to not use the underlying instruction.
8587      if (isa<VPCanonicalIVPHIRecipe, VPWidenCanonicalIVRecipe>(
8588              &R) ||
8589 (isa<VPInstruction>(&R) && !UnderlyingValue))
8590 continue;
8591
8592 // FIXME: VPlan0, which models a copy of the original scalar loop, should
8593 // not use VPWidenPHIRecipe to model the phis.
8594      assert((isa<VPWidenPHIRecipe>(&R) || isa<VPInstruction>(&R)) &&
8595             UnderlyingValue && "unsupported recipe");
8596
8597 // TODO: Gradually replace uses of underlying instruction by analyses on
8598 // VPlan.
8599 Instruction *Instr = cast<Instruction>(UnderlyingValue);
8600 Builder.setInsertPoint(SingleDef);
8601
8602 // The stores with invariant address inside the loop will be deleted, and
8603 // in the exit block, a uniform store recipe will be created for the final
8604 // invariant store of the reduction.
8605 StoreInst *SI;
8606 if ((SI = dyn_cast<StoreInst>(Instr)) &&
8607 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
8608 // Only create recipe for the final invariant store of the reduction.
8609 if (Legal->isInvariantStoreOfReduction(SI)) {
8610 auto *Recipe =
8611 new VPReplicateRecipe(SI, R.operands(), true /* IsUniform */,
8612 nullptr /*Mask*/, VPIRMetadata(*SI, LVer));
8613 Recipe->insertBefore(*MiddleVPBB, MBIP);
8614 }
8615 R.eraseFromParent();
8616 continue;
8617 }
8618
8619 VPRecipeBase *Recipe =
8620 RecipeBuilder.tryToCreateWidenRecipe(SingleDef, Range);
8621 if (!Recipe)
8622 Recipe = RecipeBuilder.handleReplication(Instr, R.operands(), Range);
8623
8624 RecipeBuilder.setRecipe(Instr, Recipe);
8625 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
8626 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
8627 // moved to the phi section in the header.
8628 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8629 } else {
8630 Builder.insert(Recipe);
8631 }
8632 if (Recipe->getNumDefinedValues() == 1) {
8633 SingleDef->replaceAllUsesWith(Recipe->getVPSingleValue());
8634 Old2New[SingleDef] = Recipe->getVPSingleValue();
8635 } else {
8636 assert(Recipe->getNumDefinedValues() == 0 &&
8637 "Unexpected multidef recipe");
8638 R.eraseFromParent();
8639 }
8640 }
8641 }
8642
8643 // replaceAllUsesWith above may invalidate the block masks. Update them here.
8644 // TODO: Include the masks as operands in the predicated VPlan directly
8645 // to remove the need to keep a map of masks beyond the predication
8646 // transform.
8647 RecipeBuilder.updateBlockMaskCache(Old2New);
8648 for (VPValue *Old : Old2New.keys())
8649 Old->getDefiningRecipe()->eraseFromParent();
8650
8651 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8652 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8653 "entry block must be set to a VPRegionBlock having a non-empty entry "
8654 "VPBasicBlock");
8655
8656 // Update wide induction increments to use the same step as the corresponding
8657 // wide induction. This enables detecting induction increments directly in
8658 // VPlan and removes redundant splats.
8659 for (const auto &[Phi, ID] : Legal->getInductionVars()) {
8660 auto *IVInc = cast<Instruction>(
8661 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
8662 if (IVInc->getOperand(0) != Phi || IVInc->getOpcode() != Instruction::Add)
8663 continue;
8664 VPWidenInductionRecipe *WideIV =
8665 cast<VPWidenInductionRecipe>(RecipeBuilder.getRecipe(Phi));
8666 VPRecipeBase *R = RecipeBuilder.getRecipe(IVInc);
8667 R->setOperand(1, WideIV->getStepValue());
8668 }
8669
8670  addExitUsersForFirstOrderRecurrences(*Plan, Range);
8671  DenseMap<VPValue *, VPValue *> IVEndValues;
8672 addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
8673
8674 // ---------------------------------------------------------------------------
8675 // Transform initial VPlan: Apply previously taken decisions, in order, to
8676 // bring the VPlan to its final state.
8677 // ---------------------------------------------------------------------------
8678
8679 // Adjust the recipes for any inloop reductions.
8680 adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
8681
8682 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
8683 // NaNs if possible, bail out otherwise.
8685 *Plan))
8686 return nullptr;
8687
8688 // Transform recipes to abstract recipes if it is legal and beneficial and
8689 // clamp the range for better cost estimation.
8690 // TODO: Enable following transform when the EVL-version of extended-reduction
8691 // and mulacc-reduction are implemented.
8692 if (!CM.foldTailWithEVL()) {
8693 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
8694    VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan,
8695                             CostCtx, Range);
8696 }
8697
8698 for (ElementCount VF : Range)
8699 Plan->addVF(VF);
8700 Plan->setName("Initial VPlan");
8701
8702 // Interleave memory: for each Interleave Group we marked earlier as relevant
8703 // for this VPlan, replace the Recipes widening its memory instructions with a
8704 // single VPInterleaveRecipe at its insertion point.
8705  VPlanTransforms::createInterleaveGroups(*Plan,
8706      InterleaveGroups, RecipeBuilder,
8707 CM.isScalarEpilogueAllowed());
8708
8709 // Replace VPValues for known constant strides.
8711 Legal->getLAI()->getSymbolicStrides());
8712
8713 auto BlockNeedsPredication = [this](BasicBlock *BB) {
8714 return Legal->blockNeedsPredication(BB);
8715 };
8716  VPlanTransforms::dropPoisonGeneratingRecipes(*Plan,
8717                                                 BlockNeedsPredication);
8718
8719 // Sink users of fixed-order recurrence past the recipe defining the previous
8720 // value and introduce FirstOrderRecurrenceSplice VPInstructions.
8721  if (!VPlanTransforms::runPass(VPlanTransforms::adjustFixedOrderRecurrences,
8722          *Plan, Builder))
8723 return nullptr;
8724
8725 if (useActiveLaneMask(Style)) {
8726 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
8727 // TailFoldingStyle is visible there.
8728 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
8729 bool WithoutRuntimeCheck =
8730        Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
8731    VPlanTransforms::addActiveLaneMask(*Plan, ForControlFlow,
8732 WithoutRuntimeCheck);
8733 }
8734 VPlanTransforms::optimizeInductionExitUsers(*Plan, IVEndValues, *PSE.getSE());
8735
8736 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8737 return Plan;
8738}
8739
8740VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
8741 // Outer loop handling: They may require CFG and instruction level
8742 // transformations before even evaluating whether vectorization is profitable.
8743 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8744 // the vectorization pipeline.
8745 assert(!OrigLoop->isInnermost());
8746 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8747
8748 auto Plan = VPlanTransforms::buildVPlan0(
8749 OrigLoop, *LI, Legal->getWidestInductionType(),
8750 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8751  VPlanTransforms::handleEarlyExits(*Plan,
8752                                    /*HasUncountableExit*/ false);
8753 VPlanTransforms::addMiddleCheck(*Plan, /*RequiresScalarEpilogue*/ true,
8754 /*TailFolded*/ false);
8755
8756  VPlanTransforms::createLoopRegions(*Plan);
8757
8758 for (ElementCount VF : Range)
8759 Plan->addVF(VF);
8760
8761  if (!VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
8762          Plan,
8763 [this](PHINode *P) {
8764 return Legal->getIntOrFpInductionDescriptor(P);
8765 },
8766 *TLI))
8767 return nullptr;
8768
8769 // Collect mapping of IR header phis to header phi recipes, to be used in
8770 // addScalarResumePhis.
8771 DenseMap<VPBasicBlock *, VPValue *> BlockMaskCache;
8772 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8773 Builder, BlockMaskCache, nullptr /*LVer*/);
8774 for (auto &R : Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8775    if (isa<VPCanonicalIVPHIRecipe>(&R))
8776      continue;
8777 auto *HeaderR = cast<VPHeaderPHIRecipe>(&R);
8778 RecipeBuilder.setRecipe(HeaderR->getUnderlyingInstr(), HeaderR);
8779 }
8780 DenseMap<VPValue *, VPValue *> IVEndValues;
8781 // TODO: IVEndValues are not used yet in the native path, to optimize exit
8782 // values.
8783 addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
8784
8785 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8786 return Plan;
8787}
8788
8789// Adjust the recipes for reductions. For in-loop reductions the chain of
8790// instructions leading from the loop exit instr to the phi needs to be converted
8791// to reductions, with one operand being vector and the other being the scalar
8792// reduction chain. For other reductions, a select is introduced between the phi
8793// and users outside the vector region when folding the tail.
8794//
8795// A ComputeReductionResult recipe is added to the middle block, also for
8796// in-loop reductions which compute their result in-loop, because generating
8797// the subsequent bc.merge.rdx phi is driven by ComputeReductionResult recipes.
8798//
8799// Adjust AnyOf reductions; replace the reduction phi for the selected value
8800// with a boolean reduction phi node to check if the condition is true in any
8801// iteration. The final value is selected by the final ComputeReductionResult.
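// For example, an in-loop integer add reduction
//   %red      = phi i32 [ 0, %ph ], [ %red.next, %loop ]
//   %red.next = add i32 %red, %x
// keeps %red as a scalar chain while %x is widened; the add is replaced by a
// reduction recipe folding the widened %x into that chain (illustrative IR).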
8802void LoopVectorizationPlanner::adjustRecipesForReductions(
8803 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8804 using namespace VPlanPatternMatch;
8805 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8806 VPBasicBlock *Header = VectorLoopRegion->getEntryBasicBlock();
8807 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8808  SmallVector<VPRecipeBase *> ToDelete;
8809
8810 for (VPRecipeBase &R : Header->phis()) {
8811 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8812 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
8813 continue;
8814
8815 RecurKind Kind = PhiR->getRecurrenceKind();
8816 assert(
8817        !RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
8818        !RecurrenceDescriptor::isFindIVRecurrenceKind(Kind) &&
8819        "AnyOf and FindIV reductions are not allowed for in-loop reductions");
8820
8821 // Collect the chain of "link" recipes for the reduction starting at PhiR.
8822 SetVector<VPSingleDefRecipe *> Worklist;
8823 Worklist.insert(PhiR);
8824 for (unsigned I = 0; I != Worklist.size(); ++I) {
8825 VPSingleDefRecipe *Cur = Worklist[I];
8826 for (VPUser *U : Cur->users()) {
8827 auto *UserRecipe = cast<VPSingleDefRecipe>(U);
8828 if (!UserRecipe->getParent()->getEnclosingLoopRegion()) {
8829 assert((UserRecipe->getParent() == MiddleVPBB ||
8830 UserRecipe->getParent() == Plan->getScalarPreheader()) &&
8831 "U must be either in the loop region, the middle block or the "
8832 "scalar preheader.");
8833 continue;
8834 }
8835 Worklist.insert(UserRecipe);
8836 }
8837 }
8838
8839 // Visit operation "Links" along the reduction chain top-down starting from
8840 // the phi until LoopExitValue. We keep track of the previous item
8841 // (PreviousLink) to tell which of the two operands of a Link will remain
8842 // scalar and which will be reduced. For minmax by select(cmp), Link will be
8843    // the select instructions. Blend recipes of in-loop reduction phis will
8844 // get folded to their non-phi operand, as the reduction recipe handles the
8845 // condition directly.
8846 VPSingleDefRecipe *PreviousLink = PhiR; // Aka Worklist[0].
8847 for (VPSingleDefRecipe *CurrentLink : drop_begin(Worklist)) {
8848 if (auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink)) {
8849 assert(Blend->getNumIncomingValues() == 2 &&
8850 "Blend must have 2 incoming values");
8851 if (Blend->getIncomingValue(0) == PhiR) {
8852 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
8853 } else {
8854 assert(Blend->getIncomingValue(1) == PhiR &&
8855 "PhiR must be an operand of the blend");
8856 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
8857 }
8858 continue;
8859 }
8860
8861 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
8862
8863 // Index of the first operand which holds a non-mask vector operand.
8864 unsigned IndexOfFirstOperand;
8865 // Recognize a call to the llvm.fmuladd intrinsic.
8866 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
8867 VPValue *VecOp;
8868 VPBasicBlock *LinkVPBB = CurrentLink->getParent();
8869 if (IsFMulAdd) {
8870 assert(
8871            RecurrenceDescriptor::isFMulAddIntrinsic(CurrentLinkI) &&
8872            "Expected instruction to be a call to the llvm.fmuladd intrinsic");
8873 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
8874 isa<VPWidenIntrinsicRecipe>(CurrentLink)) &&
8875 CurrentLink->getOperand(2) == PreviousLink &&
8876 "expected a call where the previous link is the added operand");
8877
8878 // If the instruction is a call to the llvm.fmuladd intrinsic then we
8879 // need to create an fmul recipe (multiplying the first two operands of
8880 // the fmuladd together) to use as the vector operand for the fadd
8881 // reduction.
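        // For example, call fmuladd(%a, %b, %sum) becomes %mul = fmul %a, %b
        // followed by a reduction fadd of %mul into %sum (names illustrative).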
8882 VPInstruction *FMulRecipe = new VPInstruction(
8883 Instruction::FMul,
8884 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
8885 CurrentLinkI->getFastMathFlags());
8886 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
8887 VecOp = FMulRecipe;
8888 } else if (PhiR->isInLoop() && Kind == RecurKind::AddChainWithSubs &&
8889 CurrentLinkI->getOpcode() == Instruction::Sub) {
8890 Type *PhiTy = PhiR->getUnderlyingValue()->getType();
8891 auto *Zero = Plan->getOrAddLiveIn(ConstantInt::get(PhiTy, 0));
8892 VPWidenRecipe *Sub = new VPWidenRecipe(
8893 Instruction::Sub, {Zero, CurrentLink->getOperand(1)}, {},
8894 VPIRMetadata(), CurrentLinkI->getDebugLoc());
8895 Sub->setUnderlyingValue(CurrentLinkI);
8896 LinkVPBB->insert(Sub, CurrentLink->getIterator());
8897 VecOp = Sub;
8898 } else {
8899        if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8900          if (isa<VPWidenRecipe>(CurrentLink)) {
8901 assert(isa<CmpInst>(CurrentLinkI) &&
8902 "need to have the compare of the select");
8903 continue;
8904 }
8905 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
8906 "must be a select recipe");
8907 IndexOfFirstOperand = 1;
8908 } else {
8909 assert((MinVF.isScalar() || isa<VPWidenRecipe>(CurrentLink)) &&
8910 "Expected to replace a VPWidenSC");
8911 IndexOfFirstOperand = 0;
8912 }
8913 // Note that for non-commutable operands (cmp-selects), the semantics of
8914 // the cmp-select are captured in the recurrence kind.
8915 unsigned VecOpId =
8916 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
8917 ? IndexOfFirstOperand + 1
8918 : IndexOfFirstOperand;
8919 VecOp = CurrentLink->getOperand(VecOpId);
8920 assert(VecOp != PreviousLink &&
8921 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
8922 (VecOpId - IndexOfFirstOperand)) ==
8923 PreviousLink &&
8924 "PreviousLink must be the operand other than VecOp");
8925 }
8926
8927 VPValue *CondOp = nullptr;
8928 if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent()))
8929 CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent());
8930
8931 // TODO: Retrieve FMFs from recipes directly.
8932 RecurrenceDescriptor RdxDesc = Legal->getRecurrenceDescriptor(
8933 cast<PHINode>(PhiR->getUnderlyingInstr()));
8934 // Non-FP RdxDescs will have all fast math flags set, so clear them.
8935 FastMathFlags FMFs = isa<FPMathOperator>(CurrentLinkI)
8936 ? RdxDesc.getFastMathFlags()
8937 : FastMathFlags();
8938 auto *RedRecipe = new VPReductionRecipe(
8939 Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp,
8940 PhiR->isOrdered(), CurrentLinkI->getDebugLoc());
8941 // Append the recipe to the end of the VPBasicBlock because we need to
8942 // ensure that it comes after all of it's inputs, including CondOp.
8943 // Delete CurrentLink as it will be invalid if its operand is replaced
8944 // with a reduction defined at the bottom of the block in the next link.
8945 if (LinkVPBB->getNumSuccessors() == 0)
8946 RedRecipe->insertBefore(&*std::prev(std::prev(LinkVPBB->end())));
8947 else
8948 LinkVPBB->appendRecipe(RedRecipe);
8949
8950 CurrentLink->replaceAllUsesWith(RedRecipe);
8951 ToDelete.push_back(CurrentLink);
8952 PreviousLink = RedRecipe;
8953 }
8954 }
8955 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8956 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8957 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8958 for (VPRecipeBase &R :
8959 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8960 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8961 if (!PhiR)
8962 continue;
8963
8964 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8965        cast<PHINode>(PhiR->getUnderlyingInstr()));
8966    Type *PhiTy = PhiR->getUnderlyingValue()->getType();
8967 // If tail is folded by masking, introduce selects between the phi
8968 // and the users outside the vector region of each reduction, at the
8969 // beginning of the dedicated latch block.
8970 auto *OrigExitingVPV = PhiR->getBackedgeValue();
8971 auto *NewExitingVPV = PhiR->getBackedgeValue();
8972 // Don't output selects for partial reductions because they have an output
8973 // with fewer lanes than the VF. So the operands of the select would have
8974 // different numbers of lanes. Partial reductions mask the input instead.
8975 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
8976 !isa<VPPartialReductionRecipe>(OrigExitingVPV->getDefiningRecipe())) {
8977 VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent());
8978 std::optional<FastMathFlags> FMFs =
8979 PhiTy->isFloatingPointTy()
8980 ? std::make_optional(RdxDesc.getFastMathFlags())
8981 : std::nullopt;
8982 NewExitingVPV =
8983 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", FMFs);
8984 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
8985 return isa<VPInstruction>(&U) &&
8986               (cast<VPInstruction>(&U)->getOpcode() ==
8987                    VPInstruction::ComputeAnyOfResult ||
8988                cast<VPInstruction>(&U)->getOpcode() ==
8989                    VPInstruction::ComputeReductionResult ||
8990                cast<VPInstruction>(&U)->getOpcode() ==
8991                    VPInstruction::ComputeFindIVResult);
8992 });
8993 if (CM.usePredicatedReductionSelect())
8994 PhiR->setOperand(1, NewExitingVPV);
8995 }
8996
8997 // We want code in the middle block to appear to execute on the location of
8998 // the scalar loop's latch terminator because: (a) it is all compiler
8999 // generated, (b) these instructions are always executed after evaluating
9000 // the latch conditional branch, and (c) other passes may add new
9001 // predecessors which terminate on this line. This is the easiest way to
9002 // ensure we don't accidentally cause an extra step back into the loop while
9003 // debugging.
9004 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
9005
9006 // TODO: At the moment ComputeReductionResult also drives creation of the
9007 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
9008 // even for in-loop reductions, until the reduction resume value handling is
9009 // also modeled in VPlan.
9010 VPInstruction *FinalReductionResult;
9011 VPBuilder::InsertPointGuard Guard(Builder);
9012 Builder.setInsertPoint(MiddleVPBB, IP);
9013 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
9014    if (RecurrenceDescriptor::isFindIVRecurrenceKind(RecurrenceKind)) {
9015      VPValue *Start = PhiR->getStartValue();
9016 VPValue *Sentinel = Plan->getOrAddLiveIn(RdxDesc.getSentinelValue());
9017 FinalReductionResult =
9018 Builder.createNaryOp(VPInstruction::ComputeFindIVResult,
9019 {PhiR, Start, Sentinel, NewExitingVPV}, ExitDL);
9020 } else if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
9021 VPValue *Start = PhiR->getStartValue();
9022 FinalReductionResult =
9023 Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
9024 {PhiR, Start, NewExitingVPV}, ExitDL);
9025 } else {
9026 VPIRFlags Flags =
9027 PhiTy->isFloatingPointTy()
9028 ? VPIRFlags(RdxDesc.getFastMathFlags())
9029 : VPIRFlags();
9030 FinalReductionResult =
9031 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
9032 {PhiR, NewExitingVPV}, Flags, ExitDL);
9033 }
9034 // If the vector reduction can be performed in a smaller type, we truncate
9035 // then extend the loop exit value to enable InstCombine to evaluate the
9036 // entire expression in the smaller type.
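// Editorial example (illustrative types only, not from the source): an i32
// accumulator whose values fit in i8 can be narrowed as
//   %trunc = trunc <VF x i32> %exit to <VF x i8>
//   %ext = sext <VF x i8> %trunc to <VF x i32>   ; or zext for unsigned
// which lets InstCombine fold the surrounding arithmetic in the narrow type.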
9037 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
9038 !RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
9039 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
9040 assert(!RecurrenceDescriptor::isMinMaxRecurrenceKind(RecurrenceKind) &&
9041 "Unexpected truncated min-max recurrence!");
9042 Type *RdxTy = RdxDesc.getRecurrenceType();
9043 auto *Trunc =
9044 new VPWidenCastRecipe(Instruction::Trunc, NewExitingVPV, RdxTy);
9045 Instruction::CastOps ExtendOpc =
9046 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
9047 auto *Extnd = new VPWidenCastRecipe(ExtendOpc, Trunc, PhiTy);
9048 Trunc->insertAfter(NewExitingVPV->getDefiningRecipe());
9049 Extnd->insertAfter(Trunc);
9050 if (PhiR->getOperand(1) == NewExitingVPV)
9051 PhiR->setOperand(1, Extnd->getVPSingleValue());
9052
9053 // Update ComputeReductionResult with the truncated exiting value and
9054 // extend its result.
9055 FinalReductionResult->setOperand(1, Trunc);
9056 FinalReductionResult =
9057 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
9058 }
9059
9060 // Update all users outside the vector region. Also replace redundant
9061 // ExtractLastElement.
9062 for (auto *U : to_vector(OrigExitingVPV->users())) {
9063 auto *Parent = cast<VPRecipeBase>(U)->getParent();
9064 if (FinalReductionResult == U || Parent->getParent())
9065 continue;
9066 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
9068 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
9069 }
9070
9071 // Adjust AnyOf reductions; replace the reduction phi for the selected value
9072 // with a boolean reduction phi node to check if the condition is true in
9073 // any iteration. The final value is selected by the final
9074 // ComputeReductionResult.
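// Editorial sketch (assumed IR shapes, not from the source): a selected-value
// recurrence such as
//   %cmp = icmp eq <VF x i32> %x, %y
//   %sel = select <VF x i1> %cmp, <VF x i32> %new, <VF x i32> %rdx.phi
// is rewritten below into a boolean recurrence
//   %any = or <VF x i1> %rdx.phi, %cmp
// and ComputeAnyOfResult later picks between the start and the selected value.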
9075 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
9076 auto *Select = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
9077 return isa<VPWidenSelectRecipe>(U) ||
9078 (isa<VPReplicateRecipe>(U) &&
9079 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
9080 Instruction::Select);
9081 }));
9082 VPValue *Cmp = Select->getOperand(0);
9083 // If the compare is checking the reduction PHI node, adjust it to check
9084 // the start value.
9085 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
9086 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
9087 Builder.setInsertPoint(Select);
9088
9089 // If the true value of the select is the reduction phi, the new value is
9090 // selected if the negated condition is true in any iteration.
9091 if (Select->getOperand(1) == PhiR)
9092 Cmp = Builder.createNot(Cmp);
9093 VPValue *Or = Builder.createOr(PhiR, Cmp);
9094 Select->getVPSingleValue()->replaceAllUsesWith(Or);
9095 // Delete Select now that it has invalid types.
9096 ToDelete.push_back(Select);
9097
9098 // Convert the reduction phi to operate on bools.
9099 PhiR->setOperand(0, Plan->getOrAddLiveIn(ConstantInt::getFalse(
9100 OrigLoop->getHeader()->getContext())));
9101 continue;
9102 }
9103
9104 if (RecurrenceDescriptor::isFindIVRecurrenceKind(
9105 RdxDesc.getRecurrenceKind())) {
9106 // Adjust the start value for FindFirstIV/FindLastIV recurrences to use
9107 // the sentinel value after generating the ResumePhi recipe, which uses
9108 // the original start value.
9109 PhiR->setOperand(0, Plan->getOrAddLiveIn(RdxDesc.getSentinelValue()));
9110 }
9111 RecurKind RK = RdxDesc.getRecurrenceKind();
9115 VPBuilder PHBuilder(Plan->getVectorPreheader());
9116 VPValue *Iden = Plan->getOrAddLiveIn(
9117 getRecurrenceIdentity(RK, PhiTy, RdxDesc.getFastMathFlags()));
9118 // If the PHI is used by a partial reduction, set the scale factor.
9119 unsigned ScaleFactor =
9120 RecipeBuilder.getScalingForReduction(RdxDesc.getLoopExitInstr())
9121 .value_or(1);
9122 Type *I32Ty = IntegerType::getInt32Ty(PhiTy->getContext());
9123 auto *ScaleFactorVPV =
9124 Plan->getOrAddLiveIn(ConstantInt::get(I32Ty, ScaleFactor));
9125 VPValue *StartV = PHBuilder.createNaryOp(
9126 VPInstruction::ReductionStartVector,
9127 {PhiR->getStartValue(), Iden, ScaleFactorVPV},
9128 PhiTy->isFloatingPointTy() ? RdxDesc.getFastMathFlags()
9129 : FastMathFlags());
9130 PhiR->setOperand(0, StartV);
9131 }
9132 }
9133 for (VPRecipeBase *R : ToDelete)
9134 R->eraseFromParent();
9135
9137}
9138
9139void LoopVectorizationPlanner::attachRuntimeChecks(
9140 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
9141 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
9142 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
9143 assert((!CM.OptForSize ||
9144 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
9145 "Cannot SCEV check stride or overflow when optimizing for size");
9146 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
9147 HasBranchWeights);
9148 }
9149 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
9150 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
9151 // VPlan-native path does not do any analysis for runtime checks
9152 // currently.
9153 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
9154 "Runtime checks are not supported for outer loops yet");
9155
9156 if (CM.OptForSize) {
9157 assert(
9158 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
9159 "Cannot emit memory checks when optimizing for size, unless forced "
9160 "to vectorize.");
9161 ORE->emit([&]() {
9162 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
9163 OrigLoop->getStartLoc(),
9164 OrigLoop->getHeader())
9165 << "Code-size may be reduced by not forcing "
9166 "vectorization, or by source-code modifications "
9167 "eliminating the need for runtime checks "
9168 "(e.g., adding 'restrict').";
9169 });
9170 }
9171 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
9172 HasBranchWeights);
9173 }
9174}
9175
9176 void LoopVectorizationPlanner::addMinimumIterationCheck(
9177 VPlan &Plan, ElementCount VF, unsigned UF,
9178 ElementCount MinProfitableTripCount) const {
9179 // vscale is not necessarily a power-of-2, which means we cannot guarantee
9180 // an overflow to zero when updating induction variables and so an
9181 // additional overflow check is required before entering the vector loop.
9182 bool IsIndvarOverflowCheckNeededForVF =
9183 VF.isScalable() && !TTI.isVScaleKnownToBeAPowerOfTwo() &&
9184 !isIndvarOverflowCheckKnownFalse(&CM, VF, UF) &&
9185 CM.getTailFoldingStyle() !=
9187 const uint32_t *BranchWeigths =
9188 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
9189 ? MinItersBypassWeights
9190 : nullptr;
9192 Plan, VF, UF, MinProfitableTripCount,
9193 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
9194 IsIndvarOverflowCheckNeededForVF, OrigLoop, BranchWeigths,
9195 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
9196 *PSE.getSE());
9197}
9198
9199 void VPDerivedIVRecipe::execute(VPTransformState &State) {
9200 assert(!State.Lane && "VPDerivedIVRecipe being replicated.");
9201
9202 // Fast-math-flags propagate from the original induction instruction.
9203 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9204 if (FPBinOp)
9205 State.Builder.setFastMathFlags(FPBinOp->getFastMathFlags());
9206
9207 Value *Step = State.get(getStepValue(), VPLane(0));
9208 Value *Index = State.get(getOperand(1), VPLane(0));
9209 Value *DerivedIV = emitTransformedIndex(
9210 State.Builder, Index, getStartValue()->getLiveInIRValue(), Step, Kind,
9212 DerivedIV->setName(Name);
9213 State.set(this, DerivedIV, VPLane(0));
9214}
9215
9216// Determine how to lower the scalar epilogue, which depends on 1) optimising
9217// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9218// predication, and 4) a TTI hook that analyses whether the loop is suitable
9219// for predication.
9224 // 1) OptSize takes precedence over all other options, i.e. if this is set,
9225 // don't look at hints or options, and don't request a scalar epilogue.
9226 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9227 // LoopAccessInfo (due to code dependency and not being able to reliably get
9228 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9229 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9230 // versioning when the vectorization is forced, unlike hasOptSize. So revert
9231 // back to the old way and vectorize with versioning when forced. See D81345.)
9232 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9236
9237 // 2) If set, obey the directives
9238 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9246 };
9247 }
9248
9249 // 3) If set, obey the hints
9250 switch (Hints.getPredicate()) {
9255 };
9256
9257 // 4) if the TTI hook indicates this is profitable, request predication.
9258 TailFoldingInfo TFI(TLI, &LVL, IAI);
9259 if (TTI->preferPredicateOverEpilogue(&TFI))
9261
9263}
9264
9265// Process the loop in the VPlan-native vectorization path. This path builds
9266 // VPlan upfront in the vectorization pipeline, which allows applying
9267// VPlan-to-VPlan transformations from the very beginning without modifying the
9268// input LLVM IR.
9269 static bool processLoopInVPlanNativePath(
9270 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9271 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9272 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9273 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9274 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9275 LoopVectorizationRequirements &Requirements) {
9276
9278 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9279 return false;
9280 }
9281 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9282 Function *F = L->getHeader()->getParent();
9283 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9284
9285 ScalarEpilogueLowering SEL =
9286 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, *LVL, &IAI);
9287
9288 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9289 &Hints, IAI, PSI, BFI);
9290 // Use the planner for outer loop vectorization.
9291 // TODO: CM is not used at this point inside the planner. Turn CM into an
9292 // optional argument if we don't need it in the future.
9293 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9294 ORE);
9295
9296 // Get user vectorization factor.
9297 ElementCount UserVF = Hints.getWidth();
9298
9300
9301 // Plan how to best vectorize, return the best VF and its cost.
9302 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9303
9304 // If we are stress testing VPlan builds, do not attempt to generate vector
9305 // code. Masked vector code generation support will follow soon.
9306 // Also, do not attempt to vectorize if no vector code will be produced.
9308 return false;
9309
9310 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
9311
9312 {
9313 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
9314 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
9315 BFI, PSI, Checks, BestPlan);
9316 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9317 << L->getHeader()->getParent()->getName() << "\"\n");
9318 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
9319 VF.MinProfitableTripCount);
9320
9321 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT, false);
9322 }
9323
9324 reportVectorization(ORE, L, VF, 1);
9325
9326 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9327 return true;
9328}
9329
9330// Emit a remark if there are stores to floats that required a floating point
9331 // extension. If the vectorized loop was generated with floating point, there
9332// will be a performance penalty from the conversion overhead and the change in
9333// the vector width.
9334 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9335 SmallVector<Instruction *, 8> Worklist;
9336 for (BasicBlock *BB : L->getBlocks()) {
9337 for (Instruction &Inst : *BB) {
9338 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9339 if (S->getValueOperand()->getType()->isFloatTy())
9340 Worklist.push_back(S);
9341 }
9342 }
9343 }
9344
9345 // Traverse the floating point stores upwards, searching for floating point
9346 // conversions.
9347 SmallPtrSet<const Instruction *, 4> Visited;
9348 SmallPtrSet<const Instruction *, 4> EmittedRemark;
9349 while (!Worklist.empty()) {
9350 auto *I = Worklist.pop_back_val();
9351 if (!L->contains(I))
9352 continue;
9353 if (!Visited.insert(I).second)
9354 continue;
9355
9356 // Emit a remark if the floating point store required a floating
9357 // point conversion.
9358 // TODO: More work could be done to identify the root cause such as a
9359 // constant or a function return type and point the user to it.
9360 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9361 ORE->emit([&]() {
9362 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9363 I->getDebugLoc(), L->getHeader())
9364 << "floating point conversion changes vector width. "
9365 << "Mixed floating point precision requires an up/down "
9366 << "cast that will negatively impact performance.";
9367 });
9368
9369 for (Use &Op : I->operands())
9370 if (auto *OpI = dyn_cast<Instruction>(Op))
9371 Worklist.push_back(OpI);
9372 }
9373}
9374
9375/// For loops with uncountable early exits, find the cost of doing work when
9376/// exiting the loop early, such as calculating the final exit values of
9377/// variables used outside the loop.
9378/// TODO: This is currently overly pessimistic because the loop may not take
9379/// the early exit, but better to keep this conservative for now. In future,
9380/// it might be possible to relax this by using branch probabilities.
9382 VPlan &Plan, ElementCount VF) {
9383 InstructionCost Cost = 0;
9384 for (auto *ExitVPBB : Plan.getExitBlocks()) {
9385 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
9386 // If the predecessor is not the middle.block, then it must be the
9387 // vector.early.exit block, which may contain work to calculate the exit
9388 // values of variables used outside the loop.
9389 if (PredVPBB != Plan.getMiddleBlock()) {
9390 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
9391 << PredVPBB->getName() << ":\n");
9392 Cost += PredVPBB->cost(VF, CostCtx);
9393 }
9394 }
9395 }
9396 return Cost;
9397}
9398
9399/// This function determines whether or not it's still profitable to vectorize
9400/// the loop given the extra work we have to do outside of the loop:
9401/// 1. Perform the runtime checks before entering the loop to ensure it's safe
9402/// to vectorize.
9403/// 2. In the case of loops with uncountable early exits, we may have to do
9404/// extra work when exiting the loop early, such as calculating the final
9405/// exit values of variables used outside the loop.
9406static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
9407 VectorizationFactor &VF, Loop *L,
9409 VPCostContext &CostCtx, VPlan &Plan,
9411 std::optional<unsigned> VScale) {
9412 InstructionCost TotalCost = Checks.getCost();
9413 if (!TotalCost.isValid())
9414 return false;
9415
9416 // Add on the cost of any work required in the vector early exit block, if
9417 // one exists.
9418 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
9419
9420 // When only interleaving, the scalar and vector costs will be equal, which
9421 // in turn would lead to a divide by 0. Fall back to a hard threshold.
9422 if (VF.Width.isScalar()) {
9423 // TODO: Should we rename VectorizeMemoryCheckThreshold?
9424 if (TotalCost > VectorizeMemoryCheckThreshold) {
9425 LLVM_DEBUG(
9426 dbgs()
9427 << "LV: Interleaving only is not profitable due to runtime checks\n");
9428 return false;
9429 }
9430 return true;
9431 }
9432
9433 // The scalar cost should only be 0 when vectorizing with a user specified
9434 // VF/IC. In those cases, runtime checks should always be generated.
9435 uint64_t ScalarC = VF.ScalarCost.getValue();
9436 if (ScalarC == 0)
9437 return true;
9438
9439 // First, compute the minimum iteration count required so that the vector
9440 // loop outperforms the scalar loop.
9441 // The total cost of the scalar loop is
9442 // ScalarC * TC
9443 // where
9444 // * TC is the actual trip count of the loop.
9445 // * ScalarC is the cost of a single scalar iteration.
9446 //
9447 // The total cost of the vector loop is
9448 // RtC + VecC * (TC / VF) + EpiC
9449 // where
9450 // * RtC is the cost of the generated runtime checks plus the cost of
9451 // performing any additional work in the vector.early.exit block for loops
9452 // with uncountable early exits.
9453 // * VecC is the cost of a single vector iteration.
9454 // * TC is the actual trip count of the loop
9455 // * VF is the vectorization factor
9456 // * EpiC is the cost of the generated epilogue, including the cost
9457 // of the remaining scalar operations.
9458 //
9459 // Vectorization is profitable once the total vector cost is less than the
9460 // total scalar cost:
9461 // RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
9462 //
9463 // Now we can compute the minimum required trip count TC as
9464 // VF * (RtC + EpiC) / (ScalarC * VF - VecC) < TC
9465 //
9466 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
9467 // the computations are performed on doubles, not integers and the result
9468 // is rounded up, hence we get an upper estimate of the TC.
9469 unsigned IntVF = estimateElementCount(VF.Width, VScale);
9470 uint64_t RtC = TotalCost.getValue();
9471 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
9472 uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div);
9473
9474 // Second, compute a minimum iteration count so that the cost of the
9475 // runtime checks is only a fraction of the total scalar loop cost. This
9476 // adds a loop-dependent bound on the overhead incurred if the runtime
9477 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
9478 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
9479 // cost, compute
9480 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
9481 uint64_t MinTC2 = divideCeil(RtC * 10, ScalarC);
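// Editorial worked example (illustrative numbers only): with ScalarC = 4,
// VecC = 10, IntVF = 4 and RtC = 32, the break-even bound above is
//   MinTC1 = ceil(32 * 4 / (4 * 4 - 10)) = ceil(128 / 6) = 22,
// and the overhead bound with X = 10 is
//   MinTC2 = ceil(32 * 10 / 4) = 80,
// so the trip count must reach max(22, 80) = 80 (rounded up below to a
// multiple of VF when a scalar epilogue is allowed) before the runtime
// checks pay off.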
9482
9483 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
9484 // epilogue is allowed, choose the next closest multiple of VF. This should
9485 // partly compensate for ignoring the epilogue cost.
9486 uint64_t MinTC = std::max(MinTC1, MinTC2);
9487 if (SEL == CM_ScalarEpilogueAllowed)
9488 MinTC = alignTo(MinTC, IntVF);
9489 VF.MinProfitableTripCount = ElementCount::getFixed(MinTC);
9490
9491 LLVM_DEBUG(
9492 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
9493 << VF.MinProfitableTripCount << "\n");
9494
9495 // Skip vectorization if the expected trip count is less than the minimum
9496 // required trip count.
9497 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
9498 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
9499 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9500 "trip count < minimum profitable VF ("
9501 << *ExpectedTC << " < " << VF.MinProfitableTripCount
9502 << ")\n");
9503
9504 return false;
9505 }
9506 }
9507 return true;
9508}
9509
9510 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9511 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9512 !EnableLoopInterleaving),
9513 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9514 !EnableLoopVectorization) {}
9515
9516/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
9517/// vectorization. Remove ResumePhis from \p MainPlan for inductions that
9518/// don't have a corresponding wide induction in \p EpiPlan.
9519static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
9520 // Collect PHI nodes of widened phis in the VPlan for the epilogue. Those
9521 // will need their resume-values computed in the main vector loop. Others
9522 // can be removed from the main VPlan.
9523 SmallPtrSet<PHINode *, 2> EpiWidenedPhis;
9524 for (VPRecipeBase &R :
9525 EpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9526 if (isa<VPCanonicalIVPHIRecipe>(&R))
9527 continue;
9528 EpiWidenedPhis.insert(
9529 cast<PHINode>(R.getVPSingleValue()->getUnderlyingValue()));
9530 }
9531 for (VPRecipeBase &R :
9532 make_early_inc_range(MainPlan.getScalarHeader()->phis())) {
9533 auto *VPIRInst = cast<VPIRPhi>(&R);
9534 if (EpiWidenedPhis.contains(&VPIRInst->getIRPhi()))
9535 continue;
9536 // There is no corresponding wide induction in the epilogue plan that would
9537 // need a resume value. Remove the VPIRInst wrapping the scalar header phi
9538 // together with the corresponding ResumePhi. The resume values for the
9539 // scalar loop will be created during execution of EpiPlan.
9540 VPRecipeBase *ResumePhi = VPIRInst->getOperand(0)->getDefiningRecipe();
9541 VPIRInst->eraseFromParent();
9542 ResumePhi->eraseFromParent();
9543 }
9545
9546 using namespace VPlanPatternMatch;
9547 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
9548 // introduce multiple uses of undef/poison. If the reduction start value may
9549 // be undef or poison it needs to be frozen and the frozen start has to be
9550 // used when computing the reduction result. We also need to use the frozen
9551 // value in the resume phi generated by the main vector loop, as this is also
9552 // used to compute the reduction result after the epilogue vector loop.
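// Editorial note (not in the original source): freezing matters because a
// poison or undef start value used twice may be observed as two different
// values; e.g.
//   %fr = freeze i32 %start
// pins one concrete value that both the reduction result and the resume phi
// then agree on.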
9553 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
9554 bool UpdateResumePhis) {
9555 VPBuilder Builder(Plan.getEntry());
9556 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
9557 auto *VPI = dyn_cast<VPInstruction>(&R);
9558 if (!VPI || VPI->getOpcode() != VPInstruction::ComputeFindIVResult)
9559 continue;
9560 VPValue *OrigStart = VPI->getOperand(1);
9561 if (isGuaranteedNotToBeUndefOrPoison(OrigStart->getLiveInIRValue()))
9562 continue;
9563 VPInstruction *Freeze =
9564 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
9565 VPI->setOperand(1, Freeze);
9566 if (UpdateResumePhis)
9567 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
9568 return Freeze != &U && isa<VPPhi>(&U);
9569 });
9570 }
9571 };
9572 AddFreezeForFindLastIVReductions(MainPlan, true);
9573 AddFreezeForFindLastIVReductions(EpiPlan, false);
9574
9575 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
9576 VPValue *VectorTC = &MainPlan.getVectorTripCount();
9577 // If there is a suitable resume value for the canonical induction in the
9578 // scalar (which will become vector) epilogue loop, use it and move it to the
9579 // beginning of the scalar preheader. Otherwise create it below.
9580 auto ResumePhiIter =
9581 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
9582 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
9583 m_ZeroInt()));
9584 });
9585 VPPhi *ResumePhi = nullptr;
9586 if (ResumePhiIter == MainScalarPH->phis().end()) {
9587 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
9588 ResumePhi = ScalarPHBuilder.createScalarPhi(
9589 {VectorTC, MainPlan.getCanonicalIV()->getStartValue()}, {},
9590 "vec.epilog.resume.val");
9591 } else {
9592 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
9593 if (MainScalarPH->begin() == MainScalarPH->end())
9594 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->end());
9595 else if (&*MainScalarPH->begin() != ResumePhi)
9596 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
9597 }
9598 // Add a user to make sure the resume phi won't get removed.
9599 VPBuilder(MainScalarPH)
9601}
9602
9603/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
9604/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes.
9605static void
9606 preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L,
9607 const SCEV2ValueTy &ExpandedSCEVs,
9608 EpilogueLoopVectorizationInfo &EPI) {
9609 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
9610 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
9611 Header->setName("vec.epilog.vector.body");
9612
9613 DenseMap<Value *, Value *> ToFrozen;
9614 // Ensure that the start values for all header phi recipes are updated before
9615 // vectorizing the epilogue loop.
9616 for (VPRecipeBase &R : Header->phis()) {
9617 if (auto *IV = dyn_cast<VPCanonicalIVPHIRecipe>(&R)) {
9618 // When vectorizing the epilogue loop, the canonical induction start
9619 // value needs to be changed from zero to the value after the main
9620 // vector loop. Find the resume value created during execution of the main
9621 // VPlan. It must be the first phi in the loop preheader.
9622 // FIXME: Improve modeling for canonical IV start values in the epilogue
9623 // loop.
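// Editorial note (not from the source): e.g. if the main vector loop executed
// 96 of 100 iterations, the epilogue's canonical IV must start at 96 rather
// than 0; the code below retrieves that resume value from the first phi in
// the loop preheader.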
9624 using namespace llvm::PatternMatch;
9625 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
9626 for (Value *Inc : EPResumeVal->incoming_values()) {
9627 if (match(Inc, m_SpecificInt(0)))
9628 continue;
9629 assert(!EPI.VectorTripCount &&
9630 "Must only have a single non-zero incoming value");
9631 EPI.VectorTripCount = Inc;
9632 }
9633 // If we didn't find a non-zero vector trip count, all incoming values
9634 // must be zero, which also means the vector trip count is zero. Pick the
9635 // first zero as vector trip count.
9636 // TODO: We should not choose VF * UF so the main vector loop is known to
9637 // be dead.
9638 if (!EPI.VectorTripCount) {
9639 assert(
9640 EPResumeVal->getNumIncomingValues() > 0 &&
9641 all_of(EPResumeVal->incoming_values(),
9642 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
9643 "all incoming values must be 0");
9644 EPI.VectorTripCount = EPResumeVal->getOperand(0);
9645 }
9646 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
9647 assert(all_of(IV->users(),
9648 [](const VPUser *U) {
9649 return isa<VPScalarIVStepsRecipe>(U) ||
9650 isa<VPDerivedIVRecipe>(U) ||
9651 cast<VPRecipeBase>(U)->isScalarCast() ||
9652 cast<VPInstruction>(U)->getOpcode() ==
9653 Instruction::Add;
9654 }) &&
9655 "the canonical IV should only be used by its increment or "
9656 "ScalarIVSteps when resetting the start value");
9657 IV->setOperand(0, VPV);
9658 continue;
9659 }
9660
9661 Value *ResumeV = nullptr;
9662 // TODO: Move setting of resume values to prepareToExecute.
9663 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
9664 auto *RdxResult =
9665 cast<VPInstruction>(*find_if(ReductionPhi->users(), [](VPUser *U) {
9666 auto *VPI = dyn_cast<VPInstruction>(U);
9667 return VPI &&
9668 (VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
9669 VPI->getOpcode() == VPInstruction::ComputeReductionResult ||
9670 VPI->getOpcode() == VPInstruction::ComputeFindIVResult);
9671 }));
9672 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
9673 ->getIncomingValueForBlock(L->getLoopPreheader());
9674 RecurKind RK = ReductionPhi->getRecurrenceKind();
9675 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
9676 Value *StartV = RdxResult->getOperand(1)->getLiveInIRValue();
9677 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
9678 // start value; compare the final value from the main vector loop
9679 // to the start value.
9680 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
9681 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
9682 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
9683 } else if (RecurrenceDescriptor::isFindIVRecurrenceKind(RK)) {
9684 Value *StartV = getStartValueFromReductionResult(RdxResult);
9685 ToFrozen[StartV] = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
9687
9688 // VPReductionPHIRecipe for FindFirstIV/FindLastIV reductions requires
9689 // an adjustment to the resume value. The resume value is adjusted to
9690 // the sentinel value when the final value from the main vector loop
9691 // equals the start value. This ensures correctness when the start value
9692 // might not be less than the minimum value of a monotonically
9693 // increasing induction variable.
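// Editorial example (hypothetical values): with a start value of 5 and a
// sentinel of INT_MIN, a main-loop result equal to 5 means no iteration
// updated the recurrence, so the epilogue resumes from the sentinel instead
// of treating 5 as a real match.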
9694 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
9695 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
9696 Value *Cmp = Builder.CreateICmpEQ(ResumeV, ToFrozen[StartV]);
9697 Value *Sentinel = RdxResult->getOperand(2)->getLiveInIRValue();
9698 ResumeV = Builder.CreateSelect(Cmp, Sentinel, ResumeV);
9699 } else {
9700 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9701 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9702 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
9703 assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
9704 "unexpected start value");
9705 VPI->setOperand(0, StartVal);
9706 continue;
9707 }
9708 }
9709 } else {
9710 // Retrieve the induction resume values for wide inductions from
9711 // their original phi nodes in the scalar loop.
9712 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
9713 // Hook up to the PHINode generated by a ResumePhi recipe of main
9714 // loop VPlan, which feeds the scalar loop.
9715 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
9716 }
9717 assert(ResumeV && "Must have a resume value");
9718 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9719 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
9720 }
9721
9722 // For some VPValues in the epilogue plan we must re-use the generated IR
9723 // values from the main plan. Replace them with live-in VPValues.
9724 // TODO: This is a workaround needed for epilogue vectorization and it
9725 // should be removed once induction resume value creation is done
9726 // directly in VPlan.
9727 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
9728 // Re-use frozen values from the main plan for Freeze VPInstructions in the
9729 // epilogue plan. This ensures all users use the same frozen value.
9730 auto *VPI = dyn_cast<VPInstruction>(&R);
9731 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
9732 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
9733 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
9734 continue;
9735 }
9736
9737 // Re-use the trip count and steps expanded for the main loop, as
9738 // skeleton creation needs it as a value that dominates both the scalar
9739 // and vector epilogue loops
9740 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
9741 if (!ExpandR)
9742 continue;
9743 VPValue *ExpandedVal =
9744 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
9745 ExpandR->replaceAllUsesWith(ExpandedVal);
9746 if (Plan.getTripCount() == ExpandR)
9747 Plan.resetTripCount(ExpandedVal);
9748 ExpandR->eraseFromParent();
9749 }
9750}
9751
9752// Generate bypass values from the additional bypass block. Note that when the
9753// vectorized epilogue is skipped due to iteration count check, then the
9754// resume value for the induction variable comes from the trip count of the
9755// main vector loop, passed as the second argument.
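// Editorial example (hypothetical values, not from the source): for an
// induction i = Start + k * Step, the bypass end value computed below is
//   Start + MainVectorTripCount * Step,
// i.e. where the scalar remainder loop must resume if the vectorized epilogue
// is skipped.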
9756 static Value *createInductionAdditionalBypassValues(
9757 PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder,
9758 const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount,
9759 Instruction *OldInduction) {
9760 Value *Step = getExpandedStep(II, ExpandedSCEVs);
9761 // For the primary induction the additional bypass end value is known.
9762 // Otherwise it is computed.
9763 Value *EndValueFromAdditionalBypass = MainVectorTripCount;
9764 if (OrigPhi != OldInduction) {
9765 auto *BinOp = II.getInductionBinOp();
9766 // Fast-math-flags propagate from the original induction instruction.
9768 BypassBuilder.setFastMathFlags(BinOp->getFastMathFlags());
9769
9770 // Compute the end value for the additional bypass.
9771 EndValueFromAdditionalBypass =
9772 emitTransformedIndex(BypassBuilder, MainVectorTripCount,
9773 II.getStartValue(), Step, II.getKind(), BinOp);
9774 EndValueFromAdditionalBypass->setName("ind.end");
9775 }
9776 return EndValueFromAdditionalBypass;
9777}
9778
9780 VPlan &BestEpiPlan,
9782 const SCEV2ValueTy &ExpandedSCEVs,
9783 Value *MainVectorTripCount) {
9784 // Fix reduction resume values from the additional bypass block.
9785 BasicBlock *PH = L->getLoopPreheader();
9786 for (auto *Pred : predecessors(PH)) {
9787 for (PHINode &Phi : PH->phis()) {
9788 if (Phi.getBasicBlockIndex(Pred) != -1)
9789 continue;
9790 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
9791 }
9792 }
9793 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
9794 if (ScalarPH->hasPredecessors()) {
9795 // If ScalarPH has predecessors, we may need to update its reduction
9796 // resume values.
9797 for (const auto &[R, IRPhi] :
9798 zip(ScalarPH->phis(), ScalarPH->getIRBasicBlock()->phis())) {
9799 fixReductionScalarResumeWhenVectorizingEpilog(cast<VPPhi>(&R), IRPhi,
9800 BypassBlock);
9801 }
9802 }
9803
9804 // Fix induction resume values from the additional bypass block.
9805 IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
9806 for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
9807 auto *Inc = cast<PHINode>(IVPhi->getIncomingValueForBlock(PH));
9808 Value *V = createInductionAdditionalBypassValues(
9809 IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
9810 LVL.getPrimaryInduction());
9811 // TODO: Directly add as extra operand to the VPResumePHI recipe.
9812 Inc->setIncomingValueForBlock(BypassBlock, V);
9813 }
9814}
9815
9816 bool LoopVectorizePass::processLoop(Loop *L) {
9817 assert((EnableVPlanNativePath || L->isInnermost()) &&
9818 "VPlan-native path is not enabled. Only process inner loops.");
9819
9820 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9821 << L->getHeader()->getParent()->getName() << "' from "
9822 << L->getLocStr() << "\n");
9823
9824 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9825
9826 LLVM_DEBUG(
9827 dbgs() << "LV: Loop hints:"
9828 << " force="
9829 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9830 ? "disabled"
9831 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9832 ? "enabled"
9833 : "?"))
9834 << " width=" << Hints.getWidth()
9835 << " interleave=" << Hints.getInterleave() << "\n");
9836
9837 // Function containing loop
9838 Function *F = L->getHeader()->getParent();
9839
9840 // Looking at the diagnostic output is the only way to determine if a loop
9841 // was vectorized (other than looking at the IR or machine code), so it
9842 // is important to generate an optimization remark for each loop. Most of
9843 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9844 // generated as OptimizationRemark and OptimizationRemarkMissed are
9845 // less verbose, reporting vectorized loops and unvectorized loops that may
9846 // benefit from vectorization, respectively.
9847
9848 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9849 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9850 return false;
9851 }
9852
9853 PredicatedScalarEvolution PSE(*SE, *L);
9854
9855 // Check if it is legal to vectorize the loop.
9856 LoopVectorizationRequirements Requirements;
9857 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9858 &Requirements, &Hints, DB, AC, BFI, PSI, AA);
9859 if (!LVL.canVectorize(EnableVPlanNativePath)) {
9860 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9861 Hints.emitRemarkWithHints();
9862 return false;
9863 }
9864
9865 if (LVL.hasUncountableEarlyExit() && !EnableEarlyExitVectorization) {
9866 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9867 "early exit is not enabled",
9868 "UncountableEarlyExitLoopsDisabled", ORE, L);
9869 return false;
9870 }
9871
9872 if (!LVL.getPotentiallyFaultingLoads().empty()) {
9873 reportVectorizationFailure("Auto-vectorization of loops with potentially "
9874 "faulting load is not supported",
9875 "PotentiallyFaultingLoadsNotSupported", ORE, L);
9876 return false;
9877 }
9878
9879 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9880 // here. They may require CFG and instruction level transformations before
9881 // even evaluating whether vectorization is profitable. Since we cannot modify
9882 // the incoming IR, we need to build VPlan upfront in the vectorization
9883 // pipeline.
9884 if (!L->isInnermost())
9885 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9886 ORE, BFI, PSI, Hints, Requirements);
9887
9888 assert(L->isInnermost() && "Inner loop expected.");
9889
9890 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9891 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9892
9893 // If an override option has been passed in for interleaved accesses, use it.
9894 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9895 UseInterleaved = EnableInterleavedMemAccesses;
9896
9897 // Analyze interleaved memory accesses.
9898 if (UseInterleaved)
9899 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9900
9901 if (LVL.hasUncountableEarlyExit()) {
9902 BasicBlock *LoopLatch = L->getLoopLatch();
9903 if (IAI.requiresScalarEpilogue() ||
9905 [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9906 reportVectorizationFailure("Auto-vectorization of early exit loops "
9907 "requiring a scalar epilogue is unsupported",
9908 "UncountableEarlyExitUnsupported", ORE, L);
9909 return false;
9910 }
9911 }
9912
9913 // Check the function attributes and profiles to find out if this function
9914 // should be optimized for size.
9915 ScalarEpilogueLowering SEL =
9916 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, LVL, &IAI);
9917
9918 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9919 // count by optimizing for size, to minimize overheads.
9920 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9921 if (ExpectedTC && ExpectedTC->isFixed() &&
9922 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9923 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9924 << "This loop is worth vectorizing only if no scalar "
9925 << "iteration overheads are incurred.");
9926 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9927 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9928 else {
9929 LLVM_DEBUG(dbgs() << "\n");
9930 // Predicate tail-folded loops are efficient even when the loop
9931 // iteration count is low. However, setting the epilogue policy to
9932 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9933 // with runtime checks. It's more effective to let
9934 // `isOutsideLoopWorkProfitable` determine if vectorization is
9935 // beneficial for the loop.
9938 }
9939 }
9940
9941 // Check the function attributes to see if implicit floats or vectors are
9942 // allowed.
9943 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9944 reportVectorizationFailure(
9945 "Can't vectorize when the NoImplicitFloat attribute is used",
9946 "loop not vectorized due to NoImplicitFloat attribute",
9947 "NoImplicitFloat", ORE, L);
9948 Hints.emitRemarkWithHints();
9949 return false;
9950 }
9951
9952 // Check if the target supports potentially unsafe FP vectorization.
9953 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9954 // for the target we're vectorizing for, to make sure none of the
9955 // additional fp-math flags can help.
9956 if (Hints.isPotentiallyUnsafe() &&
9957 TTI->isFPVectorizationPotentiallyUnsafe()) {
9958 reportVectorizationFailure(
9959 "Potentially unsafe FP op prevents vectorization",
9960 "loop not vectorized due to unsafe FP support.",
9961 "UnsafeFP", ORE, L);
9962 Hints.emitRemarkWithHints();
9963 return false;
9964 }
9965
9966 bool AllowOrderedReductions;
9967 // If the flag is set, use that instead and override the TTI behaviour.
9968 if (ForceOrderedReductions.getNumOccurrences() > 0)
9969 AllowOrderedReductions = ForceOrderedReductions;
9970 else
9971 AllowOrderedReductions = TTI->enableOrderedReductions();
9972 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
9973 ORE->emit([&]() {
9974 auto *ExactFPMathInst = Requirements.getExactFPInst();
9975 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9976 ExactFPMathInst->getDebugLoc(),
9977 ExactFPMathInst->getParent())
9978 << "loop not vectorized: cannot prove it is safe to reorder "
9979 "floating-point operations";
9980 });
9981 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9982 "reorder floating-point operations\n");
9983 Hints.emitRemarkWithHints();
9984 return false;
9985 }
9986
9987 // Use the cost model.
9988 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9989 F, &Hints, IAI, PSI, BFI);
9990 // Use the planner for vectorization.
9991 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
9992 ORE);
9993
9994 // Get user vectorization factor and interleave count.
9995 ElementCount UserVF = Hints.getWidth();
9996 unsigned UserIC = Hints.getInterleave();
9997
9998 // Plan how to best vectorize.
9999 LVP.plan(UserVF, UserIC);
10000 VectorizationFactor VF = LVP.computeBestVF();
10001 unsigned IC = 1;
10002
10003 if (ORE->allowExtraAnalysis(LV_NAME))
10004 LVP.emitInvalidCostRemarks(ORE);
10005
10006 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
10007 if (LVP.hasPlanWithVF(VF.Width)) {
10008 // Select the interleave count.
10009 IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
10010
10011 unsigned SelectedIC = std::max(IC, UserIC);
10012 // Optimistically generate runtime checks if they are needed. Drop them if
10013 // they turn out to not be profitable.
10014 if (VF.Width.isVector() || SelectedIC > 1) {
10015 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC);
10016
10017 // Bail out early if either the SCEV or memory runtime checks are known to
10018 // fail. In that case, the vector loop would never execute.
10019 using namespace llvm::PatternMatch;
10020 if (Checks.getSCEVChecks().first &&
10021 match(Checks.getSCEVChecks().first, m_One()))
10022 return false;
10023 if (Checks.getMemRuntimeChecks().first &&
10024 match(Checks.getMemRuntimeChecks().first, m_One()))
10025 return false;
10026 }
10027
10028 // Check if it is profitable to vectorize with runtime checks.
10029 bool ForceVectorization =
10030 Hints.getForce() == LoopVectorizeHints::FK_Enabled;
10031 VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
10032 CM.CostKind);
10033 if (!ForceVectorization &&
10034 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
10035 LVP.getPlanFor(VF.Width), SEL,
10036 CM.getVScaleForTuning())) {
10037 ORE->emit([&]() {
10038 return OptimizationRemarkAnalysisAliasing(
10039 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
10040 L->getHeader())
10041 << "loop not vectorized: cannot prove it is safe to reorder "
10042 "memory operations";
10043 });
10044 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
10045 Hints.emitRemarkWithHints();
10046 return false;
10047 }
10048 }
10049
10050 // Identify the diagnostic messages that should be produced.
10051 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10052 bool VectorizeLoop = true, InterleaveLoop = true;
10053 if (VF.Width.isScalar()) {
10054 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10055 VecDiagMsg = {
10056 "VectorizationNotBeneficial",
10057 "the cost-model indicates that vectorization is not beneficial"};
10058 VectorizeLoop = false;
10059 }
10060
10061 if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
10062 // Tell the user interleaving was avoided up-front, despite being explicitly
10063 // requested.
10064 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10065 "interleaving should be avoided up front\n");
10066 IntDiagMsg = {"InterleavingAvoided",
10067 "Ignoring UserIC, because interleaving was avoided up front"};
10068 InterleaveLoop = false;
10069 } else if (IC == 1 && UserIC <= 1) {
10070 // Tell the user interleaving is not beneficial.
10071 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10072 IntDiagMsg = {
10073 "InterleavingNotBeneficial",
10074 "the cost-model indicates that interleaving is not beneficial"};
10075 InterleaveLoop = false;
10076 if (UserIC == 1) {
10077 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10078 IntDiagMsg.second +=
10079 " and is explicitly disabled or interleave count is set to 1";
10080 }
10081 } else if (IC > 1 && UserIC == 1) {
10082 // Tell the user interleaving is beneficial, but it is explicitly disabled.
10083 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
10084 "disabled.\n");
10085 IntDiagMsg = {"InterleavingBeneficialButDisabled",
10086 "the cost-model indicates that interleaving is beneficial "
10087 "but is explicitly disabled or interleave count is set to 1"};
10088 InterleaveLoop = false;
10089 }
10090
10091 // If there is a histogram in the loop, do not just interleave without
10092 // vectorizing. The order of operations will be incorrect without the
10093 // histogram intrinsics, which are only used for recipes with VF > 1.
10094 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
10095 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
10096 << "to histogram operations.\n");
10097 IntDiagMsg = {
10098 "HistogramPreventsScalarInterleaving",
10099 "Unable to interleave without vectorization due to constraints on "
10100 "the order of histogram operations"};
10101 InterleaveLoop = false;
10102 }
10103
10104 // Override IC if user provided an interleave count.
10105 IC = UserIC > 0 ? UserIC : IC;
10106
10107 // Emit diagnostic messages, if any.
10108 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10109 if (!VectorizeLoop && !InterleaveLoop) {
10110 // Do not vectorize or interleave the loop.
10111 ORE->emit([&]() {
10112 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10113 L->getStartLoc(), L->getHeader())
10114 << VecDiagMsg.second;
10115 });
10116 ORE->emit([&]() {
10117 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10118 L->getStartLoc(), L->getHeader())
10119 << IntDiagMsg.second;
10120 });
10121 return false;
10122 }
10123
10124 if (!VectorizeLoop && InterleaveLoop) {
10125 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10126 ORE->emit([&]() {
10127 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10128 L->getStartLoc(), L->getHeader())
10129 << VecDiagMsg.second;
10130 });
10131 } else if (VectorizeLoop && !InterleaveLoop) {
10132 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10133 << ") in " << L->getLocStr() << '\n');
10134 ORE->emit([&]() {
10135 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10136 L->getStartLoc(), L->getHeader())
10137 << IntDiagMsg.second;
10138 });
10139 } else if (VectorizeLoop && InterleaveLoop) {
10140 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10141 << ") in " << L->getLocStr() << '\n');
10142 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10143 }
10144
10145 // Report the vectorization decision.
10146 if (VF.Width.isScalar()) {
10147 using namespace ore;
10148 assert(IC > 1);
10149 ORE->emit([&]() {
10150 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10151 L->getHeader())
10152 << "interleaved loop (interleaved count: "
10153 << NV("InterleaveCount", IC) << ")";
10154 });
10155 } else {
10156 // Report the vectorization decision.
10157 reportVectorization(ORE, L, VF, IC);
10158 }
10159 if (ORE->allowExtraAnalysis(LV_NAME))
10160 checkMixedPrecision(L, ORE);
10161
10162 // If we decided that it is *legal* to interleave or vectorize the loop, then
10163 // do it.
10164
10165 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
10166 // Consider vectorizing the epilogue too if it's profitable.
10167 VectorizationFactor EpilogueVF =
10168 LVP.selectEpilogueVectorizationFactor(VF.Width, IC);
10169 if (EpilogueVF.Width.isVector()) {
10170 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
10171
10172 // The first pass vectorizes the main loop and creates a scalar epilogue
10173 // to be vectorized by executing the plan (potentially with a different
10174 // factor) again shortly afterwards.
10175 VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width);
10176 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
10177 preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan);
10178 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1,
10179 BestEpiPlan);
10180 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM, BFI,
10181 PSI, Checks, *BestMainPlan);
10182 auto ExpandedSCEVs = LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF,
10183 *BestMainPlan, MainILV, DT, false);
10184 ++LoopsVectorized;
10185
10186 // Second pass vectorizes the epilogue and adjusts the control flow
10187 // edges from the first pass.
10188 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
10189 BFI, PSI, Checks, BestEpiPlan);
10190 EpilogILV.setTripCount(MainILV.getTripCount());
10191 preparePlanForEpilogueVectorLoop(BestEpiPlan, L, ExpandedSCEVs, EPI);
10192
10193 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
10194 true);
10195
10197 BestEpiPlan, LVL, ExpandedSCEVs,
10198 EPI.VectorTripCount);
10199 ++LoopsEpilogueVectorized;
10200 } else {
10201 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, BFI, PSI,
10202 Checks, BestPlan);
10203 // TODO: Move to general VPlan pipeline once epilogue loops are also
10204 // supported.
10207 IC, PSE);
10208 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
10209 VF.MinProfitableTripCount);
10210
10211 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
10212 ++LoopsVectorized;
10213 }
10214
10215 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
10216 "DT not preserved correctly");
10217 assert(!verifyFunction(*F, &dbgs()));
10218
10219 return true;
10220}
10221
10222 LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
10223
10224 // Don't attempt if
10225 // 1. the target claims to have no vector registers, and
10226 // 2. interleaving won't help ILP.
10227 //
10228 // The second condition is necessary because, even if the target has no
10229 // vector registers, loop vectorization may still enable scalar
10230 // interleaving.
10231 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10232 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
10233 return LoopVectorizeResult(false, false);
10234
10235 bool Changed = false, CFGChanged = false;
10236
10237 // The vectorizer requires loops to be in simplified form.
10238 // Since simplification may add new inner loops, it has to run before the
10239 // legality and profitability checks. This means running the loop vectorizer
10240 // will simplify all loops, regardless of whether anything ends up being
10241 // vectorized.
10242 for (const auto &L : *LI)
10243 Changed |= CFGChanged |=
10244 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10245
10246 // Build up a worklist of inner-loops to vectorize. This is necessary as
10247 // the act of vectorizing or partially unrolling a loop creates new loops
10248 // and can invalidate iterators across the loops.
10249 SmallVector<Loop *, 8> Worklist;
10250
10251 for (Loop *L : *LI)
10252 collectSupportedLoops(*L, LI, ORE, Worklist);
10253
10254 LoopsAnalyzed += Worklist.size();
10255
10256 // Now walk the identified inner loops.
10257 while (!Worklist.empty()) {
10258 Loop *L = Worklist.pop_back_val();
10259
10260 // For the inner loops we actually process, form LCSSA to simplify the
10261 // transform.
10262 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10263
10264 Changed |= CFGChanged |= processLoop(L);
10265
10266 if (Changed) {
10267 LAIs->clear();
10268
10269#ifndef NDEBUG
10270 if (VerifySCEV)
10271 SE->verify();
10272#endif
10273 }
10274 }
10275
10276 // Process each loop nest in the function.
10277 return LoopVectorizeResult(Changed, CFGChanged);
10278}
10279
10280 PreservedAnalyses LoopVectorizePass::run(Function &F,
10281 FunctionAnalysisManager &AM) {
10282 LI = &AM.getResult<LoopAnalysis>(F);
10283 // There are no loops in the function. Return before computing other
10284 // expensive analyses.
10285 if (LI->empty())
10286 return PreservedAnalyses::all();
10295 AA = &AM.getResult<AAManager>(F);
10296
10297 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10298 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10299 BFI = nullptr;
10300 if (PSI && PSI->hasProfileSummary())
10301 BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
10302 LoopVectorizeResult Result = runImpl(F);
10303 if (!Result.MadeAnyChange)
10304 return PreservedAnalyses::all();
10305 PreservedAnalyses PA;
10306
10307 if (isAssignmentTrackingEnabled(*F.getParent())) {
10308 for (auto &BB : F)
10309 RemoveRedundantDbgInstrs(&BB);
10310 }
10311
10312 PA.preserve<LoopAnalysis>();
10316
10317 if (Result.MadeCFGChange) {
10318 // Making CFG changes likely means a loop got vectorized. Indicate that
10319 // extra simplification passes should be run.
10320 // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
10321 // be run if runtime checks have been added.
10324 } else {
10325 PA.preserveSet<CFGAnalyses>();
10326 }
10327 return PA;
10328}
10329
10331 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10332 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10333 OS, MapClassName2PassName);
10334
10335 OS << '<';
10336 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10337 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10338 OS << '>';
10339}
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI, TargetLibraryInfo &TLI)
Definition CostModel.cpp:74
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
#define _
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
Definition Legalizer.cpp:80
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan, DenseMap< VPValue *, VPValue * > &IVEndValues)
Create resume phis in the scalar preheader for first-order recurrences, reductions and inductions,...
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static VPInstruction * addResumePhiRecipeForInduction(VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder, VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC)
Create and return a ResumePhi for WideIV, unless it is truncated.
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
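For intuition, a minimal sketch of the integer-induction case only (the real helper also handles pointer and floating-point inductions; the function name below is hypothetical):

```cpp
// Sketch: for an integer induction, the transformed index is
// StartValue + Index * Step. Pointer/FP inductions need different operations.
static Value *transformIntIndexSketch(IRBuilderBase &B, Value *Index,
                                      Value *StartValue, Value *Step) {
  Value *Offset = B.CreateMul(Index, Step);
  return B.CreateAdd(StartValue, Offset);
}
```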
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
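A rough sketch of the intent, assuming the fallback simply scans operand instructions for a location (not the exact implementation):

```cpp
static DebugLoc getDebugLocSketch(Instruction *I) {
  if (DebugLoc DL = I->getDebugLoc())
    return DL;                              // the instruction already has one
  for (Value *Op : I->operands())
    if (auto *OpInst = dyn_cast<Instruction>(Op))
      if (DebugLoc DL = OpInst->getDebugLoc())
        return DL;                          // fall back to an operand's location
  return DebugLoc();                        // nothing meaningful found
}
```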
static Value * createInductionAdditionalBypassValues(PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount, Instruction *OldInduction)
static void fixReductionScalarResumeWhenVectorizingEpilog(VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock)
static Value * getStartValueFromReductionResult(VPInstruction *RdxResult)
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
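A plausible reading of this estimate, assuming scalable VFs are scaled by the vscale guess when one is available (a sketch, not the exact code):

```cpp
static unsigned estimateElementCountSketch(ElementCount VF,
                                           std::optional<unsigned> VScale) {
  unsigned Estimate = VF.getKnownMinValue();
  if (VF.isScalable())
    Estimate *= VScale.value_or(1);   // assume vscale == 1 when unknown
  return Estimate;
}
```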
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static bool planContainsAdditionalSimplifications(VPlan &Plan, VPCostContext &CostCtx, Loop *TheLoop, ElementCount VF)
Return true if the original loop TheLoop contains any instructions that do not have corresponding r...
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static void preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop)
Creates a VPWidenIntOrFpInductionRecipe for Phi.
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
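One plausible notion of "irregular", sketched under the assumption that it means the type carries padding in memory (hypothetical helper, not a copy of the check):

```cpp
// Sketch: a type is treated as irregular if its in-memory allocation size
// differs from its value size, i.e. arrays of it carry padding and cannot be
// reinterpreted as a contiguous vector.
static bool hasIrregularTypeSketch(Type *Ty, const DataLayout &DL) {
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
```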
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
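A minimal sketch, assuming SCEV2ValueTy maps expanded SCEVs to their materialized IR values (as the return type of executePlan further down suggests); the helper name is hypothetical:

```cpp
static Value *getExpandedStepSketch(
    const InductionDescriptor &ID,
    const DenseMap<const SCEV *, Value *> &ExpandedSCEVs) {
  // The step SCEV was expanded up front; just look up its materialized value.
  return ExpandedSCEVs.lookup(ID.getStep());
}
```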
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static void cse(BasicBlock *BB)
Perform cse of induction variable instructions.
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the exit block for first-order recurrences in the original exit block.
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, LoopVectorizationLegality &LVL, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
mir Rename Register Operands
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1512
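A tiny illustration of the three APInt helpers listed above:

```cpp
#include "llvm/ADT/APInt.h"
using namespace llvm;

void apintExample() {
  APInt AllOnes = APInt::getAllOnes(8);        // 8-bit value 0xFF
  uint64_t Zext = AllOnes.getZExtValue();      // 255
  unsigned Active = AllOnes.getActiveBits();   // 8 significant bits
  (void)Zext; (void)Active;
}
```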
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:984
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ ICMP_NE
not equal
Definition InstrTypes.h:700
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:704
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
static DebugLoc getTemporary()
Definition DebugLoc.h:161
static DebugLoc getUnknown()
Definition DebugLoc.h:162
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:229
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:156
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:267
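A small usage sketch of the DenseMap operations listed above (key/value types chosen arbitrarily for illustration):

```cpp
#include "llvm/ADT/DenseMap.h"
using namespace llvm;

unsigned denseMapExample() {
  DenseMap<unsigned, unsigned> Costs;
  Costs.try_emplace(4, 10);            // inserted only if key 4 is absent
  Costs.try_emplace(4, 99);            // no effect, key already present
  bool Has = Costs.contains(4);        // true
  unsigned C = Costs.lookup(8);        // missing key: default value (0)
  return Has ? Costs.lookup(4) + C : 0;
}
```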
Implements a dense probed hash-table based set.
Definition DenseSet.h:269
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
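An illustration of the ElementCount factories and predicates listed above:

```cpp
#include "llvm/Support/TypeSize.h"
using namespace llvm;

void elementCountExample() {
  ElementCount Fixed4 = ElementCount::getFixed(4);    // e.g. <4 x i32>
  ElementCount Scal4 = ElementCount::getScalable(4);  // e.g. <vscale x 4 x i32>
  ElementCount One = ElementCount::getFixed(1);
  bool V = Fixed4.isVector() && Scal4.isVector();     // true
  bool S = One.isScalar();                            // true: exactly one lane
  (void)V; (void)S;
}
```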
BasicBlock * getAdditionalBypassBlock() const
Return the additional bypass block which targets the scalar loop by skipping the epilogue loop after ...
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * emitMinimumVectorEpilogueIterCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, BasicBlock *Insert)
Emits an iteration count bypass check after the main vector loop has finished to see if there are any...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB)
Introduces a new VPIRBasicBlock for CheckIRBB to Plan between the vector preheader and its predecesso...
BasicBlock * emitIterationCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check, VPlan &Plan)
Value * createIterationCountCheck(BasicBlock *VectorPH, ElementCount VF, unsigned UF) const
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i....
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:762
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
A struct for saving information about induction variables.
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant w...
Value * getStartValue() const
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
Value * TripCount
Trip count of the original loop.
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitability analysis.
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
friend class LoopVectorizationPlanner
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
ProfileSummaryInfo * PSI
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks, VPlan &Plan)
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:343
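For example, combining the two IntegerType helpers above (a small sketch):

```cpp
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

APInt maskOfI16(LLVMContext &Ctx) {
  IntegerType *I16 = IntegerType::get(Ctx, 16);
  return I16->getMask();   // 16-bit APInt with value 0xFFFF
}
```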
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent sto...
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool usePredicatedReductionSelect() const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves the TailFoldingStyle for two cases: whether or not the IV update may overflow.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication s...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
A memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
const SmallPtrSetImpl< const Instruction * > & getPotentiallyFaultingLoads() const
Returns potentially faulting loads.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool hasUncountableEarlyExit() const
Returns true if the loop has exactly one uncountable early exit, i.e.
bool hasHistograms() const
Returns true if the loop contains any known histogram operations.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1611
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1662
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1595
VectorizationFactor computeBestVF()
Compute and return the most profitable vectorization factor.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool VectorizingEpilogue)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1576
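A hypothetical usage sketch built on the signature above: the predicate is evaluated over the VF range, and the range is clamped to the prefix on which the answer is uniform.

```cpp
// Hypothetical helper using the planner's static utility; VFRange is the
// VF range type used by the loop-vectorization planner.
bool allVFsAreVectors(VFRange &Range) {
  return LoopVectorizationPlanner::getDecisionAndClampRange(
      [](ElementCount VF) { return VF.isVector(); }, Range);
}
```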
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1760
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:632
Metadata node.
Definition Metadata.h:1077
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:115
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:230
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
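A small sketch using the PHINode entries above (the helper and its arguments are hypothetical):

```cpp
#include "llvm/IR/Instructions.h"
using namespace llvm;

void wireResumeValue(PHINode &ResumePhi, BasicBlock *BypassBB, Value *Resume) {
  ResumePhi.addIncoming(Resume, BypassBB);                     // append an edge
  unsigned N = ResumePhi.getNumIncomingValues();               // edges so far
  Value *Back = ResumePhi.getIncomingValueForBlock(BypassBB);  // == Resume
  (void)N; (void)Back;
}
```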
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static LLVM_ABI bool isFloatingPointRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a floating point kind.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
Value * getSentinelValue() const
Returns the sentinel value for FindFirstIV & FindLastIV recurrences to replace the start value.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:59
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:104
void insert_range(Range &&R)
Definition SetVector.h:193
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:279
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:168
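SetVector keeps insertion order while rejecting duplicates, for example:

```cpp
#include "llvm/ADT/SetVector.h"
using namespace llvm;

unsigned setVectorExample() {
  SetVector<unsigned> Order;
  Order.insert(3);
  Order.insert(1);
  bool Inserted = Order.insert(3);            // false: 3 is already present
  // Iteration yields 3 then 1, matching insertion order.
  return Order.size() + (Inserted ? 1 : 0);   // == 2
}
```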
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:356
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:87
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:96
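A brief illustration of the TypeSwitch dispatch style (the classification helper below is hypothetical):

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

StringRef classifyInst(Instruction *I) {
  return TypeSwitch<Instruction *, StringRef>(I)
      .Case<LoadInst>([](LoadInst *) { return "load"; })
      .Case<StoreInst>([](StoreInst *) { return "store"; })
      .Default([](Instruction *) { return "other"; });
}
```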
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
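A small sketch of the Type queries listed above: strip a (possibly vector) type down to its scalar element and inspect its width.
  #include "llvm/IR/Type.h"
  static unsigned scalarBitWidth(llvm::Type *Ty) {
    llvm::Type *Scalar = Ty->getScalarType(); // element type for vectors
    if (Scalar->isIntegerTy() || Scalar->isFloatingPointTy())
      return Scalar->getScalarSizeInBits();
    return 0; // void, pointers, aggregates, ...
  }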
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
iterator_range< op_iterator > op_range
Definition User.h:281
Value * getOperand(unsigned i) const
Definition User.h:232
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:3755
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
Definition VPlan.h:3830
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:3782
iterator end()
Definition VPlan.h:3792
iterator begin()
Recipe iterator methods.
Definition VPlan.h:3790
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:3843
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:246
VPRegionBlock * getEnclosingLoopRegion()
Definition VPlan.cpp:619
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:3821
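An illustrative sketch only, since VPlan is a vectorizer-internal API that changes frequently: walk the phi-like recipes of a VPBasicBlock first, then the remaining recipes.
  #include "VPlan.h" // vectorizer-internal header
  static void visitBlock(llvm::VPBasicBlock *VPBB) {
    for (llvm::VPRecipeBase &R : VPBB->phis())
      (void)R; // header-phi-like recipes come first
    for (auto It = VPBB->getFirstNonPhi(); It != VPBB->end(); ++It)
      (void)*It; // remaining (non-phi) recipes
  }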
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:81
VPRegionBlock * getParent()
Definition VPlan.h:173
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:190
void setName(const Twine &newName)
Definition VPlan.h:166
size_t getNumSuccessors() const
Definition VPlan.h:219
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
Definition VPlan.h:322
size_t getNumPredecessors() const
Definition VPlan.h:220
VPlan * getPlan()
Definition VPlan.cpp:165
VPBlockBase * getSinglePredecessor() const
Definition VPlan.h:215
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:170
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:209
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:198
static auto blocksOnly(const T &Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:228
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
Definition VPlanUtils.h:249
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
Definition VPlanUtils.h:187
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:214
VPlan-based builder utility analogous to IRBuilder.
VPDerivedIVRecipe * createDerivedIV(InductionDescriptor::InductionKind Kind, FPMathOperator *FPBinOp, VPValue *Start, VPValue *Current, VPValue *Step, const Twine &Name="")
Convert the input value Current to the corresponding value of an induction with Start and Step values...
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPInstruction * createScalarCast(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy, DebugLoc DL)
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:424
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:397
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1.
VPValue * getStepValue() const
Definition VPlan.h:3632
VPValue * getStartValue() const
Definition VPlan.h:3631
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:1968
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2016
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2005
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:1678
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:3908
Helper to manage IR metadata for recipes.
Definition VPlan.h:939
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:980
@ ComputeAnyOfResult
Compute the final result of an AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1013
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1060
@ FirstOrderRecurrenceSplice
Definition VPlan.h:986
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1051
unsigned getOpcode() const
Definition VPlan.h:1116
VPInterleaveRecipe is a recipe for transforming an interleave group of loads or stores into one wide l...
Definition VPlan.h:2567
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
A recipe for forming partial reductions.
Definition VPlan.h:2744
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1287
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:394
VPBasicBlock * getParent()
Definition VPlan.h:415
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:482
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
VPRecipeBase * tryToCreateWidenRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for R if one can be created within the given VF Range.
VPValue * getBlockInMask(VPBasicBlock *VPBB) const
Returns the entry mask for block VPBB or null if the mask is all-true.
VPValue * getVPValueOrAddLiveIn(Value *V)
std::optional< unsigned > getScalingForReduction(const Instruction *ExitInst)
void collectScaledReductions(VFRange &Range)
Find all possible partial reductions in the loop and track all of those that are valid so recipes can...
VPReplicateRecipe * handleReplication(Instruction *I, ArrayRef< VPValue * > Operands, VFRange &Range)
Build a VPReplicateRecipe for I using Operands.
VPRecipeBase * tryToCreatePartialReduction(Instruction *Reduction, ArrayRef< VPValue * > Operands, unsigned ScaleFactor)
Create and return a partial reduction recipe for a reduction instruction along with binary operation ...
A recipe for handling reduction phis.
Definition VPlan.h:2322
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
Definition VPlan.h:2382
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2376
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:3943
const VPBlockBase * getEntry() const
Definition VPlan.h:3979
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:2847
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:521
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:586
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:199
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:243
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:238
void addOperand(VPValue *Operand)
Definition VPlanValue.h:232
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:135
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition VPlanValue.h:176
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:85
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1412
user_iterator user_begin()
Definition VPlanValue.h:130
unsigned getNumUsers() const
Definition VPlanValue.h:113
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1416
user_range users()
Definition VPlanValue.h:134
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:1832
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1479
A recipe for handling GEP instructions.
Definition VPlan.h:1765
Base class for widened induction (VPWidenIntOrFpInductionRecipe and VPWidenPointerInductionRecipe),...
Definition VPlan.h:2033
VPValue * getStepValue()
Returns the step value of the induction.
Definition VPlan.h:2061
const InductionDescriptor & getInductionDescriptor() const
Returns the induction descriptor for the recipe.
Definition VPlan.h:2078
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2108
A common base class for widening memory operations.
Definition VPlan.h:3124
A recipe for widened phis.
Definition VPlan.h:2244
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1436
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
Definition VPlan.h:4046
bool hasVF(ElementCount VF) const
Definition VPlan.h:4255
VPBasicBlock * getEntry()
Definition VPlan.h:4145
VPValue & getVectorTripCount()
The vector trip count.
Definition VPlan.h:4235
VPValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4238
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4207
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
Definition VPlan.h:4262
bool hasUF(unsigned UF) const
Definition VPlan.h:4273
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4197
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1046
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4418
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1028
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4221
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4170
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4297
bool hasScalarVFOnly() const
Definition VPlan.h:4266
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4188
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:952
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
Definition VPlan.h:4351
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4193
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4150
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1188
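A hedged sketch (internal API) of a few VPlan accessors listed above, assuming Plan is a fully constructed VPlan; the helper itself is hypothetical.
  #include "VPlan.h" // vectorizer-internal header
  static bool planHasScalableVF(llvm::VPlan &Plan) {
    // vectorFactors() iterates all VFs the plan was built for.
    for (llvm::ElementCount VF : Plan.vectorFactors())
      if (VF.isScalable())
        return true;
    return false;
  }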
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
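A minimal sketch of the Value use-tracking APIs above; both pointers are assumed valid and of matching type.
  #include "llvm/IR/Value.h"
  static void replaceIfSingleUse(llvm::Value *Old, llvm::Value *New) {
    // Every Value knows its uses, so rewriting them is a single call.
    if (Old->hasOneUse() && Old->getType() == New->getType())
      Old->replaceAllUsesWith(New);
  }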
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:194
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:169
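An illustrative DenseSet sketch: insert() returns {iterator, bool}, with the bool reporting whether the element was newly inserted.
  #include "llvm/ADT/DenseSet.h"
  static bool rememberId(llvm::DenseSet<unsigned> &Seen, unsigned Id) {
    return Seen.insert(Id).second; // false if Id was already present
  }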
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
Definition TypeSize.h:269
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
Definition TypeSize.h:277
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
constexpr bool isZero() const
Definition TypeSize.h:154
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
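A small sketch of the FixedOrScalableQuantity helpers applied to ElementCount; halving the coefficient of <vscale x 8> yields <vscale x 4>, and the two compare as known LE.
  #include "llvm/Support/TypeSize.h"
  static bool halveIsSmallerOrEqual(llvm::ElementCount VF) {
    llvm::ElementCount Half = VF.divideCoefficientBy(2);
    return llvm::ElementCount::isKnownLE(Half, VF) && Half.isNonZero();
  }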
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:130
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t > m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1)
bool match(const SCEV *S, const Pattern &P)
class_match< const SCEV > m_SCEV()
match_combine_or< AllRecipe_match< Instruction::ZExt, Op0_t >, AllRecipe_match< Instruction::SExt, Op0_t > > m_ZExtOrSExt(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastElement, Op0_t > m_ExtractLastElement(const Op0_t &Op0)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
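An illustrative sketch of the IR pattern matchers (the SCEV and VPlan matchers listed above follow the same style for their own representations): recognize "(X * Y) + Z" in that operand order and capture the operands. The helper name is made up for the example.
  #include "llvm/IR/PatternMatch.h"
  static bool matchMulAdd(llvm::Instruction *I, llvm::Value *&X,
                          llvm::Value *&Y, llvm::Value *&Z) {
    using namespace llvm::PatternMatch;
    // m_Value binds the matched operand to the given pointer.
    return match(I, m_Add(m_Mul(m_Value(X), m_Value(Y)), m_Value(Z)));
  }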
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
Definition VPlanUtils.h:44
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
const SCEV * getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:689
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1657
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan, bool VerifyLate=false)
Verify invariants for general VPlans.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
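A short sketch of make_early_inc_range: the loop may erase the current instruction without invalidating the iteration. The helper is hypothetical, not from this pass.
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/IntrinsicInst.h"
  static void dropAssumes(llvm::BasicBlock &BB) {
    for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
      if (llvm::isa<llvm::AssumeInst>(I))
        I.eraseFromParent(); // safe: the iterator was advanced eagerly
  }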
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:216
LLVM_ABI bool VerifySCEV
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:682
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:243
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:348
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1624
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1719
LLVM_ABI cl::opt< bool > EnableLoopVectorization
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1767
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:126
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:325
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:405
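Illustrative arithmetic with the MathExtras helpers above: divideCeil(10, 4) == 3, isPowerOf2_64(16) == true, Log2_64(16) == 4.
  #include "llvm/Support/MathExtras.h"
  #include <cstdint>
  static uint64_t chunksNeeded(uint64_t Bytes, uint64_t ChunkSize) {
    return llvm::divideCeil(Bytes, ChunkSize); // rounds the quotient up
  }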
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ AddChainWithSubs
A chain of adds and subs.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:155
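A one-line sketch of alignTo: it rounds a size up to a multiple of the alignment, e.g. alignTo(10, Align(8)) == 16. The wrapper is purely illustrative.
  #include "llvm/Support/Alignment.h"
  #include <cstdint>
  static uint64_t paddedSize(uint64_t Size) {
    return llvm::alignTo(Size, llvm::Align(8));
  }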
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
cl::opt< bool > EnableVPlanNativePath
Definition VPlan.cpp:56
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
unsigned getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind)
A helper function that returns how much we should divide the cost of a predicated block by.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instruction has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:299
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:77
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:836
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
BlockFrequencyInfo * BFI
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
TargetTransformInfo * TTI
Storage for information about made changes.
A chain of instructions that form a partial reduction.
Instruction * Reduction
The top-level binary operation that forms the reduction to a scalar after the loop body.
Instruction * ExtendA
The extension of each of the inner binary operation's operands.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool isLegacyUniformAfterVectorization(Instruction *I, ElementCount VF) const
Return true if I is considered uniform-after-vectorization in the legacy cost model for VF.
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
Definition VPlan.h:2287
A struct that represents some properties of the register usage of a loop.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening select instructions.
Definition VPlan.h:1719
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE)
Create a base VPlan0, serving as the common starting point for all later candidates.
static void optimizeInductionExitUsers(VPlan &Plan, DenseMap< VPValue *, VPValue * > &EndValues, ScalarEvolution &SE)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static LLVM_ABI_FOR_TEST void handleEarlyExits(VPlan &Plan, bool HasUncountableExit)
Update Plan to account for all early exits.
static void canonicalizeEVLLoops(VPlan &Plan)
Transform EVL loops to use variable-length stepping after region dissolution.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static bool runPass(bool(*Transform)(VPlan &, ArgsTy...), VPlan &Plan, typename std::remove_reference< ArgsTy >::type &...Args)
Helper to run a VPlan transform Transform on VPlan, forwarding extra arguments to the transform.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static void materializeBuildVectors(VPlan &Plan)
Add explicit Build[Struct]Vector recipes that combine multiple scalar values into single vectors.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, ScalarEvolution &SE)
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static DenseMap< VPBasicBlock *, VPValue * > introduceMasksAndLinearize(VPlan &Plan, bool FoldTail)
Predicate and linearize the control-flow in the only loop region of Plan.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPEVLBasedIVPHIRecipe and related recipes to Plan and replaces all uses except the canonical IV...
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static void removeBranchOnConst(VPlan &Plan)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue)
Materialize vector trip count computations to a set of VPInstructions.
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlanPtr &Plan, function_ref< const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace each replicating VPReplicateRecipe and VPInstruction outside of any replicate region in Plan ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void narrowInterleaveGroups(VPlan &Plan, ElementCount VF, unsigned VectorRegWidth)
Try to convert a plan with interleave groups with VF elements to a plan with the interleave groups re...
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void materializeVFAndVFxUF(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize VF and VFxUF to be computed explicitly using VPInstructions.
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool RequiresScalarEpilogueCheck, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks