LLVM 22.0.0git
LoopAccessAnalysis.h
Go to the documentation of this file.
1//===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for the loop memory dependence framework that
10// was originally developed for the Loop Vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
15#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
16
21#include <optional>
22#include <variant>
23
24namespace llvm {
25
26class AAResults;
27class DataLayout;
28class Loop;
29class raw_ostream;
30class TargetTransformInfo;
31
32/// Collection of parameters shared between the Loop Vectorizer and the
33/// Loop Access Analysis.
35 /// Maximum SIMD width.
36 LLVM_ABI static const unsigned MaxVectorWidth;
37
38 /// VF as overridden by the user.
40 /// Interleave factor as overridden by the user.
42 /// True if force-vector-interleave was specified by the user.
43 LLVM_ABI static bool isInterleaveForced();
44
 45 /// When performing memory disambiguation checks at runtime do not
46 /// make more than this number of comparisons.
48
49 // When creating runtime checks for nested loops, where possible try to
50 // write the checks in a form that allows them to be easily hoisted out of
51 // the outermost loop. For example, we can do this by expanding the range of
52 // addresses considered to include the entire nested loop so that they are
53 // loop invariant.
55};
56
57/// Checks memory dependences among accesses to the same underlying
58/// object to determine whether vectorization is legal or not (and at
59/// which vectorization factor).
60///
61/// Note: This class will compute a conservative dependence for access to
62/// different underlying pointers. Clients, such as the loop vectorizer, will
63/// sometimes deal with these potential dependencies by emitting runtime checks.
64///
65/// We use the ScalarEvolution framework to symbolically evaluate access
66/// function pairs. Since we currently don't restructure the loop we can rely
67/// on the program order of memory accesses to determine their safety.
68/// At the moment we will only deem accesses as safe for:
69/// * A negative constant distance assuming program order.
70///
71/// Safe: tmp = a[i + 1]; OR a[i + 1] = x;
72/// a[i] = tmp; y = a[i];
73///
74/// The latter case is safe because later checks guarantee that there can't
75/// be a cycle through a phi node (that is, we check that "x" and "y" are not
76/// the same variable: a header phi can only be an induction or a reduction, a
77/// reduction can't have a memory sink, an induction can't have a memory
78/// source). This is important and must not be violated (or we have to
79/// resort to checking for cycles through memory).
80///
81/// * A positive constant distance assuming program order that is bigger
82/// than the biggest memory access.
83///
84/// tmp = a[i] OR b[i] = x
85/// a[i+2] = tmp y = b[i+2];
86///
87/// Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
88///
89/// * Zero distances and all accesses have the same size.
90///
92public:
95 /// Set of potential dependent memory accesses.
97
98 /// Type to keep track of the status of the dependence check. The order of
99 /// the elements is important and has to be from most permissive to least
100 /// permissive.
102 // Can vectorize safely without RT checks. All dependences are known to be
103 // safe.
104 Safe,
105 // Can possibly vectorize with RT checks to overcome unknown dependencies.
107 // Cannot vectorize due to known unsafe dependencies.
108 Unsafe,
109 };
110
 111 /// Dependence between memory access instructions.
112 struct Dependence {
113 /// The type of the dependence.
114 enum DepType {
115 // No dependence.
117 // We couldn't determine the direction or the distance.
119 // At least one of the memory access instructions may access a loop
120 // varying object, e.g. the address of underlying object is loaded inside
121 // the loop, like A[B[i]]. We cannot determine direction or distance in
122 // those cases, and also are unable to generate any runtime checks.
124
125 // Lexically forward.
126 //
127 // FIXME: If we only have loop-independent forward dependences (e.g. a
128 // read and write of A[i]), LAA will locally deem the dependence "safe"
129 // without querying the MemoryDepChecker. Therefore we can miss
130 // enumerating loop-independent forward dependences in
131 // getDependences. Note that as soon as there are different
132 // indices used to access the same array, the MemoryDepChecker *is*
133 // queried and the dependence list is complete.
135 // Forward, but if vectorized, is likely to prevent store-to-load
136 // forwarding.
138 // Lexically backward.
 140 // Backward, but the distance allows a vectorization factor dependent
 141 // on MinDepDistBytes.
143 // Same, but may prevent store-to-load forwarding.
145 };
146
147 /// String version of the types.
148 LLVM_ABI static const char *DepName[];
149
150 /// Index of the source of the dependence in the InstMap vector.
151 unsigned Source;
152 /// Index of the destination of the dependence in the InstMap vector.
153 unsigned Destination;
154 /// The type of the dependence.
156
159
160 /// Return the source instruction of the dependence.
161 Instruction *getSource(const MemoryDepChecker &DepChecker) const;
162 /// Return the destination instruction of the dependence.
163 Instruction *getDestination(const MemoryDepChecker &DepChecker) const;
164
165 /// Dependence types that don't prevent vectorization.
168
169 /// Lexically forward dependence.
170 LLVM_ABI bool isForward() const;
171 /// Lexically backward dependence.
172 LLVM_ABI bool isBackward() const;
173
174 /// May be a lexically backward dependence type (includes Unknown).
175 LLVM_ABI bool isPossiblyBackward() const;
176
177 /// Print the dependence. \p Instr is used to map the instruction
178 /// indices to instructions.
179 LLVM_ABI void print(raw_ostream &OS, unsigned Depth,
180 const SmallVectorImpl<Instruction *> &Instrs) const;
181 };
182
184 DominatorTree *DT, const Loop *L,
185 const DenseMap<Value *, const SCEV *> &SymbolicStrides,
186 unsigned MaxTargetVectorWidthInBits)
187 : PSE(PSE), AC(AC), DT(DT), InnermostLoop(L),
188 SymbolicStrides(SymbolicStrides),
189 MaxTargetVectorWidthInBits(MaxTargetVectorWidthInBits) {}
190
191 /// Register the location (instructions are given increasing numbers)
192 /// of a write access.
193 LLVM_ABI void addAccess(StoreInst *SI);
194
195 /// Register the location (instructions are given increasing numbers)
 196 /// of a read access.
197 LLVM_ABI void addAccess(LoadInst *LI);
198
199 /// Check whether the dependencies between the accesses are safe, and records
200 /// the dependence information in Dependences if so.
201 ///
202 /// Only checks sets with elements in \p CheckDeps.
203 LLVM_ABI bool areDepsSafe(const DepCandidates &AccessSets,
204 const MemAccessInfoList &CheckDeps);
205
206 /// No memory dependence was encountered that would inhibit
207 /// vectorization.
210 }
211
212 /// Return true if the number of elements that are safe to operate on
213 /// simultaneously is not bounded.
215 return MaxSafeVectorWidthInBits == UINT_MAX;
216 }
217
218 /// Return the number of elements that are safe to operate on
219 /// simultaneously, multiplied by the size of the element in bits.
221 return MaxSafeVectorWidthInBits;
222 }
223
224 /// Return true if there are no store-load forwarding dependencies.
226 return MaxStoreLoadForwardSafeDistanceInBits ==
227 std::numeric_limits<uint64_t>::max();
228 }
229
230 /// Return safe power-of-2 number of elements, which do not prevent store-load
231 /// forwarding, multiplied by the size of the elements in bits.
234 "Expected the distance, that prevent store-load forwarding, to be "
235 "set.");
236 return MaxStoreLoadForwardSafeDistanceInBits;
237 }
238
 239 /// In some cases when the dependency check fails we can still
240 /// vectorize the loop with a dynamic array access check.
242 return ShouldRetryWithRuntimeChecks &&
244 }
245
246 /// Returns the memory dependences. If null is returned we exceeded
247 /// the MaxDependences threshold and this information is not
248 /// available.
250 return RecordDependences ? &Dependences : nullptr;
251 }
252
253 void clearDependences() { Dependences.clear(); }
254
255 /// The vector of memory access instructions. The indices are used as
256 /// instruction identifiers in the Dependence class.
258 return InstMap;
259 }
260
261 /// Generate a mapping between the memory instructions and their
262 /// indices according to program order.
265
266 for (unsigned I = 0; I < InstMap.size(); ++I)
267 OrderMap[InstMap[I]] = I;
268
269 return OrderMap;
270 }
271
272 /// Find the set of instructions that read or write via \p Ptr.
274 getInstructionsForAccess(Value *Ptr, bool isWrite) const;
275
276 /// Return the program order indices for the access location (Ptr, IsWrite).
277 /// Returns an empty ArrayRef if there are no accesses for the location.
279 auto I = Accesses.find({Ptr, IsWrite});
280 if (I != Accesses.end())
281 return I->second;
282 return {};
283 }
284
285 const Loop *getInnermostLoop() const { return InnermostLoop; }
286
288 std::pair<const SCEV *, const SCEV *>> &
290 return PointerBounds;
291 }
292
294 assert(DT && "requested DT, but it is not available");
295 return DT;
296 }
298 assert(AC && "requested AC, but it is not available");
299 return AC;
300 }
301
302private:
303 /// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
304 /// applies dynamic knowledge to simplify SCEV expressions and convert them
305 /// to a more usable form. We need this in case assumptions about SCEV
306 /// expressions need to be made in order to avoid unknown dependences. For
307 /// example we might assume a unit stride for a pointer in order to prove
308 /// that a memory access is strided and doesn't wrap.
310
311 AssumptionCache *AC;
312 DominatorTree *DT;
313
314 const Loop *InnermostLoop;
315
316 /// Reference to map of pointer values to
317 /// their stride symbols, if they have a symbolic stride.
318 const DenseMap<Value *, const SCEV *> &SymbolicStrides;
319
320 /// Maps access locations (ptr, read/write) to program order.
322
323 /// Memory access instructions in program order.
325
326 /// The program order index to be used for the next instruction.
327 unsigned AccessIdx = 0;
328
329 /// The smallest dependence distance in bytes in the loop. This may not be
330 /// the same as the maximum number of bytes that are safe to operate on
331 /// simultaneously.
332 uint64_t MinDepDistBytes = 0;
333
334 /// Number of elements (from consecutive iterations) that are safe to
335 /// operate on simultaneously, multiplied by the size of the element in bits.
336 /// The size of the element is taken from the memory access that is most
337 /// restrictive.
338 uint64_t MaxSafeVectorWidthInBits = -1U;
339
340 /// Maximum power-of-2 number of elements, which do not prevent store-load
341 /// forwarding, multiplied by the size of the elements in bits.
342 uint64_t MaxStoreLoadForwardSafeDistanceInBits =
343 std::numeric_limits<uint64_t>::max();
344
345 /// Whether we should try to vectorize the loop with runtime checks, if the
346 /// dependencies are not safe.
347 bool ShouldRetryWithRuntimeChecks = false;
348
349 /// Result of the dependence checks, indicating whether the checked
350 /// dependences are safe for vectorization, require RT checks or are known to
351 /// be unsafe.
353
 354 /// True if Dependences reflects the dependences in the
 355 /// loop. If false we exceeded MaxDependences and
 356 /// Dependences is invalid.
357 bool RecordDependences = true;
358
359 /// Memory dependences collected during the analysis. Only valid if
360 /// RecordDependences is true.
361 SmallVector<Dependence, 8> Dependences;
362
363 /// The maximum width of a target's vector registers multiplied by 2 to also
364 /// roughly account for additional interleaving. Is used to decide if a
365 /// backwards dependence with non-constant stride should be classified as
366 /// backwards-vectorizable or unknown (triggering a runtime check).
367 unsigned MaxTargetVectorWidthInBits = 0;
368
369 /// Mapping of SCEV expressions to their expanded pointer bounds (pair of
370 /// start and end pointer expressions).
372 std::pair<const SCEV *, const SCEV *>>
374
375 /// Cache for the loop guards of InnermostLoop.
376 std::optional<ScalarEvolution::LoopGuards> LoopGuards;
377
378 /// Check whether there is a plausible dependence between the two
379 /// accesses.
380 ///
381 /// Access \p A must happen before \p B in program order. The two indices
382 /// identify the index into the program order map.
383 ///
384 /// This function checks whether there is a plausible dependence (or the
385 /// absence of such can't be proved) between the two accesses. If there is a
386 /// plausible dependence but the dependence distance is bigger than one
387 /// element access it records this distance in \p MinDepDistBytes (if this
388 /// distance is smaller than any other distance encountered so far).
389 /// Otherwise, this function returns true signaling a possible dependence.
390 Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
391 const MemAccessInfo &B, unsigned BIdx);
392
393 /// Check whether the data dependence could prevent store-load
394 /// forwarding.
395 ///
396 /// \return false if we shouldn't vectorize at all or avoid larger
397 /// vectorization factors by limiting MinDepDistBytes.
398 bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize,
399 unsigned CommonStride = 0);
400
401 /// Updates the current safety status with \p S. We can go from Safe to
402 /// either PossiblySafeWithRtChecks or Unsafe and from
403 /// PossiblySafeWithRtChecks to Unsafe.
404 void mergeInStatus(VectorizationSafetyStatus S);
405
406 struct DepDistanceStrideAndSizeInfo {
407 const SCEV *Dist;
408
409 /// Strides here are scaled; i.e. in bytes, taking the size of the
410 /// underlying type into account.
411 uint64_t MaxStride;
412 std::optional<uint64_t> CommonStride;
413
414 /// TypeByteSize is either the common store size of both accesses, or 0 when
415 /// store sizes mismatch.
416 uint64_t TypeByteSize;
417
418 bool AIsWrite;
419 bool BIsWrite;
420
421 DepDistanceStrideAndSizeInfo(const SCEV *Dist, uint64_t MaxStride,
422 std::optional<uint64_t> CommonStride,
423 uint64_t TypeByteSize, bool AIsWrite,
424 bool BIsWrite)
425 : Dist(Dist), MaxStride(MaxStride), CommonStride(CommonStride),
426 TypeByteSize(TypeByteSize), AIsWrite(AIsWrite), BIsWrite(BIsWrite) {}
427 };
428
429 /// Get the dependence distance, strides, type size and whether it is a write
430 /// for the dependence between A and B. Returns a DepType, if we can prove
 431 /// there's no dependence or the analysis fails. Outlined to lambda to limit
 432 /// the scope of various temporary variables, like A/BPtr, StrideA/BPtr and
433 /// others. Returns either the dependence result, if it could already be
434 /// determined, or a DepDistanceStrideAndSizeInfo struct, noting that
435 /// TypeByteSize could be 0 when store sizes mismatch, and this should be
436 /// checked in the caller.
437 std::variant<Dependence::DepType, DepDistanceStrideAndSizeInfo>
438 getDependenceDistanceStrideAndSize(const MemAccessInfo &A, Instruction *AInst,
439 const MemAccessInfo &B,
440 Instruction *BInst);
441
442 // Return true if we can prove that \p Sink only accesses memory after \p
443 // Src's end or vice versa.
444 bool areAccessesCompletelyBeforeOrAfter(const SCEV *Src, Type *SrcTy,
445 const SCEV *Sink, Type *SinkTy);
446};
447
448class RuntimePointerChecking;
449/// A grouping of pointers. A single memcheck is required between
450/// two groups.
452 /// Create a new pointer checking group containing a single
453 /// pointer, with index \p Index in RtCheck.
455 const RuntimePointerChecking &RtCheck);
456
457 /// Tries to add the pointer recorded in RtCheck at index
458 /// \p Index to this pointer checking group. We can only add a pointer
459 /// to a checking group if we will still be able to get
460 /// the upper and lower bounds of the check. Returns true in case
461 /// of success, false otherwise.
462 LLVM_ABI bool addPointer(unsigned Index,
463 const RuntimePointerChecking &RtCheck);
464 LLVM_ABI bool addPointer(unsigned Index, const SCEV *Start, const SCEV *End,
465 unsigned AS, bool NeedsFreeze, ScalarEvolution &SE);
466
467 /// The SCEV expression which represents the upper bound of all the
468 /// pointers in this group.
469 const SCEV *High;
470 /// The SCEV expression which represents the lower bound of all the
471 /// pointers in this group.
472 const SCEV *Low;
473 /// Indices of all the pointers that constitute this grouping.
475 /// Address space of the involved pointers.
476 unsigned AddressSpace;
477 /// Whether the pointer needs to be frozen after expansion, e.g. because it
478 /// may be poison outside the loop.
479 bool NeedsFreeze = false;
480};
481
482/// A memcheck which is made up of a pair of grouped pointers.
483typedef std::pair<const RuntimeCheckingPtrGroup *,
486
490 unsigned AccessSize;
492
494 unsigned AccessSize, bool NeedsFreeze)
497};
498
499/// Holds information about the memory runtime legality checks to verify
500/// that a group of pointers do not overlap.
503
504public:
505 struct PointerInfo {
506 /// Holds the pointer value that we need to check.
508 /// Holds the smallest byte address accessed by the pointer throughout all
509 /// iterations of the loop.
510 const SCEV *Start;
511 /// Holds the largest byte address accessed by the pointer throughout all
512 /// iterations of the loop, plus 1.
513 const SCEV *End;
514 /// Holds the information if this pointer is used for writing to memory.
516 /// Holds the id of the set of pointers that could be dependent because of a
517 /// shared underlying object.
519 /// Holds the id of the disjoint alias set to which this pointer belongs.
520 unsigned AliasSetId;
521 /// SCEV for the access.
522 const SCEV *Expr;
523 /// True if the pointer expressions needs to be frozen after expansion.
525
527 bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
528 const SCEV *Expr, bool NeedsFreeze)
532 };
533
535 : DC(DC), SE(SE) {}
536
537 /// Reset the state of the pointer runtime information.
538 void reset() {
539 Need = false;
540 CanUseDiffCheck = true;
541 Pointers.clear();
542 Checks.clear();
543 DiffChecks.clear();
544 CheckingGroups.clear();
545 }
546
547 /// Insert a pointer and calculate the start and end SCEVs.
548 /// We need \p PSE in order to compute the SCEV expression of the pointer
549 /// according to the assumptions that we've made during the analysis.
550 /// The method might also version the pointer stride according to \p Strides,
551 /// and add new predicates to \p PSE.
552 LLVM_ABI void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
553 Type *AccessTy, bool WritePtr, unsigned DepSetId,
554 unsigned ASId, PredicatedScalarEvolution &PSE,
555 bool NeedsFreeze);
556
557 /// No run-time memory checking is necessary.
558 bool empty() const { return Pointers.empty(); }
559
560 /// Generate the checks and store it. This also performs the grouping
561 /// of pointers to reduce the number of memchecks necessary.
562 LLVM_ABI void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
563 bool UseDependencies);
564
565 /// Returns the checks that generateChecks created. They can be used to ensure
566 /// no read/write accesses overlap across all loop iterations.
568 return Checks;
569 }
570
571 // Returns an optional list of (pointer-difference expressions, access size)
572 // pairs that can be used to prove that there are no vectorization-preventing
 573 // dependencies at runtime. There is a vectorization-preventing dependency
574 // if any pointer-difference is <u VF * InterleaveCount * access size. Returns
575 // std::nullopt if pointer-difference checks cannot be used.
576 std::optional<ArrayRef<PointerDiffInfo>> getDiffChecks() const {
577 if (!CanUseDiffCheck)
578 return std::nullopt;
579 return {DiffChecks};
580 }
581
582 /// Decide if we need to add a check between two groups of pointers,
583 /// according to needsChecking.
585 const RuntimeCheckingPtrGroup &N) const;
586
587 /// Returns the number of run-time checks required according to
588 /// needsChecking.
589 unsigned getNumberOfChecks() const { return Checks.size(); }
590
 591 /// Print the list of run-time memory checks necessary.
592 LLVM_ABI void print(raw_ostream &OS, unsigned Depth = 0) const;
593
594 /// Print \p Checks.
597 unsigned Depth = 0) const;
598
599 /// This flag indicates if we need to add the runtime check.
600 bool Need = false;
601
602 /// Information about the pointers that may require checking.
604
605 /// Holds a partitioning of pointers into "check groups".
607
608 /// Check if pointers are in the same partition
609 ///
610 /// \p PtrToPartition contains the partition number for pointers (-1 if the
611 /// pointer belongs to multiple partitions).
612 LLVM_ABI static bool
614 unsigned PtrIdx1, unsigned PtrIdx2);
615
616 /// Decide whether we need to issue a run-time check for pointer at
617 /// index \p I and \p J to prove their independence.
618 LLVM_ABI bool needsChecking(unsigned I, unsigned J) const;
619
620 /// Return PointerInfo for pointer at index \p PtrIdx.
621 const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
622 return Pointers[PtrIdx];
623 }
624
625 ScalarEvolution *getSE() const { return SE; }
626
627private:
628 /// Groups pointers such that a single memcheck is required
629 /// between two different groups. This will clear the CheckingGroups vector
 630 /// and re-compute it. We will only group dependencies if \p UseDependencies
631 /// is true, otherwise we will create a separate group for each pointer.
632 void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
633 bool UseDependencies);
634
635 /// Generate the checks and return them.
637
 638 /// Try to add a new (pointer-difference, access size) pair to
639 /// DiffCheck for checking groups \p CGI and \p CGJ. If pointer-difference
640 /// checks cannot be used for the groups, set CanUseDiffCheck to false.
641 bool tryToCreateDiffCheck(const RuntimeCheckingPtrGroup &CGI,
642 const RuntimeCheckingPtrGroup &CGJ);
643
645
646 /// Holds a pointer to the ScalarEvolution analysis.
647 ScalarEvolution *SE;
648
649 /// Set of run-time checks required to establish independence of
650 /// otherwise may-aliasing pointers in the loop.
652
653 /// Flag indicating if pointer-difference checks can be used
654 bool CanUseDiffCheck = true;
655
656 /// A list of (pointer-difference, access size) pairs that can be used to
657 /// prove that there are no vectorization-preventing dependencies.
659};
660
661/// Drive the analysis of memory accesses in the loop
662///
663/// This class is responsible for analyzing the memory accesses of a loop. It
664/// collects the accesses and then its main helper the AccessAnalysis class
665/// finds and categorizes the dependences in buildDependenceSets.
666///
667/// For memory dependences that can be analyzed at compile time, it determines
668/// whether the dependence is part of cycle inhibiting vectorization. This work
669/// is delegated to the MemoryDepChecker class.
670///
671/// For memory dependences that cannot be determined at compile time, it
672/// generates run-time checks to prove independence. This is done by
673/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
674/// RuntimePointerCheck class. \p AllowPartial determines whether partial checks
675/// are generated when not all pointers could be analyzed.
676///
677/// If pointers can wrap or can't be expressed as affine AddRec expressions by
678/// ScalarEvolution, we will generate run-time checks by emitting a
679/// SCEVUnionPredicate.
680///
681/// Checks for both memory dependences and the SCEV predicates contained in the
682/// PSE must be emitted in order for the results of this analysis to be valid.
684public:
687 const TargetLibraryInfo *TLI, AAResults *AA,
689 bool AllowPartial = false);
690
691 /// Return true we can analyze the memory accesses in the loop and there are
692 /// no memory dependence cycles. Note that for dependences between loads &
693 /// stores with uniform addresses,
694 /// hasStoreStoreDependenceInvolvingLoopInvariantAddress and
695 /// hasLoadStoreDependenceInvolvingLoopInvariantAddress also need to be
696 /// checked.
697 bool canVectorizeMemory() const { return CanVecMem; }
698
699 /// Return true if there is a convergent operation in the loop. There may
700 /// still be reported runtime pointer checks that would be required, but it is
701 /// not legal to insert them.
702 bool hasConvergentOp() const { return HasConvergentOp; }
703
704 /// Return true if, when runtime pointer checking does not have complete
705 /// results, it instead has partial results for those memory accesses that
706 /// could be analyzed.
707 bool hasAllowPartial() const { return AllowPartial; }
708
710 return PtrRtChecking.get();
711 }
712
713 /// Number of memchecks required to prove independence of otherwise
714 /// may-alias pointers.
715 unsigned getNumRuntimePointerChecks() const {
716 return PtrRtChecking->getNumberOfChecks();
717 }
718
719 /// Return true if the block BB needs to be predicated in order for the loop
720 /// to be vectorized.
721 LLVM_ABI static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
722 DominatorTree *DT);
723
724 /// Returns true if value \p V is loop invariant.
725 LLVM_ABI bool isInvariant(Value *V) const;
726
727 unsigned getNumStores() const { return NumStores; }
728 unsigned getNumLoads() const { return NumLoads;}
729
730 /// The diagnostics report generated for the analysis. E.g. why we
731 /// couldn't analyze the loop.
732 const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }
733
 734 /// The Memory Dependence Checker which can determine the
735 /// loop-independent and loop-carried dependences between memory accesses.
736 const MemoryDepChecker &getDepChecker() const { return *DepChecker; }
737
738 /// Return the list of instructions that use \p Ptr to read or write
739 /// memory.
741 bool isWrite) const {
742 return DepChecker->getInstructionsForAccess(Ptr, isWrite);
743 }
744
 745 /// If an access has a symbolic stride, this maps the pointer value to
746 /// the stride symbol.
748 return SymbolicStrides;
749 }
750
751 /// Print the information about the memory accesses in the loop.
752 LLVM_ABI void print(raw_ostream &OS, unsigned Depth = 0) const;
753
754 /// Return true if the loop has memory dependence involving two stores to an
755 /// invariant address, else return false.
757 return HasStoreStoreDependenceInvolvingLoopInvariantAddress;
758 }
759
760 /// Return true if the loop has memory dependence involving a load and a store
761 /// to an invariant address, else return false.
763 return HasLoadStoreDependenceInvolvingLoopInvariantAddress;
764 }
765
766 /// Return the list of stores to invariant addresses.
768 return StoresToInvariantAddresses;
769 }
770
771 /// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
772 /// them to a more usable form. All SCEV expressions during the analysis
773 /// should be re-written (and therefore simplified) according to PSE.
774 /// A user of LoopAccessAnalysis will need to emit the runtime checks
775 /// associated with this predicate.
776 const PredicatedScalarEvolution &getPSE() const { return *PSE; }
777
778private:
779 /// Analyze the loop. Returns true if all memory access in the loop can be
780 /// vectorized.
781 bool analyzeLoop(AAResults *AA, const LoopInfo *LI,
782 const TargetLibraryInfo *TLI, DominatorTree *DT);
783
784 /// Check if the structure of the loop allows it to be analyzed by this
785 /// pass.
786 bool canAnalyzeLoop();
787
788 /// Save the analysis remark.
789 ///
 790 /// LAA does not directly emit the remarks. Instead it stores them so the
 791 /// client can retrieve and present them as its own analysis
792 /// (e.g. -Rpass-analysis=loop-vectorize).
794 recordAnalysis(StringRef RemarkName, const Instruction *Instr = nullptr);
795
796 /// Collect memory access with loop invariant strides.
797 ///
798 /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
799 /// invariant.
800 void collectStridedAccess(Value *LoadOrStoreInst);
801
802 // Emits the first unsafe memory dependence in a loop.
803 // Emits nothing if there are no unsafe dependences
804 // or if the dependences were not recorded.
805 void emitUnsafeDependenceRemark();
806
807 std::unique_ptr<PredicatedScalarEvolution> PSE;
808
809 /// We need to check that all of the pointers in this list are disjoint
810 /// at runtime. Using std::unique_ptr to make using move ctor simpler.
811 /// If AllowPartial is true then this list may contain only partial
812 /// information when we've failed to analyze all the memory accesses in the
813 /// loop, in which case HasCompletePtrRtChecking will be false.
814 std::unique_ptr<RuntimePointerChecking> PtrRtChecking;
815
816 /// The Memory Dependence Checker which can determine the
817 /// loop-independent and loop-carried dependences between memory accesses.
818 /// This will be empty if we've failed to analyze all the memory access in the
819 /// loop (i.e. CanVecMem is false).
820 std::unique_ptr<MemoryDepChecker> DepChecker;
821
822 Loop *TheLoop;
823
824 /// Determines whether we should generate partial runtime checks when not all
825 /// memory accesses could be analyzed.
826 bool AllowPartial;
827
828 unsigned NumLoads = 0;
829 unsigned NumStores = 0;
830
831 /// Cache the result of analyzeLoop.
832 bool CanVecMem = false;
833 bool HasConvergentOp = false;
834 bool HasCompletePtrRtChecking = false;
835
836 /// Indicator that there are two non vectorizable stores to the same uniform
837 /// address.
838 bool HasStoreStoreDependenceInvolvingLoopInvariantAddress = false;
839 /// Indicator that there is non vectorizable load and store to the same
840 /// uniform address.
841 bool HasLoadStoreDependenceInvolvingLoopInvariantAddress = false;
842
843 /// List of stores to invariant addresses.
844 SmallVector<StoreInst *> StoresToInvariantAddresses;
845
846 /// The diagnostics report generated for the analysis. E.g. why we
847 /// couldn't analyze the loop.
848 std::unique_ptr<OptimizationRemarkAnalysis> Report;
849
 850 /// If an access has a symbolic stride, this maps the pointer value to
851 /// the stride symbol.
852 DenseMap<Value *, const SCEV *> SymbolicStrides;
853};
854
855/// Return the SCEV corresponding to a pointer with the symbolic stride
856/// replaced with constant one, assuming the SCEV predicate associated with
857/// \p PSE is true.
858///
859/// If necessary this method will version the stride of the pointer according
860/// to \p PtrToStride and therefore add further predicates to \p PSE.
861///
862/// \p PtrToStride provides the mapping between the pointer value and its
863/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
864LLVM_ABI const SCEV *
865replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
866 const DenseMap<Value *, const SCEV *> &PtrToStride,
867 Value *Ptr);
868
869/// If the pointer has a constant stride return it in units of the access type
870/// size. If the pointer is loop-invariant, return 0. Otherwise return
871/// std::nullopt.
872///
873/// Ensure that it does not wrap in the address space, assuming the predicate
874/// associated with \p PSE is true.
875///
876/// If necessary this method will version the stride of the pointer according
877/// to \p PtrToStride and therefore add further predicates to \p PSE.
878/// The \p Assume parameter indicates if we are allowed to make additional
879/// run-time assumptions.
880///
881/// Note that the analysis results are defined if-and-only-if the original
882/// memory access was defined. If that access was dead, or UB, then the
883/// result of this function is undefined.
884LLVM_ABI std::optional<int64_t>
885getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
886 const Loop *Lp,
887 const DenseMap<Value *, const SCEV *> &StridesMap =
888 DenseMap<Value *, const SCEV *>(),
889 bool Assume = false, bool ShouldCheckWrap = true);
890
891/// Returns the distance between the pointers \p PtrA and \p PtrB iff they are
892/// compatible and it is possible to calculate the distance between them. This
893/// is a simple API that does not depend on the analysis pass.
894/// \param StrictCheck Ensure that the calculated distance matches the
895/// type-based one after all the bitcasts removal in the provided pointers.
896LLVM_ABI std::optional<int64_t>
897getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB,
898 const DataLayout &DL, ScalarEvolution &SE,
899 bool StrictCheck = false, bool CheckType = true);
900
901/// Attempt to sort the pointers in \p VL and return the sorted indices
902/// in \p SortedIndices, if reordering is required.
903///
904/// Returns 'true' if sorting is legal, otherwise returns 'false'.
905///
906/// For example, for a given \p VL of memory accesses in program order, a[i+4],
907/// a[i+0], a[i+1] and a[i+7], this function will sort the \p VL and save the
908/// sorted indices in \p SortedIndices as a[i+0], a[i+1], a[i+4], a[i+7] and
909/// saves the mask for actual memory accesses in program order in
910/// \p SortedIndices as <1,2,0,3>
911LLVM_ABI bool sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
912 const DataLayout &DL, ScalarEvolution &SE,
913 SmallVectorImpl<unsigned> &SortedIndices);
914
915/// Returns true if the memory operations \p A and \p B are consecutive.
916/// This is a simple API that does not depend on the analysis pass.
917LLVM_ABI bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
918 ScalarEvolution &SE, bool CheckType = true);
919
920/// Calculate Start and End points of memory access using exact backedge taken
921/// count \p BTC if computable or maximum backedge taken count \p MaxBTC
922/// otherwise.
923///
924/// Let's assume A is the first access and B is a memory access on N-th loop
925/// iteration. Then B is calculated as:
926/// B = A + Step*N .
927/// Step value may be positive or negative.
928/// N is a calculated back-edge taken count:
929/// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0
930/// Start and End points are calculated in the following way:
931/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
932/// where SizeOfElt is the size of single memory access in bytes.
933///
934/// There is no conflict when the intervals are disjoint:
935/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
936LLVM_ABI std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
937 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
938 const SCEV *MaxBTC, ScalarEvolution *SE,
939 DenseMap<std::pair<const SCEV *, Type *>,
940 std::pair<const SCEV *, const SCEV *>> *PointerBounds,
941 DominatorTree *DT, AssumptionCache *AC);
942
944 /// The cache.
946
947 // The used analysis passes.
948 ScalarEvolution &SE;
949 AAResults &AA;
950 DominatorTree &DT;
951 LoopInfo &LI;
953 const TargetLibraryInfo *TLI = nullptr;
954 AssumptionCache *AC;
955
956public:
959 const TargetLibraryInfo *TLI, AssumptionCache *AC)
960 : SE(SE), AA(AA), DT(DT), LI(LI), TTI(TTI), TLI(TLI), AC(AC) {}
961
962 LLVM_ABI const LoopAccessInfo &getInfo(Loop &L, bool AllowPartial = false);
963
964 LLVM_ABI void clear();
965
968};
969
970/// This analysis provides dependence information for the memory
971/// accesses of a loop.
972///
973/// It runs the analysis for a loop on demand. This can be initiated by
974/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
975/// getResult return a LoopAccessInfo object. See this class for the
976/// specifics of what information is provided.
978 : public AnalysisInfoMixin<LoopAccessAnalysis> {
980 LLVM_ABI static AnalysisKey Key;
981
982public:
984
986};
987
989 const MemoryDepChecker &DepChecker) const {
990 return DepChecker.getMemoryInstructions()[Source];
991}
992
994 const MemoryDepChecker &DepChecker) const {
995 return DepChecker.getMemoryInstructions()[Destination];
996}
997
998} // End llvm namespace
999
1000#endif
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MapVector< const Value *, unsigned > OrderMap
Definition: AsmWriter.cpp:110
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
#define LLVM_ABI
Definition: Compiler.h:213
uint32_t Index
bool End
Definition: ELF_riscv.cpp:480
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
raw_pwrite_stream & OS
A private abstract base class describing the concept of an individual alias analysis implementation.
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:294
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
An instruction for reading from memory.
Definition: Instructions.h:180
This analysis provides dependence information for the memory accesses of a loop.
LoopAccessInfoManager Result
LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LoopAccessInfoManager(ScalarEvolution &SE, AAResults &AA, DominatorTree &DT, LoopInfo &LI, TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AssumptionCache *AC)
LLVM_ABI const LoopAccessInfo & getInfo(Loop &L, bool AllowPartial=false)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
ArrayRef< StoreInst * > getStoresToInvariantAddresses() const
Return the list of stores to invariant addresses.
const OptimizationRemarkAnalysis * getReport() const
The diagnostics report generated for the analysis.
const RuntimePointerChecking * getRuntimePointerChecking() const
bool canVectorizeMemory() const
Return true we can analyze the memory accesses in the loop and there are no memory dependence cycles.
unsigned getNumLoads() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
LLVM_ABI bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
bool hasLoadStoreDependenceInvolvingLoopInvariantAddress() const
Return true if the loop has memory dependence involving a load and a store to an invariant address,...
LLVM_ABI void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
const PredicatedScalarEvolution & getPSE() const
Used to add runtime SCEV checks.
unsigned getNumStores() const
static LLVM_ABI bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Return the list of instructions that use Ptr to read or write memory.
const DenseMap< Value *, const SCEV * > & getSymbolicStrides() const
If an access has symbolic strides, this maps the pointer value to the stride symbol.
bool hasAllowPartial() const
Return true if, when runtime pointer checking does not have complete results, it instead has partial ...
bool hasStoreStoreDependenceInvolvingLoopInvariantAddress() const
Return true if the loop has memory dependence involving two stores to an invariant address,...
bool hasConvergentOp() const
Return true if there is a convergent operation in the loop.
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:40
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
DominatorTree * getDT() const
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyStoreLoadForwardDistances() const
Return true if there are no store-load forwarding dependencies.
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
LLVM_ABI bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)
Check whether the dependencies between the accesses are safe, and records the dependence information ...
EquivalenceClasses< MemAccessInfo > DepCandidates
Set of potential dependent memory accesses.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
bool shouldRetryWithRuntimeChecks() const
In some cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
AssumptionCache * getAC() const
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()
SmallVector< MemAccessInfo, 8 > MemAccessInfoList
LLVM_ABI SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
LLVM_ABI void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
uint64_t getStoreLoadForwardSafeDistanceInBits() const
Return safe power-of-2 number of elements, which do not prevent store-load forwarding,...
DenseMap< Instruction *, unsigned > generateInstructionOrderMap() const
Generate a mapping between the memory instructions and their indices according to program order.
MemoryDepChecker(PredicatedScalarEvolution &PSE, AssumptionCache *AC, DominatorTree *DT, const Loop *L, const DenseMap< Value *, const SCEV * > &SymbolicStrides, unsigned MaxTargetVectorWidthInBits)
Diagnostic information for optimization analysis remarks.
PointerIntPair - This class implements a pair of a pointer and small integer.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
RuntimePointerChecking(MemoryDepChecker &DC, ScalarEvolution *SE)
LLVM_ABI void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
LLVM_ABI bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
static LLVM_ABI bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
bool empty() const
No run-time memory checking is necessary.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
ScalarEvolution * getSE() const
LLVM_ABI void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
const PointerInfo & getPointerInfo(unsigned PtrIdx) const
Return PointerInfo for pointer at index PtrIdx.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Value handle that tracks a Value across RAUW.
Definition: ValueHandle.h:332
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:75
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
std::pair< const RuntimeCheckingPtrGroup *, const RuntimeCheckingPtrGroup * > RuntimePointerCheck
A memcheck which made up of a pair of grouped pointers.
LLVM_ABI std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
LLVM_ABI std::optional< int64_t > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
LLVM_ABI bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
LLVM_ABI const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
LLVM_ABI bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
LLVM_ABI std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC, const SCEV *MaxBTC, ScalarEvolution *SE, DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > *PointerBounds, DominatorTree *DT, AssumptionCache *AC)
Calculate Start and End points of memory access using exact backedge taken count BTC if computable or...
#define N
IR Values for the lower and upper bounds of a pointer evolution.
Definition: LoopUtils.cpp:1884
A CRTP mix-in that provides informational APIs needed for analysis passes.
Definition: PassManager.h:93
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:29
Dependence between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
unsigned Destination
Index of the destination of the dependence in the InstMap vector.
Dependence(unsigned Source, unsigned Destination, DepType Type)
LLVM_ABI bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
LLVM_ABI bool isForward() const
Lexically forward dependence.
LLVM_ABI bool isBackward() const
Lexically backward dependence.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
unsigned Source
Index of the source of the dependence in the InstMap vector.
DepType
The type of the dependence.
static LLVM_ABI const char * DepName[]
String version of the types.
PointerDiffInfo(const SCEV *SrcStart, const SCEV *SinkStart, unsigned AccessSize, bool NeedsFreeze)
unsigned AddressSpace
Address space of the involved pointers.
LLVM_ABI bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End, bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId, const SCEV *Expr, bool NeedsFreeze)
const SCEV * Start
Holds the smallest byte address accessed by the pointer throughout all iterations of the loop.
const SCEV * Expr
SCEV for the access.
bool NeedsFreeze
True if the pointer expressions needs to be frozen after expansion.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
const SCEV * End
Holds the largest byte address accessed by the pointer throughout all iterations of the loop,...
TrackingVH< Value > PointerValue
Holds the pointer value that we need to check.
Collection of parameters shared between the Loop Vectorizer and the Loop Access Analysis.
static LLVM_ABI const unsigned MaxVectorWidth
Maximum SIMD width.
static LLVM_ABI unsigned VectorizationFactor
VF as overridden by the user.
static LLVM_ABI unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime do not make more than this number of compari...
static LLVM_ABI bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static LLVM_ABI unsigned VectorizationInterleave
Interleave factor as overridden by the user.
static LLVM_ABI bool HoistRuntimeChecks