LLVM 22.0.0git
MemorySSA.h
Go to the documentation of this file.
1//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file exposes an interface to building/using memory SSA to
11/// walk memory instructions using a use/def graph.
12///
13/// Memory SSA class builds an SSA form that links together memory access
14/// instructions such as loads, stores, atomics, and calls. Additionally, it
15/// does a trivial form of "heap versioning". Every time the memory state changes
16/// in the program, we generate a new heap version. It generates
17/// MemoryDef/Uses/Phis that are overlayed on top of the existing instructions.
18///
19/// As a trivial example,
20/// define i32 @main() #0 {
21/// entry:
22/// %call = call noalias i8* @_Znwm(i64 4) #2
23/// %0 = bitcast i8* %call to i32*
24/// %call1 = call noalias i8* @_Znwm(i64 4) #2
25/// %1 = bitcast i8* %call1 to i32*
26/// store i32 5, i32* %0, align 4
27/// store i32 7, i32* %1, align 4
28/// %2 = load i32* %0, align 4
29/// %3 = load i32* %1, align 4
30/// %add = add nsw i32 %2, %3
31/// ret i32 %add
32/// }
33///
34/// Will become
35/// define i32 @main() #0 {
36/// entry:
37/// ; 1 = MemoryDef(0)
38/// %call = call noalias i8* @_Znwm(i64 4) #3
39/// %2 = bitcast i8* %call to i32*
40/// ; 2 = MemoryDef(1)
41/// %call1 = call noalias i8* @_Znwm(i64 4) #3
42/// %4 = bitcast i8* %call1 to i32*
43/// ; 3 = MemoryDef(2)
44/// store i32 5, i32* %2, align 4
45/// ; 4 = MemoryDef(3)
46/// store i32 7, i32* %4, align 4
47/// ; MemoryUse(3)
48/// %7 = load i32* %2, align 4
49/// ; MemoryUse(4)
50/// %8 = load i32* %4, align 4
51/// %add = add nsw i32 %7, %8
52/// ret i32 %add
53/// }
54///
55/// Given this form, all the stores that could ever affect the load at %8 can be
56/// gotten by using the MemoryUse associated with it, and walking from use to
57/// def until you hit the top of the function.
58///
59/// Each def also has a list of users associated with it, so you can walk from
60/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
61/// but not the RHS of MemoryDefs. You can see this above at %7, which would
62/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
63/// store, all the MemoryUses on its use lists are may-aliases of that store
64/// (but the MemoryDefs on its use list may not be).
65///
66/// MemoryDefs are not disambiguated because it would require multiple reaching
67/// definitions, which would require multiple phis, and multiple memoryaccesses
68/// per instruction.
69///
70/// In addition to the def/use graph described above, MemoryDefs also contain
71/// an "optimized" definition use. The "optimized" use points to some def
72/// reachable through the memory def chain. The optimized def *may* (but is
73/// not required to) alias the original MemoryDef, but no def *closer* to the
74/// source def may alias it. As the name implies, the purpose of the optimized
75/// use is to allow caching of clobber searches for memory defs. The optimized
76/// def may be nullptr, in which case clients must walk the defining access
77/// chain.
78///
79/// When iterating the uses of a MemoryDef, both defining uses and optimized
80/// uses will be encountered. If only one type is needed, the client must
81/// filter the use walk.
82//
83//===----------------------------------------------------------------------===//
84
85#ifndef LLVM_ANALYSIS_MEMORYSSA_H
86#define LLVM_ANALYSIS_MEMORYSSA_H
87
88#include "llvm/ADT/DenseMap.h"
91#include "llvm/ADT/ilist_node.h"
96#include "llvm/IR/DerivedUser.h"
97#include "llvm/IR/Dominators.h"
98#include "llvm/IR/Type.h"
99#include "llvm/IR/User.h"
100#include "llvm/Pass.h"
102#include <algorithm>
103#include <cassert>
104#include <cstddef>
105#include <iterator>
106#include <memory>
107#include <utility>
108
109namespace llvm {
110
111template <class GraphType> struct GraphTraits;
112class Function;
113class Loop;
114class LLVMContext;
115class MemoryAccess;
116class MemorySSAWalker;
117class Module;
118class raw_ostream;
119
120namespace MSSAHelpers {
121
122struct AllAccessTag {};
123struct DefsOnlyTag {};
124
125} // end namespace MSSAHelpers
126
127enum : unsigned {
128 // Used to signify what the default invalid ID is for MemoryAccess's
129 // getID()
132
133template <class T> class memoryaccess_def_iterator_base;
137
138// The base for all memory accesses. All memory accesses in a block are
139// linked together using an intrusive list.
141 : public DerivedUser,
142 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
143 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
144public:
149
150 MemoryAccess(const MemoryAccess &) = delete;
152
153 void *operator new(size_t) = delete;
154
155 // Methods for support type inquiry through isa, cast, and
156 // dyn_cast
157 static bool classof(const Value *V) {
158 unsigned ID = V->getValueID();
159 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
160 }
161
162 BasicBlock *getBlock() const { return Block; }
163
164 LLVM_ABI void print(raw_ostream &OS) const;
165 LLVM_ABI void dump() const;
166
167 /// The user iterators for a memory access
170
171 /// This iterator walks over all of the defs in a given
172 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
173 /// MemoryUse/MemoryDef, this walks the defining access.
178
179 /// Get the iterators for the all access list and the defs only list
180 /// We default to the all access list.
182 return this->AllAccessType::getIterator();
183 }
185 return this->AllAccessType::getIterator();
186 }
189 }
192 }
194 return this->DefsOnlyType::getIterator();
195 }
197 return this->DefsOnlyType::getIterator();
198 }
201 }
204 }
205
206protected:
207 friend class MemoryDef;
208 friend class MemoryPhi;
209 friend class MemorySSA;
210 friend class MemoryUse;
211 friend class MemoryUseOrDef;
212
213 /// Used by MemorySSA to change the block of a MemoryAccess when it is
214 /// moved.
215 void setBlock(BasicBlock *BB) { Block = BB; }
216
217 /// Used for debugging and tracking things about MemoryAccesses.
218 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
219 inline unsigned getID() const;
220
221 MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
223 : DerivedUser(Type::getVoidTy(C), Vty, AllocInfo, DeleteValue),
224 Block(BB) {}
225
226 // Use deleteValue() to delete a generic MemoryAccess.
227 ~MemoryAccess() = default;
228
229private:
230 BasicBlock *Block;
231};
232
233template <>
235 static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
236};
237
239 MA.print(OS);
240 return OS;
241}
242
243/// Class that has the common methods + fields of memory uses/defs. It's
244/// a little awkward to have, but there are many cases where we want either a
245/// use or def, and there are many cases where uses are needed (defs aren't
246/// acceptable), and vice-versa.
247///
248/// This class should never be instantiated directly; make a MemoryUse or
249/// MemoryDef instead.
251public:
252 void *operator new(size_t) = delete;
253
255
256 /// Get the instruction that this MemoryUse represents.
257 Instruction *getMemoryInst() const { return MemoryInstruction; }
258
259 /// Get the access that produces the memory state used by this Use.
261
262 static bool classof(const Value *MA) {
263 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
264 }
265
266 /// Do we have an optimized use?
267 inline bool isOptimized() const;
268 /// Return the MemoryAccess associated with the optimized use, or nullptr.
269 inline MemoryAccess *getOptimized() const;
270 /// Sets the optimized use for a MemoryDef.
271 inline void setOptimized(MemoryAccess *);
272
273 /// Reset the ID of what this MemoryUse was optimized to, causing it to
274 /// be rewalked by the walker if necessary.
275 /// This really should only be called by tests.
276 inline void resetOptimized();
277
278protected:
279 friend class MemorySSA;
280 friend class MemorySSAUpdater;
281
283 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
285 : MemoryAccess(C, Vty, DeleteValue, BB, AllocInfo),
286 MemoryInstruction(MI) {
288 }
289
290 // Use deleteValue() to delete a generic MemoryUseOrDef.
291 ~MemoryUseOrDef() = default;
292
293 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
294 if (!Optimized) {
295 setOperand(0, DMA);
296 return;
297 }
298 setOptimized(DMA);
299 }
300
301private:
302 Instruction *MemoryInstruction;
303};
304
305/// Represents read-only accesses to memory
306///
307/// In particular, the set of Instructions that will be represented by
308/// MemoryUse's is exactly the set of Instructions for which
309/// AliasAnalysis::getModRefInfo returns "Ref".
310class MemoryUse final : public MemoryUseOrDef {
311 constexpr static IntrusiveOperandsAllocMarker AllocMarker{1};
312
313public:
315
317 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, AllocMarker) {}
318
319 // allocate space for exactly one operand
320 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
321 void operator delete(void *Ptr) { User::operator delete(Ptr); }
322
323 static bool classof(const Value *MA) {
324 return MA->getValueID() == MemoryUseVal;
325 }
326
327 LLVM_ABI void print(raw_ostream &OS) const;
328
330 OptimizedID = DMA->getID();
331 setOperand(0, DMA);
332 }
333
334 /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
335 /// uses will usually be optimized, but this is not guaranteed (e.g. due to
336 /// invalidation and optimization limits.)
337 bool isOptimized() const {
338 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
339 }
340
342 return getDefiningAccess();
343 }
344
346 OptimizedID = INVALID_MEMORYACCESS_ID;
347 }
348
349protected:
350 friend class MemorySSA;
351
352private:
353 static void deleteMe(DerivedUser *Self);
354
355 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
356};
357
/// MemoryUse stores exactly one in-line operand: the defining access it
/// reads the memory state from (operand 0).
358template <>
359struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
361
362/// Represents a read-write access to memory, whether it is a must-alias,
363/// or a may-alias.
364///
365/// In particular, the set of Instructions that will be represented by
366/// MemoryDef's is exactly the set of Instructions for which
367/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
368/// Note that, in order to provide def-def chains, all defs also have a use
369/// associated with them. This use points to the nearest reaching
370/// MemoryDef/MemoryPhi.
371class MemoryDef final : public MemoryUseOrDef {
372 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
373
374public:
375 friend class MemorySSA;
376
378
380 unsigned Ver)
381 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, AllocMarker),
382 ID(Ver) {}
383
384 // allocate space for exactly two operands
385 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
386 void operator delete(void *Ptr) { User::operator delete(Ptr); }
387
388 static bool classof(const Value *MA) {
389 return MA->getValueID() == MemoryDefVal;
390 }
391
393 setOperand(1, MA);
394 OptimizedID = MA->getID();
395 }
396
398 return cast_or_null<MemoryAccess>(getOperand(1));
399 }
400
401 bool isOptimized() const {
402 return getOptimized() && OptimizedID == getOptimized()->getID();
403 }
404
406 OptimizedID = INVALID_MEMORYACCESS_ID;
407 setOperand(1, nullptr);
408 }
409
410 LLVM_ABI void print(raw_ostream &OS) const;
411
412 unsigned getID() const { return ID; }
413
414private:
415 static void deleteMe(DerivedUser *Self);
416
417 const unsigned ID;
418 unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
419};
420
/// MemoryDef stores two in-line operands: operand 0 is the defining access,
/// operand 1 caches the "optimized" access (may be null, see
/// MemoryDef::getOptimized which uses cast_or_null).
421template <>
422struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
424
425template <>
428 if (auto *MU = dyn_cast<MemoryUse>(MUD))
430 return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
431 }
432
433 static Use *op_end(MemoryUseOrDef *MUD) {
434 if (auto *MU = dyn_cast<MemoryUse>(MUD))
436 return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
437 }
438
439 static unsigned operands(const MemoryUseOrDef *MUD) {
440 if (const auto *MU = dyn_cast<MemoryUse>(MUD))
442 return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
443 }
444};
/// Expand the typed operand accessors (getOperand/setOperand, op_begin, etc.)
/// for MemoryUseOrDef, with operands exposed as MemoryAccess.
445DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
446
447/// Represents phi nodes for memory accesses.
448///
449/// These have the same semantic as regular phi nodes, with the exception that
450/// only one phi will ever exist in a given basic block.
451/// Guaranteeing one phi per block means guaranteeing there is only ever one
452/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
453/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
454/// a MemoryPhi's operands.
455/// That is, given
456/// if (a) {
457/// store %a
458/// store %b
459/// }
460/// it *must* be transformed into
461/// if (a) {
462/// 1 = MemoryDef(liveOnEntry)
463/// store %a
464/// 2 = MemoryDef(1)
465/// store %b
466/// }
467/// and *not*
468/// if (a) {
469/// 1 = MemoryDef(liveOnEntry)
470/// store %a
471/// 2 = MemoryDef(liveOnEntry)
472/// store %b
473/// }
474/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
475/// end of the branch, and if there are not two phi nodes, one will be
476/// disconnected completely from the SSA graph below that point.
477/// Because MemoryUse's do not generate new definitions, they do not have this
478/// issue.
479class MemoryPhi final : public MemoryAccess {
480 constexpr static HungOffOperandsAllocMarker AllocMarker{};
481
482 // allocate space for exactly zero operands
483 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
484
485public:
486 void operator delete(void *Ptr) { User::operator delete(Ptr); }
487
488 /// Provide fast operand accessors
490
491 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
492 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, AllocMarker), ID(Ver),
493 ReservedSpace(NumPreds) {
494 allocHungoffUses(ReservedSpace);
495 }
496
497 // Block iterator interface. This provides access to the list of incoming
498 // basic blocks, which parallels the list of incoming values.
501
503 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
504 }
505
507 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
508 }
509
510 block_iterator block_end() { return block_begin() + getNumOperands(); }
511
513 return block_begin() + getNumOperands();
514 }
515
517 return make_range(block_begin(), block_end());
518 }
519
521 return make_range(block_begin(), block_end());
522 }
523
524 op_range incoming_values() { return operands(); }
525
526 const_op_range incoming_values() const { return operands(); }
527
528 /// Return the number of incoming edges
529 unsigned getNumIncomingValues() const { return getNumOperands(); }
530
531 /// Return incoming value number x
532 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
533 void setIncomingValue(unsigned I, MemoryAccess *V) {
534 assert(V && "PHI node got a null value!");
535 setOperand(I, V);
536 }
537
538 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
539 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
540
541 /// Return incoming basic block number @p i.
542 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
543
544 /// Return incoming basic block corresponding
545 /// to an operand of the PHI.
546 BasicBlock *getIncomingBlock(const Use &U) const {
547 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
548 return getIncomingBlock(unsigned(&U - op_begin()));
549 }
550
551 /// Return incoming basic block corresponding
552 /// to value use iterator.
554 return getIncomingBlock(I.getUse());
555 }
556
557 void setIncomingBlock(unsigned I, BasicBlock *BB) {
558 assert(BB && "PHI node got a null basic block!");
559 block_begin()[I] = BB;
560 }
561
562 /// Add an incoming value to the end of the PHI list
564 if (getNumOperands() == ReservedSpace)
565 growOperands(); // Get more space!
566 // Initialize some new operands.
567 setNumHungOffUseOperands(getNumOperands() + 1);
568 setIncomingValue(getNumOperands() - 1, V);
569 setIncomingBlock(getNumOperands() - 1, BB);
570 }
571
572 /// Return the first index of the specified basic
573 /// block in the value list for this PHI. Returns -1 if no instance.
574 int getBasicBlockIndex(const BasicBlock *BB) const {
575 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
576 if (block_begin()[I] == BB)
577 return I;
578 return -1;
579 }
580
582 int Idx = getBasicBlockIndex(BB);
583 assert(Idx >= 0 && "Invalid basic block argument!");
584 return getIncomingValue(Idx);
585 }
586
587 // After deleting incoming position I, the order of incoming may be changed.
588 void unorderedDeleteIncoming(unsigned I) {
589 unsigned E = getNumOperands();
590 assert(I < E && "Cannot remove out of bounds Phi entry.");
591 // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
592 // itself should be deleted.
593 assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
594 "at least 2 values.");
595 setIncomingValue(I, getIncomingValue(E - 1));
596 setIncomingBlock(I, block_begin()[E - 1]);
597 setOperand(E - 1, nullptr);
598 block_begin()[E - 1] = nullptr;
599 setNumHungOffUseOperands(getNumOperands() - 1);
600 }
601
602 // After deleting entries that satisfy Pred, remaining entries may have
603 // changed order.
604 template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
605 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
606 if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
607 unorderedDeleteIncoming(I);
608 E = getNumOperands();
609 --I;
610 }
611 assert(getNumOperands() >= 1 &&
612 "Cannot remove all incoming blocks in a MemoryPhi.");
613 }
614
615 // After deleting incoming block BB, the incoming blocks order may be changed.
617 unorderedDeleteIncomingIf(
618 [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
619 }
620
621 // After deleting incoming memory access MA, the incoming accesses order may
622 // be changed.
624 unorderedDeleteIncomingIf(
625 [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
626 }
627
628 static bool classof(const Value *V) {
629 return V->getValueID() == MemoryPhiVal;
630 }
631
632 LLVM_ABI void print(raw_ostream &OS) const;
633
634 unsigned getID() const { return ID; }
635
636protected:
637 friend class MemorySSA;
638
639 /// this is more complicated than the generic
640 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
641 /// values and pointers to the incoming blocks, all in one allocation.
642 void allocHungoffUses(unsigned N) {
643 User::allocHungoffUses(N, /* IsPhi */ true);
644 }
645
646private:
647 // For debugging only
648 const unsigned ID;
649 unsigned ReservedSpace;
650
651 /// This grows the operand list in response to a push_back style of
652 /// operation. This grows the number of ops by 1.5 times.
653 void growOperands() {
654 unsigned E = getNumOperands();
655 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
656 ReservedSpace = std::max(E + E / 2, 2u);
657 growHungoffUses(ReservedSpace, /* IsPhi */ true);
658 }
659
660 static void deleteMe(DerivedUser *Self);
661};
662
663inline unsigned MemoryAccess::getID() const {
664 assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
665 "only memory defs and phis have ids");
666 if (const auto *MD = dyn_cast<MemoryDef>(this))
667 return MD->getID();
668 return cast<MemoryPhi>(this)->getID();
669}
670
671inline bool MemoryUseOrDef::isOptimized() const {
672 if (const auto *MD = dyn_cast<MemoryDef>(this))
673 return MD->isOptimized();
674 return cast<MemoryUse>(this)->isOptimized();
675}
676
678 if (const auto *MD = dyn_cast<MemoryDef>(this))
679 return MD->getOptimized();
680 return cast<MemoryUse>(this)->getOptimized();
681}
682
684 if (auto *MD = dyn_cast<MemoryDef>(this))
685 MD->setOptimized(MA);
686 else
687 cast<MemoryUse>(this)->setOptimized(MA);
688}
689
691 if (auto *MD = dyn_cast<MemoryDef>(this))
692 MD->resetOptimized();
693 else
694 cast<MemoryUse>(this)->resetOptimized();
695}
696
/// MemoryPhi has a variable number of incoming-value operands, so they are
/// allocated "hung off" the object (see allocHungoffUses/growHungoffUses
/// above) rather than in-line.
697template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits {};
699
700/// Encapsulates MemorySSA, including all data associated with memory
701/// accesses.
703public:
706
707 // MemorySSA must remain where it's constructed; Walkers it creates store
708 // pointers to it.
709 MemorySSA(MemorySSA &&) = delete;
710
712
713 LLVM_ABI MemorySSAWalker *getWalker();
714 LLVM_ABI MemorySSAWalker *getSkipSelfWalker();
715
716 /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
717 /// access associated with it. If passed a basic block gets the memory phi
718 /// node that exists for that block, if there is one. Otherwise, this will get
719 /// a MemoryUseOrDef.
721 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
722 }
723
725 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
726 }
727
728 DominatorTree &getDomTree() const { return *DT; }
729
730 LLVM_ABI void dump() const;
731 LLVM_ABI void print(raw_ostream &) const;
732
733 /// Return true if \p MA represents the live on entry value
734 ///
735 /// Loads and stores from pointer arguments and other global values may be
736 /// defined by memory operations that do not occur in the current function, so
737 /// they may be live on entry to the function. MemorySSA represents such
738 /// memory state by the live on entry definition, which is guaranteed to occur
739 /// before any other memory access in the function.
740 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
741 return MA == LiveOnEntryDef.get();
742 }
743
745 return LiveOnEntryDef.get();
746 }
747
748 // Sadly, iplists, by default, owns and deletes pointers added to the
749 // list. It's not currently possible to have two iplists for the same type,
750 // where one owns the pointers, and one does not. This is because the traits
751 // are per-type, not per-tag. If this ever changes, we should make the
752 // DefList an iplist.
754 using DefsList =
756
757 /// Return the list of MemoryAccess's for a given basic block.
758 ///
759 /// This list is not modifiable by the user.
760 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
761 return getWritableBlockAccesses(BB);
762 }
763
764 /// Return the list of MemoryDef's and MemoryPhi's for a given basic
765 /// block.
766 ///
767 /// This list is not modifiable by the user.
768 const DefsList *getBlockDefs(const BasicBlock *BB) const {
769 return getWritableBlockDefs(BB);
770 }
771
772 /// Given two memory accesses in the same basic block, determine
773 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
774 LLVM_ABI bool locallyDominates(const MemoryAccess *A,
775 const MemoryAccess *B) const;
776
777 /// Given two memory accesses in potentially different blocks,
778 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
779 LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
780
781 /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
782 /// dominates Use \p B.
783 LLVM_ABI bool dominates(const MemoryAccess *A, const Use &B) const;
784
785 enum class VerificationLevel { Fast, Full };
786 /// Verify that MemorySSA is self consistent (IE definitions dominate
787 /// all uses, uses appear in the right places). This is used by unit tests.
788 LLVM_ABI void
789 verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;
790
791 /// Used in various insertion functions to specify whether we are talking
792 /// about the beginning or end of a block.
793 enum InsertionPlace { Beginning, End, BeforeTerminator };
794
795 /// By default, uses are *not* optimized during MemorySSA construction.
796 /// Calling this method will attempt to optimize all MemoryUses, if this has
797 /// not happened yet for this MemorySSA instance. This should be done if you
798 /// plan to query the clobbering access for most uses, or if you walk the
799 /// def-use chain of uses.
800 LLVM_ABI void ensureOptimizedUses();
801
802 AliasAnalysis &getAA() { return *AA; }
803
804protected:
805 // Used by Memory SSA dumpers and wrapper pass
806 friend class MemorySSAUpdater;
807
808 template <typename IterT>
809 void verifyOrderingDominationAndDefUses(
810 IterT Blocks, VerificationLevel = VerificationLevel::Fast) const;
811 template <typename IterT> void verifyDominationNumbers(IterT Blocks) const;
812 template <typename IterT> void verifyPrevDefInPhis(IterT Blocks) const;
813
814 // This is used by the use optimizer and updater.
816 auto It = PerBlockAccesses.find(BB);
817 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
818 }
819
820 // This is used by the use optimizer and updater.
822 auto It = PerBlockDefs.find(BB);
823 return It == PerBlockDefs.end() ? nullptr : It->second.get();
824 }
825
826 // These are used by the updater to perform various internal MemorySSA
827 // machinations. They do not always leave the IR in a correct state, and
828 // rely on the updater to fixup what it breaks, so they are not public.
829
830 LLVM_ABI void moveTo(MemoryUseOrDef *What, BasicBlock *BB,
831 AccessList::iterator Where);
832 LLVM_ABI void moveTo(MemoryAccess *What, BasicBlock *BB,
833 InsertionPlace Point);
834
835 // Rename the dominator tree branch rooted at BB.
836 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
838 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
839 }
840
841 LLVM_ABI void removeFromLookups(MemoryAccess *);
842 LLVM_ABI void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
843 LLVM_ABI void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
844 InsertionPlace);
845 LLVM_ABI void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
846 AccessList::iterator);
848 createDefinedAccess(Instruction *, MemoryAccess *,
849 const MemoryUseOrDef *Template = nullptr,
850 bool CreationMustSucceed = true);
851
852private:
853 class ClobberWalkerBase;
854 class CachingWalker;
855 class SkipSelfWalker;
856 class OptimizeUses;
857
858 CachingWalker *getWalkerImpl();
859 template <typename IterT>
860 void buildMemorySSA(BatchAAResults &BAA, IterT Blocks);
861
862 void prepareForMoveTo(MemoryAccess *, BasicBlock *);
863 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
864
867
868 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
869 MemoryPhi *createMemoryPhi(BasicBlock *BB);
870 template <typename AliasAnalysisType>
871 MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
872 const MemoryUseOrDef *Template = nullptr);
873 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
874 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
875 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
876 LLVM_ABI void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
878 bool SkipVisited = false,
879 bool RenameAllUses = false);
880 AccessList *getOrCreateAccessList(const BasicBlock *);
881 DefsList *getOrCreateDefsList(const BasicBlock *);
882 void renumberBlock(const BasicBlock *) const;
883 AliasAnalysis *AA = nullptr;
884 DominatorTree *DT;
885 Function *F = nullptr;
886 Loop *L = nullptr;
887
888 // Memory SSA mappings
889 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
890
891 // These two mappings contain the main block to access/def mappings for
892 // MemorySSA. The list contained in PerBlockAccesses really owns all the
893 // MemoryAccesses.
894 // Both maps maintain the invariant that if a block is found in them, the
895 // corresponding list is not empty, and if a block is not found in them, the
896 // corresponding list is empty.
897 AccessMap PerBlockAccesses;
898 DefsMap PerBlockDefs;
899 std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
900
901 // Domination mappings
902 // Note that the numbering is local to a block, even though the map is
903 // global.
904 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
906
907 // Memory SSA building info
908 std::unique_ptr<ClobberWalkerBase> WalkerBase;
909 std::unique_ptr<CachingWalker> Walker;
910 std::unique_ptr<SkipSelfWalker> SkipWalker;
911 unsigned NextID = 0;
912 bool IsOptimized = false;
913};
914
915/// Enables verification of MemorySSA.
916///
917/// The checks which this flag enables are extensive and disabled by default
918/// unless `EXPENSIVE_CHECKS` is defined. The flag `-verify-memoryssa` can be
919/// used to selectively enable the verification without re-compilation.
920LLVM_ABI extern bool VerifyMemorySSA;
921
922// Internal MemorySSA utils, for use by MemorySSA classes and walkers
924protected:
925 friend class GVNHoist;
926 friend class MemorySSAWalker;
927
928 // This function should not be used by new passes.
929 LLVM_ABI static bool defClobbersUseOrDef(MemoryDef *MD,
930 const MemoryUseOrDef *MU,
931 AliasAnalysis &AA);
932};
933
934/// An analysis that produces \c MemorySSA for a function.
935///
936class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
938
939 LLVM_ABI static AnalysisKey Key;
940
941public:
942 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
943 // pointers after construction. Use a wrapper class instead of plain
944 // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
945 struct Result {
946 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
947
948 MemorySSA &getMSSA() { return *MSSA; }
949
950 std::unique_ptr<MemorySSA> MSSA;
951
954 };
955
957};
958
959/// Printer pass for \c MemorySSA.
960class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
961 raw_ostream &OS;
962 bool EnsureOptimizedUses;
963
964public:
965 explicit MemorySSAPrinterPass(raw_ostream &OS, bool EnsureOptimizedUses)
966 : OS(OS), EnsureOptimizedUses(EnsureOptimizedUses) {}
967
969
970 static bool isRequired() { return true; }
971};
972
973/// Printer pass for \c MemorySSA via the walker.
975 : public PassInfoMixin<MemorySSAWalkerPrinterPass> {
976 raw_ostream &OS;
977
978public:
980
982
983 static bool isRequired() { return true; }
984};
985
986/// Verifier pass for \c MemorySSA.
987struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
989 static bool isRequired() { return true; }
990};
991
992/// Legacy analysis pass which computes \c MemorySSA.
994public:
996
997 static char ID;
998
999 bool runOnFunction(Function &) override;
1000 void releaseMemory() override;
1001 MemorySSA &getMSSA() { return *MSSA; }
1002 const MemorySSA &getMSSA() const { return *MSSA; }
1003
1004 void getAnalysisUsage(AnalysisUsage &AU) const override;
1005
1006 void verifyAnalysis() const override;
1007 void print(raw_ostream &OS, const Module *M = nullptr) const override;
1008
1009private:
1010 std::unique_ptr<MemorySSA> MSSA;
1011};
1012
1013/// This is the generic walker interface for walkers of MemorySSA.
1014/// Walkers are used to be able to further disambiguate the def-use chains
1015/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
1016/// you.
1017/// In particular, while the def-use chains provide basic information, and are
1018/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
1019/// MemoryUse as AliasAnalysis considers it, a user may want better or other
1020/// information. In particular, they may want to use SCEV info to further
1021/// disambiguate memory accesses, or they may want the nearest dominating
1022/// may-aliasing MemoryDef for a call or a store. This API enables a
1023/// standardized interface to getting and using that info.
1025public:
1027 virtual ~MemorySSAWalker() = default;
1028
1030
1031 /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
1032 /// will give you the nearest dominating MemoryAccess that Mod's the location
1033 /// the instruction accesses (by skipping any def which AA can prove does not
1034 /// alias the location(s) accessed by the instruction given).
1035 ///
1036 /// Note that this will return a single access, and it must dominate the
1037 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
1038 /// this will return the MemoryPhi, not the operand. This means that
1039 /// given:
1040 /// if (a) {
1041 /// 1 = MemoryDef(liveOnEntry)
1042 /// store %a
1043 /// } else {
1044 /// 2 = MemoryDef(liveOnEntry)
1045 /// store %b
1046 /// }
1047 /// 3 = MemoryPhi(2, 1)
1048 /// MemoryUse(3)
1049 /// load %a
1050 ///
1051 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
1052 /// in the if (a) branch.
1054 BatchAAResults &AA) {
1056 assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
1057 return getClobberingMemoryAccess(MA, AA);
1058 }
1059
1060 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
1061 /// but takes a MemoryAccess instead of an Instruction.
1063 BatchAAResults &AA) = 0;
1064
1065 /// Given a potentially clobbering memory access and a new location,
1066 /// calling this will give you the nearest dominating clobbering MemoryAccess
1067 /// (by skipping non-aliasing def links).
1068 ///
1069 /// This version of the function is mainly used to disambiguate phi translated
1070 /// pointers, where the value of a pointer may have changed from the initial
1071 /// memory access. Note that this expects to be handed either a MemoryUse,
1072 /// or an already potentially clobbering access. Unlike the above API, if
1073 /// given a MemoryDef that clobbers the pointer as the starting access, it
1074 /// will return that MemoryDef, whereas the above would return the clobber
1075 /// starting from the use side of the memory def.
1077 const MemoryLocation &,
1078 BatchAAResults &AA) = 0;
1079
1081 BatchAAResults BAA(MSSA->getAA());
1082 return getClobberingMemoryAccess(I, BAA);
1083 }
1084
1086 BatchAAResults BAA(MSSA->getAA());
1087 return getClobberingMemoryAccess(MA, BAA);
1088 }
1089
1091 const MemoryLocation &Loc) {
1092 BatchAAResults BAA(MSSA->getAA());
1093 return getClobberingMemoryAccess(MA, Loc, BAA);
1094 }
1095
1096 /// Given a memory access, invalidate anything this walker knows about
1097 /// that access.
1098 /// This API is used by walkers that store information to perform basic cache
1099 /// invalidation. This will be called by MemorySSA at appropriate times for
1100 /// the walker it uses or returns.
1101 virtual void invalidateInfo(MemoryAccess *) {}
1102
1103protected:
1104 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
1105 // constructor.
1107};
1108
1109/// A MemorySSAWalker that does no alias queries, or anything else. It
1110/// simply returns the links as they were constructed by the builder.
1112public:
1113 // Keep the overrides below from hiding the Instruction overload of
1114 // getClobberingMemoryAccess.
1115 using MemorySSAWalker::getClobberingMemoryAccess;
1116
1118 BatchAAResults &) override;
1120 const MemoryLocation &,
1121 BatchAAResults &) override;
1122};
1123
1124using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
1125using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
1126
1127/// Iterator base class used to implement const and non-const iterators
1128/// over the defining accesses of a MemoryAccess.
1129template <class T>
1131 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
1132 std::forward_iterator_tag, T, ptrdiff_t, T *,
1133 T *> {
1134 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
1135
1136public:
1137 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
1139
1141 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
1142 }
1143
1144 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
1145 // block from the operand in constant time (In a PHINode, the uselist has
1146 // both, so it's just subtraction). We provide it as part of the
1147 // iterator to avoid callers having to linear walk to get the block.
1148 // If the operation becomes constant time on MemoryPHI's, this bit of
1149 // abstraction breaking should be removed.
1151 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
1152 assert(MP && "Tried to get phi arg block when not iterating over a PHI");
1153 return MP->getIncomingBlock(ArgNo);
1154 }
1155
1156 typename std::iterator_traits<BaseT>::pointer operator*() const {
1157 assert(Access && "Tried to access past the end of our iterator");
1158 // Go to the first argument for phis, and the defining access for everything
1159 // else.
1160 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
1161 return MP->getIncomingValue(ArgNo);
1162 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
1163 }
1164
1165 using BaseT::operator++;
1167 assert(Access && "Hit end of iterator");
1168 if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
1169 if (++ArgNo >= MP->getNumIncomingValues()) {
1170 ArgNo = 0;
1171 Access = nullptr;
1172 }
1173 } else {
1174 Access = nullptr;
1175 }
1176 return *this;
1177 }
1178
1179private:
1180 T *Access = nullptr;
1181 unsigned ArgNo = 0;
1182};
1183
1185 return memoryaccess_def_iterator(this);
1186}
1187
1190}
1191
1194}
1195
1198}
1199
1200/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
1201/// and uses in the inverse case.
1202template <> struct GraphTraits<MemoryAccess *> {
1205
1206 static NodeRef getEntryNode(NodeRef N) { return N; }
1207 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
1208 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
1209};
1210
1211template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1214
1215 static NodeRef getEntryNode(NodeRef N) { return N; }
1216 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1217 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1218};
1219
1220/// Provide an iterator that walks defs, giving both the memory access,
1221/// and the current pointer location, updating the pointer location as it
1222/// changes due to phi node translation.
1223///
1224/// This iterator, while somewhat specialized, is what most clients actually
1225/// want when walking upwards through MemorySSA def chains. It takes a pair of
1226/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
1227/// memory location through phi nodes for the user.
1229 : public iterator_facade_base<upward_defs_iterator,
1230 std::forward_iterator_tag,
1231 const MemoryAccessPair> {
1232 using BaseT = upward_defs_iterator::iterator_facade_base;
1233
1234public:
1236 : DefIterator(Info.first), Location(Info.second),
1237 OriginalAccess(Info.first), DT(DT) {
1238 CurrentPair.first = nullptr;
1239
1240 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1241 fillInCurrentPair();
1242 }
1243
1244 upward_defs_iterator() { CurrentPair.first = nullptr; }
1245
1247 return DefIterator == Other.DefIterator;
1248 }
1249
1250 typename std::iterator_traits<BaseT>::reference operator*() const {
1251 assert(DefIterator != OriginalAccess->defs_end() &&
1252 "Tried to access past the end of our iterator");
1253 return CurrentPair;
1254 }
1255
1256 using BaseT::operator++;
1258 assert(DefIterator != OriginalAccess->defs_end() &&
1259 "Tried to access past the end of the iterator");
1260 ++DefIterator;
1261 if (DefIterator != OriginalAccess->defs_end())
1262 fillInCurrentPair();
1263 return *this;
1264 }
1265
1266 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1267
1268private:
1269 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1270 /// loop. In particular, this guarantees that it only references a single
1271 /// MemoryLocation during execution of the containing function.
1272 LLVM_ABI bool IsGuaranteedLoopInvariant(const Value *Ptr) const;
1273
1274 void fillInCurrentPair() {
1275 CurrentPair.first = *DefIterator;
1276 CurrentPair.second = Location;
1277 if (WalkingPhi && Location.Ptr) {
1278 PHITransAddr Translator(
1279 const_cast<Value *>(Location.Ptr),
1280 OriginalAccess->getBlock()->getDataLayout(), nullptr);
1281
1282 if (Value *Addr =
1283 Translator.translateValue(OriginalAccess->getBlock(),
1284 DefIterator.getPhiArgBlock(), DT, true))
1285 if (Addr != CurrentPair.second.Ptr)
1286 CurrentPair.second = CurrentPair.second.getWithNewPtr(Addr);
1287
1288 // Mark size as unknown, if the location is not guaranteed to be
1289 // loop-invariant for any possible loop in the function. Setting the size
1290 // to unknown guarantees that any memory accesses that access locations
1291 // after the pointer are considered as clobbers, which is important to
1292 // catch loop carried dependences.
1293 if (!IsGuaranteedLoopInvariant(CurrentPair.second.Ptr))
1294 CurrentPair.second = CurrentPair.second.getWithNewSize(
1296 }
1297 }
1298
1299 MemoryAccessPair CurrentPair;
1300 memoryaccess_def_iterator DefIterator;
1301 MemoryLocation Location;
1302 MemoryAccess *OriginalAccess = nullptr;
1303 DominatorTree *DT = nullptr;
1304 bool WalkingPhi = false;
1305};
1306
1307inline upward_defs_iterator
1309 return upward_defs_iterator(Pair, &DT);
1310}
1311
1313
1314inline iterator_range<upward_defs_iterator>
1316 return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
1317}
1318
1319/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1320/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1321/// comparing against a null def_chain_iterator, this will compare equal only
1322/// after walking said Phi/liveOnEntry.
1323///
1324/// The UseOptimizedChain flag specifies whether to walk the clobbering
1325/// access chain, or all the accesses.
1326///
1327/// Normally, MemoryDef are all just def/use linked together, so a def_chain on
1328/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1329/// a phi node. The optimized chain walks the clobbering access of a store.
1330/// So if you are just trying to find, given a store, what the next
1331/// thing that would clobber the same memory is, you want the optimized chain.
1332template <class T, bool UseOptimizedChain = false>
1334 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1335 std::forward_iterator_tag, MemoryAccess *> {
1336 def_chain_iterator() : MA(nullptr) {}
1337 def_chain_iterator(T MA) : MA(MA) {}
1338
1339 T operator*() const { return MA; }
1340
1342 // N.B. liveOnEntry has a null defining access.
1343 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1344 if (UseOptimizedChain && MUD->isOptimized())
1345 MA = MUD->getOptimized();
1346 else
1347 MA = MUD->getDefiningAccess();
1348 } else {
1349 MA = nullptr;
1350 }
1351
1352 return *this;
1353 }
1354
1355 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1356
1357private:
1358 T MA;
1359};
1360
1361template <class T>
1362inline iterator_range<def_chain_iterator<T>>
1363def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1364#ifdef EXPENSIVE_CHECKS
1365 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1366 "UpTo isn't in the def chain!");
1367#endif
1369}
1370
1371template <class T>
1375}
1376
1377} // end namespace llvm
1378
1379#endif // LLVM_ANALYSIS_MEMORYSSA_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define LLVM_ABI
Definition: Compiler.h:213
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
This file defines the DenseMap class.
uint64_t Addr
bool End
Definition: ELF_riscv.cpp:480
DenseMap< Block *, BlockRelaxAux > Blocks
Definition: ELF_riscv.cpp:507
static bool runOnFunction(Function &F, bool PostInlining)
IRTranslator LLVM IR MI
static MemoryAccess * getClobberingMemoryAccess(MemorySSA &MSSA, BatchAAResults &BAA, SinkAndHoistLICMFlags &Flags, MemoryUseOrDef *MA)
Definition: LICM.cpp:1153
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Check Debug Module
This file provides utility analysis objects describing memory locations.
#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS)
Macro for generating out-of-class operand accessor definitions.
static bool dominates(InstrPosIndexes &PosIndexes, const MachineInstr &A, const MachineInstr &B)
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
A private abstract base class describing the concept of an individual alias analysis implementation.
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:294
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
Represent the analysis usage information of a pass.
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:252
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
Extension point for the Value hierarchy.
Definition: DerivedUser.h:27
void(*)(DerivedUser *) DeleteValueTy
Definition: DerivedUser.h:29
A MemorySSAWalker that does no alias queries, or anything else.
Definition: MemorySSA.h:1111
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:314
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:40
LLVM_ABI void dump() const
Definition: MemorySSA.cpp:2272
AllAccessType::reverse_self_iterator getReverseIterator()
Definition: MemorySSA.h:187
AllAccessType::const_self_iterator getIterator() const
Definition: MemorySSA.h:184
MemoryAccess(const MemoryAccess &)=delete
static bool classof(const Value *V)
Definition: MemorySSA.h:157
DefsOnlyType::const_self_iterator getDefsIterator() const
Definition: MemorySSA.h:196
DefsOnlyType::self_iterator getDefsIterator()
Definition: MemorySSA.h:193
DefsOnlyType::reverse_self_iterator getReverseDefsIterator()
Definition: MemorySSA.h:199
DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const
Definition: MemorySSA.h:202
memoryaccess_def_iterator defs_end()
Definition: MemorySSA.h:1192
~MemoryAccess()=default
MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, BasicBlock *BB, AllocInfo AllocInfo)
Definition: MemorySSA.h:221
BasicBlock * getBlock() const
Definition: MemorySSA.h:162
user_iterator iterator
The user iterators for a memory access.
Definition: MemorySSA.h:168
AllAccessType::const_reverse_self_iterator getReverseIterator() const
Definition: MemorySSA.h:190
LLVM_ABI void print(raw_ostream &OS) const
Definition: MemorySSA.cpp:2211
unsigned getID() const
Used for debugging and tracking things about MemoryAccesses.
Definition: MemorySSA.h:663
MemoryAccess & operator=(const MemoryAccess &)=delete
void setBlock(BasicBlock *BB)
Used by MemorySSA to change the block of a MemoryAccess when it is moved.
Definition: MemorySSA.h:215
const_user_iterator const_iterator
Definition: MemorySSA.h:169
memoryaccess_def_iterator defs_begin()
This iterator walks over all of the defs in a given MemoryAccess.
Definition: MemorySSA.h:1184
AllAccessType::self_iterator getIterator()
Get the iterators for the all access list and the defs only list We default to the all access list.
Definition: MemorySSA.h:181
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition: MemorySSA.h:371
static bool classof(const Value *MA)
Definition: MemorySSA.h:388
void resetOptimized()
Definition: MemorySSA.h:405
MemoryAccess * getOptimized() const
Definition: MemorySSA.h:397
unsigned getID() const
Definition: MemorySSA.h:412
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
bool isOptimized() const
Definition: MemorySSA.h:401
MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, unsigned Ver)
Definition: MemorySSA.h:379
void setOptimized(MemoryAccess *MA)
Definition: MemorySSA.h:392
Representation for a specific memory location.
const Value * Ptr
The address of the start of the location.
Represents phi nodes for memory accesses.
Definition: MemorySSA.h:479
void setIncomingBlock(unsigned I, BasicBlock *BB)
Definition: MemorySSA.h:557
void allocHungoffUses(unsigned N)
this is more complicated than the generic User::allocHungoffUses, because we have to allocate Uses fo...
Definition: MemorySSA.h:642
void setIncomingValue(unsigned I, MemoryAccess *V)
Definition: MemorySSA.h:533
static bool classof(const Value *V)
Definition: MemorySSA.h:628
void unorderedDeleteIncomingValue(const MemoryAccess *MA)
Definition: MemorySSA.h:623
const_block_iterator block_end() const
Definition: MemorySSA.h:512
BasicBlock * getIncomingBlock(const Use &U) const
Return incoming basic block corresponding to an operand of the PHI.
Definition: MemorySSA.h:546
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
Provide fast operand accessors.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: MemorySSA.h:529
MemoryAccess * getIncomingValueForBlock(const BasicBlock *BB) const
Definition: MemorySSA.h:581
block_iterator block_end()
Definition: MemorySSA.h:510
const_block_iterator block_begin() const
Definition: MemorySSA.h:506
iterator_range< block_iterator > blocks()
Definition: MemorySSA.h:516
void unorderedDeleteIncomingIf(Fn &&Pred)
Definition: MemorySSA.h:604
void unorderedDeleteIncoming(unsigned I)
Definition: MemorySSA.h:588
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
Definition: MemorySSA.h:542
const_op_range incoming_values() const
Definition: MemorySSA.h:526
BasicBlock * getIncomingBlock(MemoryAccess::const_user_iterator I) const
Return incoming basic block corresponding to value use iterator.
Definition: MemorySSA.h:553
static unsigned getIncomingValueNumForOperand(unsigned I)
Definition: MemorySSA.h:539
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: MemorySSA.h:563
op_range incoming_values()
Definition: MemorySSA.h:524
void unorderedDeleteIncomingBlock(const BasicBlock *BB)
Definition: MemorySSA.h:616
MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds=0)
Definition: MemorySSA.h:491
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
Definition: MemorySSA.h:532
static unsigned getOperandNumForIncomingValue(unsigned I)
Definition: MemorySSA.h:538
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
Definition: MemorySSA.h:574
iterator_range< const_block_iterator > blocks() const
Definition: MemorySSA.h:520
unsigned getID() const
Definition: MemorySSA.h:634
BasicBlock *const * const_block_iterator
Definition: MemorySSA.h:500
block_iterator block_begin()
Definition: MemorySSA.h:502
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:936
LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM)
Definition: MemorySSA.cpp:2366
Printer pass for MemorySSA.
Definition: MemorySSA.h:960
static bool isRequired()
Definition: MemorySSA.h:970
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition: MemorySSA.cpp:2382
MemorySSAPrinterPass(raw_ostream &OS, bool EnsureOptimizedUses)
Definition: MemorySSA.h:965
static LLVM_ABI bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, AliasAnalysis &AA)
Definition: MemorySSA.cpp:340
Printer pass for MemorySSA via the walker.
Definition: MemorySSA.h:975
MemorySSAWalkerPrinterPass(raw_ostream &OS)
Definition: MemorySSA.h:979
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition: MemorySSA.cpp:2398
This is the generic walker interface for walkers of MemorySSA.
Definition: MemorySSA.h:1024
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition: MemorySSA.h:1053
virtual ~MemorySSAWalker()=default
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc)
Definition: MemorySSA.h:1090
virtual void invalidateInfo(MemoryAccess *)
Given a memory access, invalidate anything this walker knows about that access.
Definition: MemorySSA.h:1101
virtual MemoryAccess * getClobberingMemoryAccess(MemoryAccess *, const MemoryLocation &, BatchAAResults &AA)=0
Given a potentially clobbering memory access and a new location, calling this will give you the neare...
virtual MemoryAccess * getClobberingMemoryAccess(MemoryAccess *, BatchAAResults &AA)=0
Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...
MemoryAccess * getClobberingMemoryAccess(const Instruction *I)
Definition: MemorySSA.h:1080
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA)
Definition: MemorySSA.h:1085
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:993
const MemorySSA & getMSSA() const
Definition: MemorySSA.h:1002
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:702
AliasAnalysis & getAA()
Definition: MemorySSA.h:802
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:760
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)
Definition: MemorySSA.h:836
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
Definition: MemorySSA.h:815
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
Definition: MemorySSA.h:793
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
Definition: MemorySSA.h:821
MemorySSA(MemorySSA &&)=delete
DominatorTree & getDomTree() const
Definition: MemorySSA.h:728
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:720
MemoryPhi * getMemoryAccess(const BasicBlock *BB) const
Definition: MemorySSA.h:724
MemoryAccess * getLiveOnEntryDef() const
Definition: MemorySSA.h:744
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:768
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:740
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:250
MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, AllocInfo AllocInfo)
Definition: MemorySSA.h:282
~MemoryUseOrDef()=default
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:260
void resetOptimized()
Reset the ID of what this MemoryUse was optimized to, causing it to be rewalked by the walker if nece...
Definition: MemorySSA.h:690
MemoryAccess * getOptimized() const
Return the MemoryAccess associated with the optimized use, or nullptr.
Definition: MemorySSA.h:677
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false)
Definition: MemorySSA.h:293
void setOptimized(MemoryAccess *)
Sets the optimized use for a MemoryDef.
Definition: MemorySSA.h:683
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
Instruction * getMemoryInst() const
Get the instruction that this MemoryUse represents.
Definition: MemorySSA.h:257
static bool classof(const Value *MA)
Definition: MemorySSA.h:262
bool isOptimized() const
Do we have an optimized use?
Definition: MemorySSA.h:671
Represents read-only accesses to memory.
Definition: MemorySSA.h:310
MemoryAccess * getOptimized() const
Definition: MemorySSA.h:341
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess)
MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
Definition: MemorySSA.h:316
LLVM_ABI void print(raw_ostream &OS) const
Definition: MemorySSA.cpp:2262
void resetOptimized()
Definition: MemorySSA.h:345
bool isOptimized() const
Whether the MemoryUse is optimized.
Definition: MemorySSA.h:337
static bool classof(const Value *MA)
Definition: MemorySSA.h:323
void setOptimized(MemoryAccess *DMA)
Definition: MemorySSA.h:329
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
PHITransAddr - An address value which tracks and handles phi translation.
Definition: PHITransAddr.h:36
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:380
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
LLVM_ABI void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
void setOperand(unsigned i, Value *Val)
Definition: User.h:237
Value * getOperand(unsigned i) const
Definition: User.h:232
LLVM Value Representation.
Definition: Value.h:75
user_iterator_impl< const User > const_user_iterator
Definition: Value.h:392
unsigned getValueID() const
Return an ID for the concrete type of this object.
Definition: Value.h:543
LLVM_ABI void deleteValue()
Delete a pointer to a generic Value.
Definition: Value.cpp:111
user_iterator_impl< User > user_iterator
Definition: Value.h:391
typename ilist_select_iterator_type< OptionsT::has_iterator_bits, OptionsT, true, false >::type reverse_self_iterator
Definition: ilist_node.h:106
typename ilist_select_iterator_type< OptionsT::has_iterator_bits, OptionsT, false, true >::type const_self_iterator
Definition: ilist_node.h:103
typename ilist_select_iterator_type< OptionsT::has_iterator_bits, OptionsT, false, false >::type self_iterator
Definition: ilist_node.h:100
typename ilist_select_iterator_type< OptionsT::has_iterator_bits, OptionsT, true, true >::type const_reverse_self_iterator
Definition: ilist_node.h:109
reverse_self_iterator getReverseIterator()
Definition: ilist_node.h:137
self_iterator getIterator()
Definition: ilist_node.h:134
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
Definition: ilist.h:328
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of ...
Definition: iterator.h:80
A range adaptor for a pair of iterators.
BasicBlock * getPhiArgBlock() const
Definition: MemorySSA.h:1150
std::iterator_traits< BaseT >::pointer operator*() const
Definition: MemorySSA.h:1156
bool operator==(const memoryaccess_def_iterator_base &Other) const
Definition: MemorySSA.h:1140
memoryaccess_def_iterator_base & operator++()
Definition: MemorySSA.h:1166
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
A simple intrusive list implementation.
Definition: simple_ilist.h:81
Provide an iterator that walks defs, giving both the memory access, and the current pointer location,...
Definition: MemorySSA.h:1231
upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
Definition: MemorySSA.h:1235
std::iterator_traits< BaseT >::reference operator*() const
Definition: MemorySSA.h:1250
BasicBlock * getPhiArgBlock() const
Definition: MemorySSA.h:1266
upward_defs_iterator & operator++()
Definition: MemorySSA.h:1257
bool operator==(const upward_defs_iterator &Other) const
Definition: MemorySSA.h:1246
This file defines the ilist_node class template, which is a convenient base class for creating classe...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ INVALID_MEMORYACCESS_ID
Definition: MemorySSA.h:130
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1770
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT)
Definition: MemorySSA.h:1308
std::pair< const MemoryAccess *, MemoryLocation > ConstMemoryAccessPair
Definition: MemorySSA.h:1125
iterator_range< def_chain_iterator< T > > def_chain(T MA, MemoryAccess *UpTo=nullptr)
Definition: MemorySSA.h:1363
memoryaccess_def_iterator_base< MemoryAccess > memoryaccess_def_iterator
Definition: MemorySSA.h:134
memoryaccess_def_iterator_base< const MemoryAccess > const_memoryaccess_def_iterator
Definition: MemorySSA.h:136
@ Other
Any other memory.
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:84
std::pair< MemoryAccess *, MemoryLocation > MemoryAccessPair
Definition: MemorySSA.h:1124
upward_defs_iterator upward_defs_end()
Definition: MemorySSA.h:1312
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:312
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1886
iterator_range< upward_defs_iterator > upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT)
Definition: MemorySSA.h:1315
iterator_range< def_chain_iterator< T, true > > optimized_def_chain(T MA)
Definition: MemorySSA.h:1372
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:856
#define N
A CRTP mix-in that provides informational APIs needed for analysis passes.
Definition: PassManager.h:93
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Definition: Analysis.h:29
FixedNumOperandTraits - determine the allocation regime of the Use array when it is a prefix to the User object, and the number of Use objects is known at compile time.
Definition: OperandTraits.h:30
static ChildIteratorType child_begin(NodeRef N)
Definition: MemorySSA.h:1216
static ChildIteratorType child_end(NodeRef N)
Definition: MemorySSA.h:1217
static ChildIteratorType child_begin(NodeRef N)
Definition: MemorySSA.h:1207
static ChildIteratorType child_end(NodeRef N)
Definition: MemorySSA.h:1208
static NodeRef getEntryNode(NodeRef N)
Definition: MemorySSA.h:1206
HungoffOperandTraits - determine the allocation regime of the Use array when it is not a prefix to the User object, but allocated at an unrelated heap address.
Definition: OperandTraits.h:93
Result(std::unique_ptr< MemorySSA > &&MSSA)
Definition: MemorySSA.h:946
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
std::unique_ptr< MemorySSA > MSSA
Definition: MemorySSA.h:950
Verifier pass for MemorySSA.
Definition: MemorySSA.h:987
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition: MemorySSA.cpp:2408
static bool isRequired()
Definition: MemorySSA.h:989
static unsigned operands(const MemoryUseOrDef *MUD)
Definition: MemorySSA.h:439
static Use * op_end(MemoryUseOrDef *MUD)
Definition: MemorySSA.h:433
static Use * op_begin(MemoryUseOrDef *MUD)
Definition: MemorySSA.h:427
Compile-time customization of User operands.
Definition: User.h:42
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:70
Information about how a User object was allocated, to be passed into the User constructor.
Definition: User.h:79
Indicates this User has operands "hung off" in another allocation.
Definition: User.h:57
Indicates this User has operands co-allocated.
Definition: User.h:60
Walks the defining accesses of MemoryDefs.
Definition: MemorySSA.h:1335
bool operator==(const def_chain_iterator &O) const
Definition: MemorySSA.h:1355
def_chain_iterator & operator++()
Definition: MemorySSA.h:1341
static void deleteNode(MemoryAccess *MA)
Definition: MemorySSA.h:235
Use delete by default for iplist and ilist.
Definition: ilist.h:41