LLVM 21.0.0git
GVN.cpp
1//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs global value numbering to eliminate fully redundant
10// instructions. It also performs simple dead load elimination.
11//
12// Note that this pass does the value numbering itself; it does not use the
13// ValueNumbering analysis passes.
14//
15//===----------------------------------------------------------------------===//
16
18#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/Hashing.h"
21#include "llvm/ADT/MapVector.h"
23#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/SetVector.h"
27#include "llvm/ADT/Statistic.h"
31#include "llvm/Analysis/CFG.h"
36#include "llvm/Analysis/Loads.h"
46#include "llvm/IR/Attributes.h"
47#include "llvm/IR/BasicBlock.h"
48#include "llvm/IR/Constant.h"
49#include "llvm/IR/Constants.h"
50#include "llvm/IR/DebugLoc.h"
51#include "llvm/IR/Dominators.h"
52#include "llvm/IR/Function.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
57#include "llvm/IR/LLVMContext.h"
58#include "llvm/IR/Metadata.h"
59#include "llvm/IR/Module.h"
60#include "llvm/IR/PassManager.h"
62#include "llvm/IR/Type.h"
63#include "llvm/IR/Use.h"
64#include "llvm/IR/Value.h"
66#include "llvm/Pass.h"
70#include "llvm/Support/Debug.h"
77#include <algorithm>
78#include <cassert>
79#include <cstdint>
80#include <optional>
81#include <utility>
82
83using namespace llvm;
84using namespace llvm::gvn;
85using namespace llvm::VNCoercion;
86using namespace PatternMatch;
87
88#define DEBUG_TYPE "gvn"
89
90STATISTIC(NumGVNInstr, "Number of instructions deleted");
91STATISTIC(NumGVNLoad, "Number of loads deleted");
92STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
93STATISTIC(NumGVNBlocks, "Number of blocks merged");
94STATISTIC(NumGVNSimpl, "Number of instructions simplified");
95STATISTIC(NumGVNEqProp, "Number of equalities propagated");
96STATISTIC(NumPRELoad, "Number of loads PRE'd");
97STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
98STATISTIC(NumPRELoadMoved2CEPred,
99 "Number of loads moved to predecessor of a critical edge in PRE");
100
101STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
102 "Number of blocks speculated as available in "
103 "IsValueFullyAvailableInBlock(), max");
104STATISTIC(MaxBBSpeculationCutoffReachedTimes,
105 "Number of times we we reached gvn-max-block-speculations cut-off "
106 "preventing further exploration");
107
108static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
109static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
110static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
111 cl::init(true));
112static cl::opt<bool>
113GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
114 cl::init(false));
115static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
116static cl::opt<bool> GVNEnableMemorySSA("enable-gvn-memoryssa",
117 cl::init(false));
118
120 "gvn-max-num-deps", cl::Hidden, cl::init(100),
121 cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
122
123// This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
125 "gvn-max-block-speculations", cl::Hidden, cl::init(600),
126 cl::desc("Max number of blocks we're willing to speculate on (and recurse "
127 "into) when deducing if a value is fully available or not in GVN "
128 "(default = 600)"));
129
131 "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
132 cl::desc("Max number of visited instructions when trying to find "
133 "dominating value of select dependency (default = 100)"));
134
136 "gvn-max-num-insns", cl::Hidden, cl::init(100),
137 cl::desc("Max number of instructions to scan in each basic block in GVN "
138 "(default = 100)"));
139
140 struct llvm::GVNPass::Expression {
141 uint32_t opcode;
142 bool commutative = false;
143 // The type is not necessarily the result type of the expression; it may be
144 // any additional type needed to disambiguate the expression.
145 Type *type = nullptr;
146 SmallVector<uint32_t, 4> varargs;
147
148 AttributeList attrs;
149
150 Expression(uint32_t o = ~2U) : opcode(o) {}
151
152 bool operator==(const Expression &other) const {
153 if (opcode != other.opcode)
154 return false;
155 if (opcode == ~0U || opcode == ~1U)
156 return true;
157 if (type != other.type)
158 return false;
159 if (varargs != other.varargs)
160 return false;
161 if ((!attrs.isEmpty() || !other.attrs.isEmpty()) &&
162 !attrs.intersectWith(type->getContext(), other.attrs).has_value())
163 return false;
164 return true;
165 }
166
167 friend hash_code hash_value(const Expression &Value) {
168 return hash_combine(
169 Value.opcode, Value.type,
170 hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
171 }
172};
173
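// Note: the opcode values ~0U and ~1U are reserved for the DenseMap empty and
// tombstone keys defined below; operator==() returns true for two matching
// sentinels without comparing type or operands, and the default opcode ~2U
// keeps freshly constructed expressions distinct from both sentinels.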
174namespace llvm {
175
176template <> struct DenseMapInfo<GVNPass::Expression> {
177 static inline GVNPass::Expression getEmptyKey() { return ~0U; }
178 static inline GVNPass::Expression getTombstoneKey() { return ~1U; }
179
180 static unsigned getHashValue(const GVNPass::Expression &e) {
181 using llvm::hash_value;
182
183 return static_cast<unsigned>(hash_value(e));
184 }
185
186 static bool isEqual(const GVNPass::Expression &LHS,
187 const GVNPass::Expression &RHS) {
188 return LHS == RHS;
189 }
190};
191
192} // end namespace llvm
193
194/// Represents a particular available value that we know how to materialize.
195/// Materialization of an AvailableValue never fails. An AvailableValue is
196/// implicitly associated with a rematerialization point which is the
197/// location of the instruction from which it was formed.
198 struct llvm::gvn::AvailableValue {
199 enum class ValType {
200 SimpleVal, // A simple offsetted value that is accessed.
201 LoadVal, // A value produced by a load.
202 MemIntrin, // A memory intrinsic which is loaded from.
203 UndefVal, // An UndefValue representing a value from a dead block (which
204 // is not yet physically removed from the CFG).
205 SelectVal, // A pointer select which is loaded from and for which the load
206 // can be replaced by a value select.
207 };
208
209 /// Val - The value that is live out of the block.
210 Value *Val;
211 /// Kind of the live-out value.
212 ValType Kind;
213
214 /// Offset - The byte offset in Val that is interesting for the load query.
215 unsigned Offset = 0;
216 /// V1, V2 - The dominating non-clobbered values of SelectVal.
217 Value *V1 = nullptr, *V2 = nullptr;
218
219 static AvailableValue get(Value *V, unsigned Offset = 0) {
220 AvailableValue Res;
221 Res.Val = V;
222 Res.Kind = ValType::SimpleVal;
223 Res.Offset = Offset;
224 return Res;
225 }
226
227 static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
228 AvailableValue Res;
229 Res.Val = MI;
230 Res.Kind = ValType::MemIntrin;
231 Res.Offset = Offset;
232 return Res;
233 }
234
235 static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
236 AvailableValue Res;
237 Res.Val = Load;
238 Res.Kind = ValType::LoadVal;
239 Res.Offset = Offset;
240 return Res;
241 }
242
243 static AvailableValue getUndef() {
244 AvailableValue Res;
245 Res.Val = nullptr;
246 Res.Kind = ValType::UndefVal;
247 Res.Offset = 0;
248 return Res;
249 }
250
251 static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2) {
252 AvailableValue Res;
253 Res.Val = Sel;
254 Res.Kind = ValType::SelectVal;
255 Res.Offset = 0;
256 Res.V1 = V1;
257 Res.V2 = V2;
258 return Res;
259 }
260
261 bool isSimpleValue() const { return Kind == ValType::SimpleVal; }
262 bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; }
263 bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; }
264 bool isUndefValue() const { return Kind == ValType::UndefVal; }
265 bool isSelectValue() const { return Kind == ValType::SelectVal; }
266
267 Value *getSimpleValue() const {
268 assert(isSimpleValue() && "Wrong accessor");
269 return Val;
270 }
271
272 LoadInst *getCoercedLoadValue() const {
273 assert(isCoercedLoadValue() && "Wrong accessor");
274 return cast<LoadInst>(Val);
275 }
276
277 MemIntrinsic *getMemIntrinValue() const {
278 assert(isMemIntrinValue() && "Wrong accessor");
279 return cast<MemIntrinsic>(Val);
280 }
281
282 SelectInst *getSelectValue() const {
283 assert(isSelectValue() && "Wrong accessor");
284 return cast<SelectInst>(Val);
285 }
286
287 /// Emit code at the specified insertion point to adjust the value defined
288 /// here to the specified type. This handles various coercion cases.
289 Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
290 GVNPass &gvn) const;
291};
292
293/// Represents an AvailableValue which can be rematerialized at the end of
294/// the associated BasicBlock.
295 struct llvm::gvn::AvailableValueInBlock {
296 /// BB - The basic block in question.
297 BasicBlock *BB = nullptr;
298
299 /// AV - The actual available value.
300 AvailableValue AV;
301
302 static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
303 AvailableValueInBlock Res;
304 Res.BB = BB;
305 Res.AV = std::move(AV);
306 return Res;
307 }
308
309 static AvailableValueInBlock get(BasicBlock *BB, Value *V,
310 unsigned Offset = 0) {
311 return get(BB, AvailableValue::get(V, Offset));
312 }
313
314 static AvailableValueInBlock getUndef(BasicBlock *BB) {
315 return get(BB, AvailableValue::getUndef());
316 }
317
318 static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel,
319 Value *V1, Value *V2) {
320 return get(BB, AvailableValue::getSelect(Sel, V1, V2));
321 }
322
323 /// Emit code at the end of this block to adjust the value defined here to
324 /// the specified type. This handles various coercion cases.
325 Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
326 return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
327 }
328};
329
330//===----------------------------------------------------------------------===//
331// ValueTable Internal Functions
332//===----------------------------------------------------------------------===//
333
334GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
335 Expression e;
336 e.type = I->getType();
337 e.opcode = I->getOpcode();
338 if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
339 // gc.relocate is 'special' call: its second and third operands are
340 // not real values, but indices into statepoint's argument list.
341 // Use the referred-to values for purposes of identity.
342 e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
343 e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
344 e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
345 } else {
346 for (Use &Op : I->operands())
347 e.varargs.push_back(lookupOrAdd(Op));
348 }
349 if (I->isCommutative()) {
350 // Ensure that commutative instructions that only differ by a permutation
351 // of their operands get the same value number by sorting the operand value
352 // numbers. Since the commutative operands are the first two operands, it is more
353 // efficient to sort by hand rather than using, say, std::sort.
354 assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
355 if (e.varargs[0] > e.varargs[1])
356 std::swap(e.varargs[0], e.varargs[1]);
357 e.commutative = true;
358 }
359
360 if (auto *C = dyn_cast<CmpInst>(I)) {
361 // Sort the operand value numbers so x<y and y>x get the same value number.
362 CmpInst::Predicate Predicate = C->getPredicate();
363 if (e.varargs[0] > e.varargs[1]) {
364 std::swap(e.varargs[0], e.varargs[1]);
365 Predicate = CmpInst::getSwappedPredicate(Predicate);
366 }
367 e.opcode = (C->getOpcode() << 8) | Predicate;
368 e.commutative = true;
369 } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
370 e.varargs.append(E->idx_begin(), E->idx_end());
371 } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
372 ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
373 e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
374 } else if (auto *CB = dyn_cast<CallBase>(I)) {
375 e.attrs = CB->getAttributes();
376 }
377
378 return e;
379}
380
381GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
382 unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
383 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
384 "Not a comparison!");
385 Expression e;
386 e.type = CmpInst::makeCmpResultType(LHS->getType());
387 e.varargs.push_back(lookupOrAdd(LHS));
388 e.varargs.push_back(lookupOrAdd(RHS));
389
390 // Sort the operand value numbers so x<y and y>x get the same value number.
391 if (e.varargs[0] > e.varargs[1]) {
392 std::swap(e.varargs[0], e.varargs[1]);
393 Predicate = CmpInst::getSwappedPredicate(Predicate);
394 }
395 e.opcode = (Opcode << 8) | Predicate;
396 e.commutative = true;
397 return e;
398}
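// For example, "icmp slt %a, %b" and "icmp sgt %b, %a" end up here with their
// operand value numbers in the same sorted order and the predicate swapped
// accordingly, so both produce the same packed opcode/predicate expression and
// therefore the same value number.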
399
400GVNPass::Expression
401GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
402 assert(EI && "Not an ExtractValueInst?");
403 Expression e;
404 e.type = EI->getType();
405 e.opcode = 0;
406
407 WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
408 if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
409 // EI is an extract from one of our with.overflow intrinsics. Synthesize
410 // a semantically equivalent expression instead of an extract value
411 // expression.
412 e.opcode = WO->getBinaryOp();
413 e.varargs.push_back(lookupOrAdd(WO->getLHS()));
414 e.varargs.push_back(lookupOrAdd(WO->getRHS()));
415 return e;
416 }
417
418 // Not a recognised intrinsic. Fall back to producing an extract value
419 // expression.
420 e.opcode = EI->getOpcode();
421 for (Use &Op : EI->operands())
422 e.varargs.push_back(lookupOrAdd(Op));
423
424 append_range(e.varargs, EI->indices());
425
426 return e;
427}
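// For example, "extractvalue {i32, i1} %r, 0", where %r is produced by
// llvm.sadd.with.overflow(i32 %a, i32 %b), is numbered as the underlying add
// of %a and %b rather than as an extractvalue, so it can share a value number
// with an equivalent ordinary add.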
428
429GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
430 Expression E;
431 Type *PtrTy = GEP->getType()->getScalarType();
432 const DataLayout &DL = GEP->getDataLayout();
433 unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
434 SmallMapVector<Value *, APInt, 4> VariableOffsets;
435 APInt ConstantOffset(BitWidth, 0);
436 if (GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
437 // Convert into offset representation, to recognize equivalent address
438 // calculations that use different type encoding.
439 LLVMContext &Context = GEP->getContext();
440 E.opcode = GEP->getOpcode();
441 E.type = nullptr;
442 E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
443 for (const auto &Pair : VariableOffsets) {
444 E.varargs.push_back(lookupOrAdd(Pair.first));
445 E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
446 }
447 if (!ConstantOffset.isZero())
448 E.varargs.push_back(
449 lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
450 } else {
451 // If converting to offset representation fails (for scalable vectors),
452 // fall back to type-based implementation:
453 E.opcode = GEP->getOpcode();
454 E.type = GEP->getSourceElementType();
455 for (Use &Op : GEP->operands())
456 E.varargs.push_back(lookupOrAdd(Op));
457 }
458 return E;
459}
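// As a result, two GEPs that compute the same address through different source
// element types (e.g. "getelementptr i8, ptr %p, i64 4" and
// "getelementptr i32, ptr %p, i64 1") produce identical expressions, since
// both reduce to the pointer operand plus a constant byte offset of 4.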
460
461//===----------------------------------------------------------------------===//
462// ValueTable External Functions
463//===----------------------------------------------------------------------===//
464
465GVNPass::ValueTable::ValueTable() = default;
466GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
467GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
468GVNPass::ValueTable::~ValueTable() = default;
471
472/// add - Insert a value into the table with a specified value number.
473void GVNPass::ValueTable::add(Value *V, uint32_t num) {
474 valueNumbering.insert(std::make_pair(V, num));
475 if (PHINode *PN = dyn_cast<PHINode>(V))
476 NumberingPhi[num] = PN;
477}
478
479uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
480 // FIXME: Currently the calls which may access the thread id may
481 // be considered as not accessing the memory. But this is
482 // problematic for coroutines, since coroutines may resume in a
483 // different thread. So we disable the optimization here for
484 // correctness. However, it may block many other correct
485 // optimizations. Revert this one once we can detect the memory
486 // accessing kind more precisely.
487 if (C->getFunction()->isPresplitCoroutine()) {
488 valueNumbering[C] = nextValueNumber;
489 return nextValueNumber++;
490 }
491
492 // Do not combine convergent calls since they implicitly depend on the set of
493 // threads that is currently executing, and they might be in different basic
494 // blocks.
495 if (C->isConvergent()) {
496 valueNumbering[C] = nextValueNumber;
497 return nextValueNumber++;
498 }
499
500 if (AA->doesNotAccessMemory(C)) {
501 Expression exp = createExpr(C);
502 uint32_t e = assignExpNewValueNum(exp).first;
503 valueNumbering[C] = e;
504 return e;
505 }
506
507 if (MD && AA->onlyReadsMemory(C)) {
508 Expression exp = createExpr(C);
509 auto ValNum = assignExpNewValueNum(exp);
510 if (ValNum.second) {
511 valueNumbering[C] = ValNum.first;
512 return ValNum.first;
513 }
514
515 MemDepResult local_dep = MD->getDependency(C);
516
517 if (!local_dep.isDef() && !local_dep.isNonLocal()) {
518 valueNumbering[C] = nextValueNumber;
519 return nextValueNumber++;
520 }
521
522 if (local_dep.isDef()) {
523 // For masked load/store intrinsics, the local_dep may actually be
524 // a normal load or store instruction.
525 CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
526
527 if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
528 valueNumbering[C] = nextValueNumber;
529 return nextValueNumber++;
530 }
531
532 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
533 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
534 uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
535 if (c_vn != cd_vn) {
536 valueNumbering[C] = nextValueNumber;
537 return nextValueNumber++;
538 }
539 }
540
541 uint32_t v = lookupOrAdd(local_cdep);
542 valueNumbering[C] = v;
543 return v;
544 }
545
546 // Non-local case.
547 const MemoryDependenceResults::NonLocalDepInfo &deps =
548 MD->getNonLocalCallDependency(C);
549 // FIXME: Move the checking logic to MemDep!
550 CallInst* cdep = nullptr;
551
552 // Check to see if we have a single dominating call instruction that is
553 // identical to C.
554 for (const NonLocalDepEntry &I : deps) {
555 if (I.getResult().isNonLocal())
556 continue;
557
558 // We don't handle non-definitions. If we already have a call, reject
559 // instruction dependencies.
560 if (!I.getResult().isDef() || cdep != nullptr) {
561 cdep = nullptr;
562 break;
563 }
564
565 CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());
566 // FIXME: All duplicated with non-local case.
567 if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
568 cdep = NonLocalDepCall;
569 continue;
570 }
571
572 cdep = nullptr;
573 break;
574 }
575
576 if (!cdep) {
577 valueNumbering[C] = nextValueNumber;
578 return nextValueNumber++;
579 }
580
581 if (cdep->arg_size() != C->arg_size()) {
582 valueNumbering[C] = nextValueNumber;
583 return nextValueNumber++;
584 }
585 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
586 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
587 uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
588 if (c_vn != cd_vn) {
589 valueNumbering[C] = nextValueNumber;
590 return nextValueNumber++;
591 }
592 }
593
594 uint32_t v = lookupOrAdd(cdep);
595 valueNumbering[C] = v;
596 return v;
597 }
598
599 valueNumbering[C] = nextValueNumber;
600 return nextValueNumber++;
601}
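// In effect, only calls that do not access memory, or that only read memory
// (when MemDep is available), can ever share a value number with another call;
// coroutine-presplit, convergent, and memory-writing calls always receive a
// fresh number and are never CSE'd through the value table.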
602
603/// Returns true if a value number exists for the specified value.
604bool GVNPass::ValueTable::exists(Value *V) const {
605 return valueNumbering.contains(V);
606}
607
608/// lookup_or_add - Returns the value number for the specified value, assigning
609/// it a new number if it did not have one before.
610uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
611 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
612 if (VI != valueNumbering.end())
613 return VI->second;
614
615 auto *I = dyn_cast<Instruction>(V);
616 if (!I) {
617 valueNumbering[V] = nextValueNumber;
618 return nextValueNumber++;
619 }
620
621 Expression exp;
622 switch (I->getOpcode()) {
623 case Instruction::Call:
624 return lookupOrAddCall(cast<CallInst>(I));
625 case Instruction::FNeg:
626 case Instruction::Add:
627 case Instruction::FAdd:
628 case Instruction::Sub:
629 case Instruction::FSub:
630 case Instruction::Mul:
631 case Instruction::FMul:
632 case Instruction::UDiv:
633 case Instruction::SDiv:
634 case Instruction::FDiv:
635 case Instruction::URem:
636 case Instruction::SRem:
637 case Instruction::FRem:
638 case Instruction::Shl:
639 case Instruction::LShr:
640 case Instruction::AShr:
641 case Instruction::And:
642 case Instruction::Or:
643 case Instruction::Xor:
644 case Instruction::ICmp:
645 case Instruction::FCmp:
646 case Instruction::Trunc:
647 case Instruction::ZExt:
648 case Instruction::SExt:
649 case Instruction::FPToUI:
650 case Instruction::FPToSI:
651 case Instruction::UIToFP:
652 case Instruction::SIToFP:
653 case Instruction::FPTrunc:
654 case Instruction::FPExt:
655 case Instruction::PtrToInt:
656 case Instruction::IntToPtr:
657 case Instruction::AddrSpaceCast:
658 case Instruction::BitCast:
659 case Instruction::Select:
660 case Instruction::Freeze:
661 case Instruction::ExtractElement:
662 case Instruction::InsertElement:
663 case Instruction::ShuffleVector:
664 case Instruction::InsertValue:
665 exp = createExpr(I);
666 break;
667 case Instruction::GetElementPtr:
668 exp = createGEPExpr(cast<GetElementPtrInst>(I));
669 break;
670 case Instruction::ExtractValue:
671 exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
672 break;
673 case Instruction::PHI:
674 valueNumbering[V] = nextValueNumber;
675 NumberingPhi[nextValueNumber] = cast<PHINode>(V);
676 return nextValueNumber++;
677 default:
678 valueNumbering[V] = nextValueNumber;
679 return nextValueNumber++;
680 }
681
682 uint32_t e = assignExpNewValueNum(exp).first;
683 valueNumbering[V] = e;
684 return e;
685}
686
687/// Returns the value number of the specified value. Fails if
688/// the value has not yet been numbered.
689uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
690 DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
691 if (Verify) {
692 assert(VI != valueNumbering.end() && "Value not numbered?");
693 return VI->second;
694 }
695 return (VI != valueNumbering.end()) ? VI->second : 0;
696}
697
698/// Returns the value number of the given comparison,
699/// assigning it a new number if it did not have one before. Useful when
700/// we deduced the result of a comparison, but don't immediately have an
701/// instruction realizing that comparison to hand.
702uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
703 CmpInst::Predicate Predicate,
704 Value *LHS, Value *RHS) {
705 Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
706 return assignExpNewValueNum(exp).first;
707}
708
709/// Remove all entries from the ValueTable.
710 void GVNPass::ValueTable::clear() {
711 valueNumbering.clear();
712 expressionNumbering.clear();
713 NumberingPhi.clear();
714 PhiTranslateTable.clear();
715 nextValueNumber = 1;
716 Expressions.clear();
717 ExprIdx.clear();
718 nextExprNumber = 0;
719}
720
721/// Remove a value from the value numbering.
722 void GVNPass::ValueTable::erase(Value *V) {
723 uint32_t Num = valueNumbering.lookup(V);
724 valueNumbering.erase(V);
725 // If V is a PHINode, V <--> value number is a one-to-one mapping.
726 if (isa<PHINode>(V))
727 NumberingPhi.erase(Num);
728}
729
730/// verifyRemoved - Verify that the value is removed from all internal data
731/// structures.
732void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
733 assert(!valueNumbering.contains(V) &&
734 "Inst still occurs in value numbering map!");
735}
736
737//===----------------------------------------------------------------------===//
738// LeaderMap External Functions
739//===----------------------------------------------------------------------===//
740
741/// Push a new Value to the LeaderTable onto the list for its value number.
742void GVNPass::LeaderMap::insert(uint32_t N, Value *V, const BasicBlock *BB) {
743 LeaderListNode &Curr = NumToLeaders[N];
744 if (!Curr.Entry.Val) {
745 Curr.Entry.Val = V;
746 Curr.Entry.BB = BB;
747 return;
748 }
749
750 LeaderListNode *Node = TableAllocator.Allocate<LeaderListNode>();
751 Node->Entry.Val = V;
752 Node->Entry.BB = BB;
753 Node->Next = Curr.Next;
754 Curr.Next = Node;
755}
756
757/// Scan the list of values corresponding to a given
758/// value number, and remove the given instruction if encountered.
759void GVNPass::LeaderMap::erase(uint32_t N, Instruction *I,
760 const BasicBlock *BB) {
761 LeaderListNode *Prev = nullptr;
762 LeaderListNode *Curr = &NumToLeaders[N];
763
764 while (Curr && (Curr->Entry.Val != I || Curr->Entry.BB != BB)) {
765 Prev = Curr;
766 Curr = Curr->Next;
767 }
768
769 if (!Curr)
770 return;
771
772 if (Prev) {
773 Prev->Next = Curr->Next;
774 } else {
775 if (!Curr->Next) {
776 Curr->Entry.Val = nullptr;
777 Curr->Entry.BB = nullptr;
778 } else {
779 LeaderListNode *Next = Curr->Next;
780 Curr->Entry.Val = Next->Entry.Val;
781 Curr->Entry.BB = Next->Entry.BB;
782 Curr->Next = Next->Next;
783 }
784 }
785}
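// Note: each value number's leader list keeps its first entry inline inside
// the NumToLeaders map node, so erasing the head entry copies the next node's
// fields into the head instead of unlinking it; only non-head nodes are
// allocated from TableAllocator.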
786
787void GVNPass::LeaderMap::verifyRemoved(const Value *V) const {
788 // Walk through the value number scope to make sure the instruction isn't
789 // ferreted away in it.
790 for (const auto &I : NumToLeaders) {
791 (void)I;
792 assert(I.second.Entry.Val != V && "Inst still in value numbering scope!");
793 assert(
794 std::none_of(leader_iterator(&I.second), leader_iterator(nullptr),
795 [=](const LeaderTableEntry &E) { return E.Val == V; }) &&
796 "Inst still in value numbering scope!");
797 }
798}
799
800//===----------------------------------------------------------------------===//
801// GVN Pass
802//===----------------------------------------------------------------------===//
803
804 bool GVNPass::isPREEnabled() const {
805 return Options.AllowPRE.value_or(GVNEnablePRE);
806}
807
808 bool GVNPass::isLoadPREEnabled() const {
809 return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE);
810}
811
812 bool GVNPass::isLoadInLoopPREEnabled() const {
813 return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE);
814}
815
816 bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
817 return Options.AllowLoadPRESplitBackedge.value_or(
818 GVNEnableSplitBackedgeInLoadPRE);
819}
820
821 bool GVNPass::isMemDepEnabled() const {
822 return Options.AllowMemDep.value_or(GVNEnableMemDep);
823}
824
825 bool GVNPass::isMemorySSAEnabled() const {
826 return Options.AllowMemorySSA.value_or(GVNEnableMemorySSA);
827}
828
829 PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
830 // FIXME: The order of evaluation of these 'getResult' calls is very
831 // significant! Re-ordering these variables will cause GVN when run alone to
832 // be less effective! We should fix memdep and basic-aa to not exhibit this
833 // behavior, but until then don't change the order here.
834 auto &AC = AM.getResult<AssumptionAnalysis>(F);
835 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
836 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
837 auto &AA = AM.getResult<AAManager>(F);
838 auto *MemDep =
839 isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
840 auto &LI = AM.getResult<LoopAnalysis>(F);
841 auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
842 if (isMemorySSAEnabled() && !MSSA) {
843 assert(!MemDep &&
844 "On-demand computation of MemSSA implies that MemDep is disabled!");
845 MSSA = &AM.getResult<MemorySSAAnalysis>(F);
846 }
847 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
848 bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
849 MSSA ? &MSSA->getMSSA() : nullptr);
850 if (!Changed)
851 return PreservedAnalyses::all();
855 if (MSSA)
858 return PA;
859}
860
861 void GVNPass::printPipeline(
862 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
863 static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
864 OS, MapClassName2PassName);
865
866 OS << '<';
867 if (Options.AllowPRE != std::nullopt)
868 OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
869 if (Options.AllowLoadPRE != std::nullopt)
870 OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
871 if (Options.AllowLoadPRESplitBackedge != std::nullopt)
872 OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
873 << "split-backedge-load-pre;";
874 if (Options.AllowMemDep != std::nullopt)
875 OS << (*Options.AllowMemDep ? "" : "no-") << "memdep;";
876 if (Options.AllowMemorySSA != std::nullopt)
877 OS << (*Options.AllowMemorySSA ? "" : "no-") << "memoryssa";
878 OS << '>';
879}
880
881#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
882LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
883 errs() << "{\n";
884 for (auto &I : d) {
885 errs() << I.first << "\n";
886 I.second->dump();
887 }
888 errs() << "}\n";
889}
890#endif
891
892enum class AvailabilityState : char {
893 /// We know the block *is not* fully available. This is a fixpoint.
894 Unavailable = 0,
895 /// We know the block *is* fully available. This is a fixpoint.
896 Available = 1,
897 /// We do not know whether the block is fully available or not,
898 /// but we are currently speculating that it will be.
899 /// If it would have turned out that the block was, in fact, not fully
900 /// available, this would have been cleaned up into an Unavailable.
901 SpeculativelyAvailable = 2,
902};
903
904/// Return true if we can prove that the value
905/// we're analyzing is fully available in the specified block. As we go, keep
906/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
907/// map is actually a tri-state map with the following values:
908/// 0) we know the block *is not* fully available.
909/// 1) we know the block *is* fully available.
910/// 2) we do not know whether the block is fully available or not, but we are
911/// currently speculating that it will be.
912 static bool IsValueFullyAvailableInBlock(
913 BasicBlock *BB,
914 DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
915 SmallVector<BasicBlock *, 32> Worklist;
916 std::optional<BasicBlock *> UnavailableBB;
917
918 // The number of times we didn't find an entry for a block in a map and
919 // optimistically inserted an entry marking block as speculatively available.
920 unsigned NumNewNewSpeculativelyAvailableBBs = 0;
921
922#ifndef NDEBUG
923 SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
924 SmallVector<BasicBlock *, 32> AvailableBBs;
925#endif
926
927 Worklist.emplace_back(BB);
928 while (!Worklist.empty()) {
929 BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
930 // Optimistically assume that the block is Speculatively Available and check
931 // to see if we already know about this block in one lookup.
932 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
933 FullyAvailableBlocks.try_emplace(
934 CurrBB, AvailabilityState::SpeculativelyAvailable);
935 AvailabilityState &State = IV.first->second;
936
937 // Did the entry already exist for this block?
938 if (!IV.second) {
939 if (State == AvailabilityState::Unavailable) {
940 UnavailableBB = CurrBB;
941 break; // Backpropagate unavailability info.
942 }
943
944#ifndef NDEBUG
945 AvailableBBs.emplace_back(CurrBB);
946#endif
947 continue; // Don't recurse further, but continue processing worklist.
948 }
949
950 // No entry found for block.
951 ++NumNewNewSpeculativelyAvailableBBs;
952 bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;
953
954 // If we have exhausted our budget, mark this block as unavailable.
955 // Also, if this block has no predecessors, the value isn't live-in here.
956 if (OutOfBudget || pred_empty(CurrBB)) {
957 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
958 State = AvailabilityState::Unavailable;
959 UnavailableBB = CurrBB;
960 break; // Backpropagate unavailability info.
961 }
962
963 // Tentatively consider this block as speculatively available.
964#ifndef NDEBUG
965 NewSpeculativelyAvailableBBs.insert(CurrBB);
966#endif
967 // And further recurse into block's predecessors, in depth-first order!
968 Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
969 }
970
971#if LLVM_ENABLE_STATS
972 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
973 NumNewNewSpeculativelyAvailableBBs);
974#endif
975
976 // If the block isn't marked as fixpoint yet
977 // (the Unavailable and Available states are fixpoints)
978 auto MarkAsFixpointAndEnqueueSuccessors =
979 [&](BasicBlock *BB, AvailabilityState FixpointState) {
980 auto It = FullyAvailableBlocks.find(BB);
981 if (It == FullyAvailableBlocks.end())
982 return; // Never queried this block, leave as-is.
983 switch (AvailabilityState &State = It->second) {
984 case AvailabilityState::Unavailable:
985 case AvailabilityState::Available:
986 return; // Don't backpropagate further, continue processing worklist.
987 case AvailabilityState::SpeculativelyAvailable: // Fix it!
988 State = FixpointState;
989#ifndef NDEBUG
990 assert(NewSpeculativelyAvailableBBs.erase(BB) &&
991 "Found a speculatively available successor leftover?");
992#endif
993 // Queue successors for further processing.
994 Worklist.append(succ_begin(BB), succ_end(BB));
995 return;
996 }
997 };
998
999 if (UnavailableBB) {
1000 // Okay, we have encountered an unavailable block.
1001 // Mark speculatively available blocks reachable from UnavailableBB as
1002 // unavailable as well. Paths are terminated when they reach blocks not in
1003 // FullyAvailableBlocks or they are not marked as speculatively available.
1004 Worklist.clear();
1005 Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
1006 while (!Worklist.empty())
1007 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
1008 AvailabilityState::Unavailable);
1009 }
1010
1011#ifndef NDEBUG
1012 Worklist.clear();
1013 for (BasicBlock *AvailableBB : AvailableBBs)
1014 Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
1015 while (!Worklist.empty())
1016 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
1017 AvailabilityState::Available);
1018
1019 assert(NewSpeculativelyAvailableBBs.empty() &&
1020 "Must have fixed all the new speculatively available blocks.");
1021#endif
1022
1023 return !UnavailableBB;
1024}
1025
1026/// If the specified OldValue exists in ValuesPerBlock, replace its value with
1027/// NewValue.
1028 static void replaceValuesPerBlockEntry(
1029 SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, Value *OldValue,
1030 Value *NewValue) {
1031 for (AvailableValueInBlock &V : ValuesPerBlock) {
1032 if (V.AV.Val == OldValue)
1033 V.AV.Val = NewValue;
1034 if (V.AV.isSelectValue()) {
1035 if (V.AV.V1 == OldValue)
1036 V.AV.V1 = NewValue;
1037 if (V.AV.V2 == OldValue)
1038 V.AV.V2 = NewValue;
1039 }
1040 }
1041}
1042
1043/// Given a set of loads specified by ValuesPerBlock,
1044/// construct SSA form, allowing us to eliminate Load. This returns the value
1045/// that should be used at Load's definition site.
1046static Value *
1047 ConstructSSAForLoadSet(LoadInst *Load,
1048 SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
1049 GVNPass &gvn) {
1050 // Check for the fully redundant, dominating load case. In this case, we can
1051 // just use the dominating value directly.
1052 if (ValuesPerBlock.size() == 1 &&
1053 gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
1054 Load->getParent())) {
1055 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
1056 "Dead BB dominate this block");
1057 return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
1058 }
1059
1060 // Otherwise, we have to construct SSA form.
1061 SmallVector<PHINode *, 8> NewPHIs;
1062 SSAUpdater SSAUpdate(&NewPHIs);
1063 SSAUpdate.Initialize(Load->getType(), Load->getName());
1064
1065 for (const AvailableValueInBlock &AV : ValuesPerBlock) {
1066 BasicBlock *BB = AV.BB;
1067
1068 if (AV.AV.isUndefValue())
1069 continue;
1070
1071 if (SSAUpdate.HasValueForBlock(BB))
1072 continue;
1073
1074 // If the value is the load that we will be eliminating, and the block it's
1075 // available in is the block that the load is in, then don't add it as
1076 // SSAUpdater will resolve the value to the relevant phi which may let it
1077 // avoid phi construction entirely if there's actually only one value.
1078 if (BB == Load->getParent() &&
1079 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
1080 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
1081 continue;
1082
1083 SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
1084 }
1085
1086 // Perform PHI construction.
1087 return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
1088}
1089
1090 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
1091 Instruction *InsertPt,
1092 GVNPass &gvn) const {
1093 Value *Res;
1094 Type *LoadTy = Load->getType();
1095 const DataLayout &DL = Load->getDataLayout();
1096 if (isSimpleValue()) {
1097 Res = getSimpleValue();
1098 if (Res->getType() != LoadTy) {
1099 Res = getValueForLoad(Res, Offset, LoadTy, InsertPt, Load->getFunction());
1100
1101 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
1102 << " " << *getSimpleValue() << '\n'
1103 << *Res << '\n'
1104 << "\n\n\n");
1105 }
1106 } else if (isCoercedLoadValue()) {
1107 LoadInst *CoercedLoad = getCoercedLoadValue();
1108 if (CoercedLoad->getType() == LoadTy && Offset == 0) {
1109 Res = CoercedLoad;
1110 combineMetadataForCSE(CoercedLoad, Load, false);
1111 } else {
1112 Res = getValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt,
1113 Load->getFunction());
1114 // We are adding a new user for this load, for which the original
1115 // metadata may not hold. Additionally, the new load may have a different
1116 // size and type, so their metadata cannot be combined in any
1117 // straightforward way.
1118 // Drop all metadata that is not known to cause immediate UB on violation,
1119 // unless the load has !noundef, in which case all metadata violations
1120 // will be promoted to UB.
1121 // TODO: We can combine noalias/alias.scope metadata here, because it is
1122 // independent of the load type.
1123 if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))
1124 CoercedLoad->dropUnknownNonDebugMetadata(
1125 {LLVMContext::MD_dereferenceable,
1126 LLVMContext::MD_dereferenceable_or_null,
1127 LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
1128 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
1129 << " " << *getCoercedLoadValue() << '\n'
1130 << *Res << '\n'
1131 << "\n\n\n");
1132 }
1133 } else if (isMemIntrinValue()) {
1134 Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
1135 InsertPt, DL);
1136 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
1137 << " " << *getMemIntrinValue() << '\n'
1138 << *Res << '\n'
1139 << "\n\n\n");
1140 } else if (isSelectValue()) {
1141 // Introduce a new value select for a load from an eligible pointer select.
1142 SelectInst *Sel = getSelectValue();
1143 assert(V1 && V2 && "both value operands of the select must be present");
1144 Res =
1145 SelectInst::Create(Sel->getCondition(), V1, V2, "", Sel->getIterator());
1146 // We use the DebugLoc from the original load here, as this instruction
1147 // materializes the value that would previously have been loaded.
1148 cast<SelectInst>(Res)->setDebugLoc(Load->getDebugLoc());
1149 } else {
1150 llvm_unreachable("Should not materialize value from dead block");
1151 }
1152 assert(Res && "failed to materialize?");
1153 return Res;
1154}
1155
1156static bool isLifetimeStart(const Instruction *Inst) {
1157 if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
1158 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1159 return false;
1160}
1161
1162/// Assuming To can be reached from both From and Between, does Between lie on
1163/// every path from From to To?
1164static bool liesBetween(const Instruction *From, Instruction *Between,
1165 const Instruction *To, DominatorTree *DT) {
1166 if (From->getParent() == Between->getParent())
1167 return DT->dominates(From, Between);
1168 SmallSet<BasicBlock *, 1> Exclusion;
1169 Exclusion.insert(Between->getParent());
1170 return !isPotentiallyReachable(From, To, &Exclusion, DT);
1171}
1172
1173/// Try to locate the three instructions involved in a missed
1174/// load-elimination case that is due to an intervening store.
1175 static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
1176 DominatorTree *DT,
1177 OptimizationRemarkEmitter *ORE) {
1178 using namespace ore;
1179
1180 Instruction *OtherAccess = nullptr;
1181
1182 OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
1183 R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
1184 << setExtraArgs();
1185
1186 for (auto *U : Load->getPointerOperand()->users()) {
1187 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1188 auto *I = cast<Instruction>(U);
1189 if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
1190 // Use the most immediately dominating value
1191 if (OtherAccess) {
1192 if (DT->dominates(OtherAccess, I))
1193 OtherAccess = I;
1194 else
1195 assert(U == OtherAccess || DT->dominates(I, OtherAccess));
1196 } else
1197 OtherAccess = I;
1198 }
1199 }
1200 }
1201
1202 if (!OtherAccess) {
1203 // There is no dominating use; check if we can find the closest non-dominating
1204 // use that lies between any other potentially available use and Load.
1205 for (auto *U : Load->getPointerOperand()->users()) {
1206 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1207 auto *I = cast<Instruction>(U);
1208 if (I->getFunction() == Load->getFunction() &&
1209 isPotentiallyReachable(I, Load, nullptr, DT)) {
1210 if (OtherAccess) {
1211 if (liesBetween(OtherAccess, I, Load, DT)) {
1212 OtherAccess = I;
1213 } else if (!liesBetween(I, OtherAccess, Load, DT)) {
1214 // These uses are both partially available at Load were it not for
1215 // the clobber, but neither lies strictly after the other.
1216 OtherAccess = nullptr;
1217 break;
1218 } // else: keep current OtherAccess since it lies between U and Load
1219 } else {
1220 OtherAccess = I;
1221 }
1222 }
1223 }
1224 }
1225 }
1226
1227 if (OtherAccess)
1228 R << " in favor of " << NV("OtherAccess", OtherAccess);
1229
1230 R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
1231
1232 ORE->emit(R);
1233}
1234
1235// Find non-clobbered value for Loc memory location in extended basic block
1236// (chain of basic blocks with single predecessors) starting From instruction.
1237static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,
1238 Instruction *From, AAResults *AA) {
1239 uint32_t NumVisitedInsts = 0;
1240 BasicBlock *FromBB = From->getParent();
1241 BatchAAResults BatchAA(*AA);
1242 for (BasicBlock *BB = FromBB; BB; BB = BB->getSinglePredecessor())
1243 for (auto *Inst = BB == FromBB ? From : BB->getTerminator();
1244 Inst != nullptr; Inst = Inst->getPrevNonDebugInstruction()) {
1245 // Stop the search if limit is reached.
1246 if (++NumVisitedInsts > MaxNumVisitedInsts)
1247 return nullptr;
1248 if (isModSet(BatchAA.getModRefInfo(Inst, Loc)))
1249 return nullptr;
1250 if (auto *LI = dyn_cast<LoadInst>(Inst))
1251 if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
1252 return LI;
1253 }
1254 return nullptr;
1255}
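// findDominatingValue is used by AnalyzeLoadAvailability below when a load's
// address is a pointer select: it is called once per select arm to find a
// non-clobbered load of that pointer on the straight-line path leading to the
// dependency, so the original load can be rewritten as a value select.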
1256
1257std::optional<AvailableValue>
1258GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
1259 Value *Address) {
1260 assert(Load->isUnordered() && "rules below are incorrect for ordered access");
1261 assert(DepInfo.isLocal() && "expected a local dependence");
1262
1263 Instruction *DepInst = DepInfo.getInst();
1264
1265 const DataLayout &DL = Load->getDataLayout();
1266 if (DepInfo.isClobber()) {
1267 // If the dependence is to a store that writes to a superset of the bits
1268 // read by the load, we can extract the bits we need for the load from the
1269 // stored value.
1270 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1271 // Can't forward from non-atomic to atomic without violating memory model.
1272 if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
1273 int Offset =
1274 analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
1275 if (Offset != -1)
1276 return AvailableValue::get(DepSI->getValueOperand(), Offset);
1277 }
1278 }
1279
1280 // Check to see if we have something like this:
1281 // load i32* P
1282 // load i8* (P+1)
1283 // if we have this, replace the later with an extraction from the former.
1284 if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
1285 // If this is a clobber and L is the first instruction in its block, then
1286 // we have the first instruction in the entry block.
1287 // Can't forward from non-atomic to atomic without violating memory model.
1288 if (DepLoad != Load && Address &&
1289 Load->isAtomic() <= DepLoad->isAtomic()) {
1290 Type *LoadType = Load->getType();
1291 int Offset = -1;
1292
1293 // If MD reported clobber, check it was nested.
1294 if (DepInfo.isClobber() &&
1295 canCoerceMustAliasedValueToLoad(DepLoad, LoadType,
1296 DepLoad->getFunction())) {
1297 const auto ClobberOff = MD->getClobberOffset(DepLoad);
1298 // GVN does not handle negative offsets.
1299 Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
1300 ? -1
1301 : *ClobberOff;
1302 }
1303 if (Offset == -1)
1304 Offset =
1305 analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
1306 if (Offset != -1)
1307 return AvailableValue::getLoad(DepLoad, Offset);
1308 }
1309 }
1310
1311 // If the clobbering value is a memset/memcpy/memmove, see if we can
1312 // forward a value on from it.
1313 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1314 if (Address && !Load->isAtomic()) {
1315 int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
1316 DepMI, DL);
1317 if (Offset != -1)
1318 return AvailableValue::getMI(DepMI, Offset);
1319 }
1320 }
1321
1322 // Nothing known about this clobber, have to be conservative
1323 LLVM_DEBUG(
1324 // fast print dep, using operator<< on instruction is too slow.
1325 dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1326 dbgs() << " is clobbered by " << *DepInst << '\n';);
1327 if (ORE->allowExtraAnalysis(DEBUG_TYPE))
1328 reportMayClobberedLoad(Load, DepInfo, DT, ORE);
1329
1330 return std::nullopt;
1331 }
1332 assert(DepInfo.isDef() && "follows from above");
1333
1334 // Loading the alloca -> undef.
1335 // Loading immediately after lifetime begin -> undef.
1336 if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst))
1337 return AvailableValue::get(UndefValue::get(Load->getType()));
1338
1339 if (Constant *InitVal =
1340 getInitialValueOfAllocation(DepInst, TLI, Load->getType()))
1341 return AvailableValue::get(InitVal);
1342
1343 if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1344 // Reject loads and stores that are to the same address but are of
1345 // different types if we have to. If the stored value is convertable to
1346 // the loaded value, we can reuse it.
1347 if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
1348 S->getFunction()))
1349 return std::nullopt;
1350
1351 // Can't forward from non-atomic to atomic without violating memory model.
1352 if (S->isAtomic() < Load->isAtomic())
1353 return std::nullopt;
1354
1355 return AvailableValue::get(S->getValueOperand());
1356 }
1357
1358 if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1359 // If the types mismatch and we can't handle it, reject reuse of the load.
1360 // If the stored value is larger or equal to the loaded value, we can reuse
1361 // it.
1362 if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(),
1363 LD->getFunction()))
1364 return std::nullopt;
1365
1366 // Can't forward from non-atomic to atomic without violating memory model.
1367 if (LD->isAtomic() < Load->isAtomic())
1368 return std::nullopt;
1369
1370 return AvailableValue::getLoad(LD);
1371 }
1372
1373 // Check if a load whose address depends on a select can be converted to a
1374 // select between load values. There must be no instructions between the found
1375 // loads and DepInst that may clobber the loads.
1376 if (auto *Sel = dyn_cast<SelectInst>(DepInst)) {
1377 assert(Sel->getType() == Load->getPointerOperandType());
1378 auto Loc = MemoryLocation::get(Load);
1379 Value *V1 =
1380 findDominatingValue(Loc.getWithNewPtr(Sel->getTrueValue()),
1381 Load->getType(), DepInst, getAliasAnalysis());
1382 if (!V1)
1383 return std::nullopt;
1384 Value *V2 =
1385 findDominatingValue(Loc.getWithNewPtr(Sel->getFalseValue()),
1386 Load->getType(), DepInst, getAliasAnalysis());
1387 if (!V2)
1388 return std::nullopt;
1389 return AvailableValue::getSelect(Sel, V1, V2);
1390 }
1391
1392 // Unknown def - must be conservative
1393 LLVM_DEBUG(
1394 // fast print dep, using operator<< on instruction is too slow.
1395 dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1396 dbgs() << " has unknown def " << *DepInst << '\n';);
1397 return std::nullopt;
1398}
1399
1400void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
1401 AvailValInBlkVect &ValuesPerBlock,
1402 UnavailBlkVect &UnavailableBlocks) {
1403 // Filter out useless results (non-locals, etc). Keep track of the blocks
1404 // where we have a value available in repl, also keep track of whether we see
1405 // dependencies that produce an unknown value for the load (such as a call
1406 // that could potentially clobber the load).
1407 for (const auto &Dep : Deps) {
1408 BasicBlock *DepBB = Dep.getBB();
1409 MemDepResult DepInfo = Dep.getResult();
1410
1411 if (DeadBlocks.count(DepBB)) {
1412 // A dead dependent mem-op is disguised as a load evaluating the same
1413 // value as the load in question.
1414 ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1415 continue;
1416 }
1417
1418 if (!DepInfo.isLocal()) {
1419 UnavailableBlocks.push_back(DepBB);
1420 continue;
1421 }
1422
1423 // The address being loaded in this non-local block may not be the same as
1424 // the pointer operand of the load if PHI translation occurs. Make sure
1425 // to consider the right address.
1426 if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
1427 // Subtlety: because we know this was a non-local dependency, we know
1428 // it's safe to materialize anywhere between the instruction within
1429 // DepInfo and the end of its block.
1430 ValuesPerBlock.push_back(
1431 AvailableValueInBlock::get(DepBB, std::move(*AV)));
1432 } else {
1433 UnavailableBlocks.push_back(DepBB);
1434 }
1435 }
1436
1437 assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1438 "post condition violation");
1439}
1440
1441/// Given the following code, v1 is partially available on some edges, but not
1442/// available on the edge from PredBB. This function tries to find if there is
1443/// another identical load in the other successor of PredBB.
1444///
1445/// v0 = load %addr
1446/// br %LoadBB
1447///
1448/// LoadBB:
1449/// v1 = load %addr
1450/// ...
1451///
1452/// PredBB:
1453/// ...
1454/// br %cond, label %LoadBB, label %SuccBB
1455///
1456/// SuccBB:
1457/// v2 = load %addr
1458/// ...
1459///
1460LoadInst *GVNPass::findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
1461 LoadInst *Load) {
1462 // For simplicity, we only handle the case where Pred has two successors.
1463 auto *Term = Pred->getTerminator();
1464 if (Term->getNumSuccessors() != 2 || Term->isSpecialTerminator())
1465 return nullptr;
1466 auto *SuccBB = Term->getSuccessor(0);
1467 if (SuccBB == LoadBB)
1468 SuccBB = Term->getSuccessor(1);
1469 if (!SuccBB->getSinglePredecessor())
1470 return nullptr;
1471
1472 unsigned int NumInsts = MaxNumInsnsPerBlock;
1473 for (Instruction &Inst : *SuccBB) {
1474 if (Inst.isDebugOrPseudoInst())
1475 continue;
1476 if (--NumInsts == 0)
1477 return nullptr;
1478
1479 if (!Inst.isIdenticalTo(Load))
1480 continue;
1481
1482 MemDepResult Dep = MD->getDependency(&Inst);
1483 // If an identical load doesn't depend on any local instructions, it can
1484 // be safely moved to PredBB.
1485 // Also check for the implicit control flow instructions. See the comments
1486 // in PerformLoadPRE for details.
1487 if (Dep.isNonLocal() && !ICF->isDominatedByICFIFromSameBlock(&Inst))
1488 return cast<LoadInst>(&Inst);
1489
1490 // Otherwise something in the same BB clobbers the memory; we can't
1491 // move this and the later load to PredBB.
1492 return nullptr;
1493 }
1494
1495 return nullptr;
1496}
1497
1498void GVNPass::eliminatePartiallyRedundantLoad(
1499 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1500 MapVector<BasicBlock *, Value *> &AvailableLoads,
1501 MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad) {
1502 for (const auto &AvailableLoad : AvailableLoads) {
1503 BasicBlock *UnavailableBlock = AvailableLoad.first;
1504 Value *LoadPtr = AvailableLoad.second;
1505
1506 auto *NewLoad = new LoadInst(
1507 Load->getType(), LoadPtr, Load->getName() + ".pre", Load->isVolatile(),
1508 Load->getAlign(), Load->getOrdering(), Load->getSyncScopeID(),
1509 UnavailableBlock->getTerminator()->getIterator());
1510 NewLoad->setDebugLoc(Load->getDebugLoc());
1511 if (MSSAU) {
1512 auto *NewAccess = MSSAU->createMemoryAccessInBB(
1513 NewLoad, nullptr, NewLoad->getParent(), MemorySSA::BeforeTerminator);
1514 if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1515 MSSAU->insertDef(NewDef, /*RenameUses=*/true);
1516 else
1517 MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
1518 }
1519
1520 // Transfer the old load's AA tags to the new load.
1521 AAMDNodes Tags = Load->getAAMetadata();
1522 if (Tags)
1523 NewLoad->setAAMetadata(Tags);
1524
1525 if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
1526 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1527 if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
1528 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1529 if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
1530 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1531 if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
1532 if (LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
1533 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1534
1535 // We do not propagate the old load's debug location, because the new
1536 // load now lives in a different BB, and we want to avoid a jumpy line
1537 // table.
1538 // FIXME: How do we retain source locations without causing poor debugging
1539 // behavior?
1540
1541 // Add the newly created load.
1542 ValuesPerBlock.push_back(
1543 AvailableValueInBlock::get(UnavailableBlock, NewLoad));
1544 MD->invalidateCachedPointerInfo(LoadPtr);
1545 LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1546
1547 // For PredBB in CriticalEdgePredAndLoad we need to replace the uses of the
1548 // old load instruction with the newly created load instruction.
1549 if (CriticalEdgePredAndLoad) {
1550 auto I = CriticalEdgePredAndLoad->find(UnavailableBlock);
1551 if (I != CriticalEdgePredAndLoad->end()) {
1552 ++NumPRELoadMoved2CEPred;
1553 ICF->insertInstructionTo(NewLoad, UnavailableBlock);
1554 LoadInst *OldLoad = I->second;
1555 combineMetadataForCSE(NewLoad, OldLoad, false);
1556 OldLoad->replaceAllUsesWith(NewLoad);
1557 replaceValuesPerBlockEntry(ValuesPerBlock, OldLoad, NewLoad);
1558 if (uint32_t ValNo = VN.lookup(OldLoad, false))
1559 LeaderTable.erase(ValNo, OldLoad, OldLoad->getParent());
1560 VN.erase(OldLoad);
1561 removeInstruction(OldLoad);
1562 }
1563 }
1564 }
1565
1566 // Perform PHI construction.
1567 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1568 // ConstructSSAForLoadSet is responsible for combining metadata.
1569 ICF->removeUsersOf(Load);
1570 Load->replaceAllUsesWith(V);
1571 if (isa<PHINode>(V))
1572 V->takeName(Load);
1573 if (Instruction *I = dyn_cast<Instruction>(V))
1574 I->setDebugLoc(Load->getDebugLoc());
1575 if (V->getType()->isPtrOrPtrVectorTy())
1578 ORE->emit([&]() {
1579 return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
1580 << "load eliminated by PRE";
1581 });
1582}
1583
1584bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1585 UnavailBlkVect &UnavailableBlocks) {
1586 // Okay, we have *some* definitions of the value. This means that the value
1587 // is available in some of our (transitive) predecessors. Let's think about
1588 // doing PRE of this load. This will involve inserting a new load into the
1589 // predecessor when it's not available. We could do this in general, but
1590 // prefer to not increase code size. As such, we only do this when we know
1591 // that we only have to insert *one* load (which means we're basically moving
1592 // the load, not inserting a new one).
1593
1594 SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1595 UnavailableBlocks.end());
1596
1597 // Let's find the first basic block with more than one predecessor. Walk
1598 // backwards through predecessors if needed.
1599 BasicBlock *LoadBB = Load->getParent();
1600 BasicBlock *TmpBB = LoadBB;
1601
1602 // Check that there are no implicit control flow instructions above our load in
1603 // its block. If there is an instruction that doesn't always pass the
1604 // execution to the following instruction, then moving through it may become
1605 // invalid. For example:
1606 //
1607 // int arr[LEN];
1608 // int index = ???;
1609 // ...
1610 // guard(0 <= index && index < LEN);
1611 // use(arr[index]);
1612 //
1613 // It is illegal to move the array access to any point above the guard,
1614 // because if the index is out of bounds we should deoptimize rather than
1615 // access the array.
1616 // Check that there is no guard in this block above our instruction.
1617 bool MustEnsureSafetyOfSpeculativeExecution =
1618 ICF->isDominatedByICFIFromSameBlock(Load);
1619
1620 while (TmpBB->getSinglePredecessor()) {
1621 TmpBB = TmpBB->getSinglePredecessor();
1622 if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1623 return false;
1624 if (Blockers.count(TmpBB))
1625 return false;
1626
1627 // If any of these blocks has more than one successor (i.e. if the edge we
1628 // just traversed was critical), then there are other paths through this
1629 // block along which the load may not be anticipated. Hoisting the load
1630 // above this block would be adding the load to execution paths along
1631 // which it was not previously executed.
1632 if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1633 return false;
1634
1635 // Check that there is no implicit control flow in a block above.
1636 MustEnsureSafetyOfSpeculativeExecution =
1637 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1638 }
1639
1640 assert(TmpBB);
1641 LoadBB = TmpBB;
1642
1643 // Check to see how many predecessors have the loaded value fully
1644 // available.
1645 MapVector<BasicBlock *, Value *> PredLoads;
1646 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1647 for (const AvailableValueInBlock &AV : ValuesPerBlock)
1648 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1649 for (BasicBlock *UnavailableBB : UnavailableBlocks)
1650 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1651
1652 // The edge from Pred to LoadBB is a critical edge and will be split.
1653 SmallVector<BasicBlock *, 4> CriticalEdgePredSplit;
1654 // The edge from Pred to LoadBB is a critical edge, and another successor of
1655 // Pred contains a load that can be moved to Pred. This data structure maps
1656 // such a Pred to the movable load.
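//
// Illustrative shape of this case (sketch only; names are made up):
//
//   Pred:
//     br i1 %c, label %LoadBB, label %Other   ; Pred->LoadBB is critical
//   Other:
//     %v = load i32, ptr %p                   ; load that can move to Pred
//   LoadBB:
//     %u = load i32, ptr %p
//
// Instead of splitting the critical edge, the load in Other is hoisted into
// Pred, which also makes the value available along the Pred->LoadBB edge.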
1657 MapVector<BasicBlock *, LoadInst *> CriticalEdgePredAndLoad;
1658 for (BasicBlock *Pred : predecessors(LoadBB)) {
1659 // If any predecessor block is an EH pad that does not allow non-PHI
1660 // instructions before the terminator, we can't PRE the load.
1661 if (Pred->getTerminator()->isEHPad()) {
1662 LLVM_DEBUG(
1663 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1664 << Pred->getName() << "': " << *Load << '\n');
1665 return false;
1666 }
1667
1668 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1669 continue;
1670 }
1671
1672 if (Pred->getTerminator()->getNumSuccessors() != 1) {
1673 if (isa<IndirectBrInst>(Pred->getTerminator())) {
1674 LLVM_DEBUG(
1675 dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1676 << Pred->getName() << "': " << *Load << '\n');
1677 return false;
1678 }
1679
1680 if (LoadBB->isEHPad()) {
1681 LLVM_DEBUG(
1682 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1683 << Pred->getName() << "': " << *Load << '\n');
1684 return false;
1685 }
1686
1687 // Do not split backedge as it will break the canonical loop form.
1688 if (!isLoadPRESplitBackedgeEnabled())
1689 if (DT->dominates(LoadBB, Pred)) {
1690 LLVM_DEBUG(
1691 dbgs()
1692 << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1693 << Pred->getName() << "': " << *Load << '\n');
1694 return false;
1695 }
1696
1697 if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
1698 CriticalEdgePredAndLoad[Pred] = LI;
1699 else
1700 CriticalEdgePredSplit.push_back(Pred);
1701 } else {
1702 // Only add the predecessors that will not be split for now.
1703 PredLoads[Pred] = nullptr;
1704 }
1705 }
1706
1707 // Decide whether PRE is profitable for this load.
1708 unsigned NumInsertPreds = PredLoads.size() + CriticalEdgePredSplit.size();
1709 unsigned NumUnavailablePreds = NumInsertPreds +
1710 CriticalEdgePredAndLoad.size();
1711 assert(NumUnavailablePreds != 0 &&
1712 "Fully available value should already be eliminated!");
1713 (void)NumUnavailablePreds;
1714
1715 // If we need to insert a new load in multiple predecessors, reject it.
1716 // FIXME: If we could restructure the CFG, we could make a common pred with
1717 // all the preds that don't have an available Load and insert a new load into
1718 // that one block.
1719 if (NumInsertPreds > 1)
1720 return false;
1721
1722 // Now we know where we will insert the load. We must ensure that it is safe
1723 // to speculatively execute the load at those points.
1724 if (MustEnsureSafetyOfSpeculativeExecution) {
1725 if (CriticalEdgePredSplit.size())
1726 if (!isSafeToSpeculativelyExecute(Load, &*LoadBB->getFirstNonPHIIt(), AC,
1727 DT))
1728 return false;
1729 for (auto &PL : PredLoads)
1730 if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), AC,
1731 DT))
1732 return false;
1733 for (auto &CEP : CriticalEdgePredAndLoad)
1734 if (!isSafeToSpeculativelyExecute(Load, CEP.first->getTerminator(), AC,
1735 DT))
1736 return false;
1737 }
1738
1739 // Split critical edges, and update the unavailable predecessors accordingly.
1740 for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
1741 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1742 assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1743 PredLoads[NewPred] = nullptr;
1744 LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1745 << LoadBB->getName() << '\n');
1746 }
1747
1748 for (auto &CEP : CriticalEdgePredAndLoad)
1749 PredLoads[CEP.first] = nullptr;
1750
1751 // Check if the load can safely be moved to all the unavailable predecessors.
1752 bool CanDoPRE = true;
1753 const DataLayout &DL = Load->getDataLayout();
1754 SmallVector<Instruction *, 8> NewInsts;
1755 for (auto &PredLoad : PredLoads) {
1756 BasicBlock *UnavailablePred = PredLoad.first;
1757
1758 // Do PHI translation to get its value in the predecessor if necessary. The
1759 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1760 // We do the translation for each edge we skipped by going from Load's block
1761 // to LoadBB, otherwise we might miss pieces needing translation.
1762
1763 // If all preds have a single successor, then we know it is safe to insert
1764 // the load on the pred (?!?), so we can insert code to materialize the
1765 // pointer if it is not available.
1766 Value *LoadPtr = Load->getPointerOperand();
1767 BasicBlock *Cur = Load->getParent();
1768 while (Cur != LoadBB) {
1769 PHITransAddr Address(LoadPtr, DL, AC);
1770 LoadPtr = Address.translateWithInsertion(Cur, Cur->getSinglePredecessor(),
1771 *DT, NewInsts);
1772 if (!LoadPtr) {
1773 CanDoPRE = false;
1774 break;
1775 }
1776 Cur = Cur->getSinglePredecessor();
1777 }
1778
1779 if (LoadPtr) {
1780 PHITransAddr Address(LoadPtr, DL, AC);
1781 LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1782 NewInsts);
1783 }
1784 // If we couldn't find or insert a computation of this phi translated value,
1785 // we fail PRE.
1786 if (!LoadPtr) {
1787 LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1788 << *Load->getPointerOperand() << "\n");
1789 CanDoPRE = false;
1790 break;
1791 }
1792
1793 PredLoad.second = LoadPtr;
1794 }
1795
1796 if (!CanDoPRE) {
1797 while (!NewInsts.empty()) {
1798 // Erase instructions generated by the failed PHI translation before
1799 // trying to number them. PHI translation might insert instructions
1800 // in basic blocks other than the current one, and we delete them
1801 // directly, as markInstructionForDeletion only allows removing from the
1802 // current basic block.
1803 NewInsts.pop_back_val()->eraseFromParent();
1804 }
1805 // HINT: Don't revert the edge-splitting, as the following transformation
1806 // may also need to split these critical edges.
1807 return !CriticalEdgePredSplit.empty();
1808 }
1809
1810 // Okay, we can eliminate this load by inserting a reload in the predecessor
1811 // and using PHI construction to get the value in the other predecessors, do
1812 // it.
1813 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1814 LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1815 << " INSTS: " << *NewInsts.back()
1816 << '\n');
1817
1818 // Assign value numbers to the new instructions.
1819 for (Instruction *I : NewInsts) {
1820 // Instructions that have been inserted in predecessor(s) to materialize
1821 // the load address do not retain their original debug locations. Doing
1822 // so could lead to confusing (but correct) source attributions.
1823 I->updateLocationAfterHoist();
1824
1825 // FIXME: We really _ought_ to insert these value numbers into their
1826 // parent's availability map. However, in doing so, we risk getting into
1827 // ordering issues. If a block hasn't been processed yet, we would be
1828 // marking a value as AVAIL-IN, which isn't what we intend.
1829 VN.lookupOrAdd(I);
1830 }
1831
1832 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
1833 &CriticalEdgePredAndLoad);
1834 ++NumPRELoad;
1835 return true;
1836}
1837
1838bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1839 AvailValInBlkVect &ValuesPerBlock,
1840 UnavailBlkVect &UnavailableBlocks) {
1841 const Loop *L = LI->getLoopFor(Load->getParent());
1842 // TODO: Generalize to other loop blocks that dominate the latch.
1843 if (!L || L->getHeader() != Load->getParent())
1844 return false;
1845
1846 BasicBlock *Preheader = L->getLoopPreheader();
1847 BasicBlock *Latch = L->getLoopLatch();
1848 if (!Preheader || !Latch)
1849 return false;
1850
1851 Value *LoadPtr = Load->getPointerOperand();
1852 // Must be available in preheader.
1853 if (!L->isLoopInvariant(LoadPtr))
1854 return false;
1855
1856 // We plan to hoist the load to preheader without introducing a new fault.
1857 // In order to do that, we need to prove that the loop cannot be side-exited
1858 // between the first entry into the loop header and the execution of the load.
1859 if (ICF->isDominatedByICFIFromSameBlock(Load))
1860 return false;
1861
1862 BasicBlock *LoopBlock = nullptr;
1863 for (auto *Blocker : UnavailableBlocks) {
1864 // Blockers from outside the loop are handled in preheader.
1865 if (!L->contains(Blocker))
1866 continue;
1867
1868 // Only allow one loop block. The loop header is executed at least as often
1869 // as any other loop block, and likely it is executed much more often. But
1870 // in case of multiple loop blocks, we need extra information (such as block
1871 // frequency info) to understand whether it is profitable to PRE into
1872 // multiple loop blocks.
1873 if (LoopBlock)
1874 return false;
1875
1876 // Do not sink into inner loops. This may be non-profitable.
1877 if (L != LI->getLoopFor(Blocker))
1878 return false;
1879
1880 // Blocks that dominate the latch execute on every single iteration, maybe
1881 // except the last one. So PREing into these blocks doesn't make much sense
1882 // in most cases. But the blocks that do not necessarily execute on each
1883 // iteration are sometimes much colder than the header, and this is when
1884 // PRE is potentially profitable.
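//
// Illustrative shape of the profitable case (sketch only):
//
//   preheader -> header -> latch -> header     ; hot path, no clobber
//                  \-> cold.block -> latch     ; rare path, clobbers the ptr
//
// Reloading in the preheader and in cold.block keeps the load out of the
// header, and the extra reload is only paid on the rarely taken cold path.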
1885 if (DT->dominates(Blocker, Latch))
1886 return false;
1887
1888 // Make sure that the terminator itself doesn't clobber.
1889 if (Blocker->getTerminator()->mayWriteToMemory())
1890 return false;
1891
1892 LoopBlock = Blocker;
1893 }
1894
1895 if (!LoopBlock)
1896 return false;
1897
1898 // Make sure the memory at this pointer cannot be freed, so that we can
1899 // safely reload from it after the clobber.
1900 if (LoadPtr->canBeFreed())
1901 return false;
1902
1903 // TODO: Support critical edge splitting if blocker has more than 1 successor.
1904 MapVector<BasicBlock *, Value *> AvailableLoads;
1905 AvailableLoads[LoopBlock] = LoadPtr;
1906 AvailableLoads[Preheader] = LoadPtr;
1907
1908 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1909 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
1910 /*CriticalEdgePredAndLoad*/ nullptr);
1911 ++NumPRELoopLoad;
1912 return true;
1913}
1914
1915 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1916 OptimizationRemarkEmitter *ORE) {
1917 using namespace ore;
1918
1919 ORE->emit([&]() {
1920 return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1921 << "load of type " << NV("Type", Load->getType()) << " eliminated"
1922 << setExtraArgs() << " in favor of "
1923 << NV("InfavorOfValue", AvailableValue);
1924 });
1925}
1926
1927/// Attempt to eliminate a load whose dependencies are
1928/// non-local by performing PHI construction.
1929bool GVNPass::processNonLocalLoad(LoadInst *Load) {
1930 // Non-local speculations are not allowed under ASan or HWASan.
1931 if (Load->getParent()->getParent()->hasFnAttribute(
1932 Attribute::SanitizeAddress) ||
1933 Load->getParent()->getParent()->hasFnAttribute(
1934 Attribute::SanitizeHWAddress))
1935 return false;
1936
1937 // Step 1: Find the non-local dependencies of the load.
1938 LoadDepVect Deps;
1939 MD->getNonLocalPointerDependency(Load, Deps);
1940
1941 // If we had to process more than one hundred blocks to find the
1942 // dependencies, this load isn't worth worrying about. Optimizing
1943 // it will be too expensive.
1944 unsigned NumDeps = Deps.size();
1945 if (NumDeps > MaxNumDeps)
1946 return false;
1947
1948 // If we had a phi translation failure, we'll have a single entry which is a
1949 // clobber in the current block. Reject this early.
1950 if (NumDeps == 1 &&
1951 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1952 LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1953 dbgs() << " has unknown dependencies\n";);
1954 return false;
1955 }
1956
1957 bool Changed = false;
1958 // If this load follows a GEP, see if we can PRE the indices before analyzing.
1959 if (GetElementPtrInst *GEP =
1960 dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1961 for (Use &U : GEP->indices())
1962 if (Instruction *I = dyn_cast<Instruction>(U.get()))
1963 Changed |= performScalarPRE(I);
1964 }
1965
1966 // Step 2: Analyze the availability of the load
1967 AvailValInBlkVect ValuesPerBlock;
1968 UnavailBlkVect UnavailableBlocks;
1969 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1970
1971 // If we have no predecessors that produce a known value for this load, exit
1972 // early.
1973 if (ValuesPerBlock.empty())
1974 return Changed;
1975
1976 // Step 3: Eliminate full redundancy.
1977 //
1978 // If all of the instructions we depend on produce a known value for this
1979 // load, then it is fully redundant and we can use PHI insertion to compute
1980 // its value. Insert PHIs and remove the fully redundant value now.
1981 if (UnavailableBlocks.empty()) {
1982 LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1983
1984 // Perform PHI construction.
1985 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1986 // ConstructSSAForLoadSet is responsible for combining metadata.
1987 ICF->removeUsersOf(Load);
1988 Load->replaceAllUsesWith(V);
1989
1990 if (isa<PHINode>(V))
1991 V->takeName(Load);
1992 if (Instruction *I = dyn_cast<Instruction>(V))
1993 // If instruction I has debug info, then we should not update it.
1994 // Also, if I has a null DebugLoc, then it is still potentially incorrect
1995 // to propagate Load's DebugLoc because Load may not post-dominate I.
1996 if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1997 I->setDebugLoc(Load->getDebugLoc());
1998 if (V->getType()->isPtrOrPtrVectorTy())
1999 MD->invalidateCachedPointerInfo(V);
2000 markInstructionForDeletion(Load);
2001 ++NumGVNLoad;
2002 reportLoadElim(Load, V, ORE);
2003 return true;
2004 }
2005
2006 // Step 4: Eliminate partial redundancy.
2007 if (!isPREEnabled() || !isLoadPREEnabled())
2008 return Changed;
2009 if (!isLoadInLoopPREEnabled() && LI->getLoopFor(Load->getParent()))
2010 return Changed;
2011
2012 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
2013 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
2014 return true;
2015
2016 return Changed;
2017}
2018
2019static bool hasUsersIn(Value *V, BasicBlock *BB) {
2020 return llvm::any_of(V->users(), [BB](User *U) {
2021 auto *I = dyn_cast<Instruction>(U);
2022 return I && I->getParent() == BB;
2023 });
2024}
2025
2026bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
2027 Value *V = IntrinsicI->getArgOperand(0);
2028
2029 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
2030 if (Cond->isZero()) {
2031 Type *Int8Ty = Type::getInt8Ty(V->getContext());
2032 Type *PtrTy = PointerType::get(V->getContext(), 0);
2033 // Insert a new store-to-null instruction before the assume to indicate that
2034 // this code is not reachable. FIXME: We could insert an unreachable
2035 // instruction directly because we can modify the CFG.
2036 auto *NewS =
2037 new StoreInst(PoisonValue::get(Int8Ty), Constant::getNullValue(PtrTy),
2038 IntrinsicI->getIterator());
2039 if (MSSAU) {
2040 const MemoryUseOrDef *FirstNonDom = nullptr;
2041 const auto *AL =
2042 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
2043
2044 // If there are accesses in the current basic block, find the first one
2045 // that does not come before NewS. The new memory access is inserted
2046 // after the found access or before the terminator if no such access is
2047 // found.
2048 if (AL) {
2049 for (const auto &Acc : *AL) {
2050 if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
2051 if (!Current->getMemoryInst()->comesBefore(NewS)) {
2052 FirstNonDom = Current;
2053 break;
2054 }
2055 }
2056 }
2057
2058 auto *NewDef =
2059 FirstNonDom ? MSSAU->createMemoryAccessBefore(
2060 NewS, nullptr,
2061 const_cast<MemoryUseOrDef *>(FirstNonDom))
2062 : MSSAU->createMemoryAccessInBB(
2063 NewS, nullptr,
2064 NewS->getParent(), MemorySSA::BeforeTerminator);
2065
2066 MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
2067 }
2068 }
2069 if (isAssumeWithEmptyBundle(*IntrinsicI)) {
2070 markInstructionForDeletion(IntrinsicI);
2071 return true;
2072 }
2073 return false;
2074 }
2075
2076 if (isa<Constant>(V)) {
2077 // If it's a constant and not false, it must evaluate to true. This means our
2078 // assume is assume(true) and is thus pointless, so we don't want to do
2079 // anything more here.
2080 return false;
2081 }
2082
2083 Constant *True = ConstantInt::getTrue(V->getContext());
2084 bool Changed = false;
2085
2086 for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
2087 BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
2088
2089 // This property is only true in dominated successors, propagateEquality
2090 // will check dominance for us.
2091 Changed |= propagateEquality(V, True, Edge, false);
2092 }
2093
2094 // We can replace assume value with true, which covers cases like this:
2095 // call void @llvm.assume(i1 %cmp)
2096 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
2097 ReplaceOperandsWithMap[V] = True;
2098
2099 // Similarly, after assume(!NotV) we know that NotV == false.
2100 Value *NotV;
2101 if (match(V, m_Not(m_Value(NotV))))
2102 ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
2103
2104 // If we find an equality fact, canonicalize all dominated uses in this block
2105 // to one of the two values. We heuristically choose the "oldest" of the
2106 // two where age is determined by value number. (Note that propagateEquality
2107 // above handles the cross block case.)
2108 //
2109 // Key cases to cover are:
2110 // 1)
2111 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
2112 // call void @llvm.assume(i1 %cmp)
2113 // ret float %0 ; will change it to ret float 3.000000e+00
2114 // 2)
2115 // %load = load float, float* %addr
2116 // %cmp = fcmp oeq float %load, %0
2117 // call void @llvm.assume(i1 %cmp)
2118 // ret float %load ; will change it to ret float %0
2119 if (auto *CmpI = dyn_cast<CmpInst>(V)) {
2120 if (CmpI->isEquivalence()) {
2121 Value *CmpLHS = CmpI->getOperand(0);
2122 Value *CmpRHS = CmpI->getOperand(1);
2123 // Heuristically pick the better replacement -- the choice of heuristic
2124 // isn't terribly important here, but the fact we canonicalize on some
2125 // replacement is for exposing other simplifications.
2126 // TODO: pull this out as a helper function and reuse w/existing
2127 // (slightly different) logic.
2128 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
2129 std::swap(CmpLHS, CmpRHS);
2130 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
2131 std::swap(CmpLHS, CmpRHS);
2132 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
2133 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
2134 // Move the 'oldest' value to the right-hand side, using the value
2135 // number as a proxy for age.
2136 uint32_t LVN = VN.lookupOrAdd(CmpLHS);
2137 uint32_t RVN = VN.lookupOrAdd(CmpRHS);
2138 if (LVN < RVN)
2139 std::swap(CmpLHS, CmpRHS);
2140 }
2141
2142 // Handle the degenerate case where we either haven't pruned a dead path or
2143 // removed a trivial assume yet.
2144 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
2145 return Changed;
2146
2147 LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
2148 << *CmpLHS << " with "
2149 << *CmpRHS << " in block "
2150 << IntrinsicI->getParent()->getName() << "\n");
2151
2152
2153 // Setup the replacement map - this handles uses within the same block
2154 if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
2155 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
2156
2157 // NOTE: The non-block local cases are handled by the call to
2158 // propagateEquality above; this block is just about handling the block
2159 // local cases. TODO: There's a bunch of logic in propagateEquality which
2160 // isn't duplicated for the block local case, can we share it somehow?
2161 }
2162 }
2163 return Changed;
2164}
2165
2166 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
2167 patchReplacementInstruction(I, Repl);
2168 I->replaceAllUsesWith(Repl);
2169}
2170
2171/// Attempt to eliminate a load, first by eliminating it
2172/// locally, and then attempting non-local elimination if that fails.
2173bool GVNPass::processLoad(LoadInst *L) {
2174 if (!MD)
2175 return false;
2176
2177 // This code hasn't been audited for ordered or volatile memory access
2178 if (!L->isUnordered())
2179 return false;
2180
2181 if (L->use_empty()) {
2182 markInstructionForDeletion(L);
2183 return true;
2184 }
2185
2186 // ... to a pointer that has been loaded from before...
2187 MemDepResult Dep = MD->getDependency(L);
2188
2189 // If it is defined in another block, try harder.
2190 if (Dep.isNonLocal())
2191 return processNonLocalLoad(L);
2192
2193 // Only handle the local case below
2194 if (!Dep.isLocal()) {
2195 // This might be a NonFuncLocal or an Unknown
2196 LLVM_DEBUG(
2197 // fast print dep, using operator<< on instruction is too slow.
2198 dbgs() << "GVN: load "; L->printAsOperand(dbgs());
2199 dbgs() << " has unknown dependence\n";);
2200 return false;
2201 }
2202
2203 auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
2204 if (!AV)
2205 return false;
2206
2207 Value *AvailableValue = AV->MaterializeAdjustedValue(L, L, *this);
2208
2209 // MaterializeAdjustedValue is responsible for combining metadata.
2210 ICF->removeUsersOf(L);
2211 L->replaceAllUsesWith(AvailableValue);
2212 markInstructionForDeletion(L);
2213 if (MSSAU)
2214 MSSAU->removeMemoryAccess(L);
2215 ++NumGVNLoad;
2216 reportLoadElim(L, AvailableValue, ORE);
2217 // Tell MDA to reexamine the reused pointer since we might have more
2218 // information after forwarding it.
2219 if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
2220 MD->invalidateCachedPointerInfo(AvailableValue);
2221 return true;
2222}
2223
2224 /// Return a pair whose first field is the value number of \p Exp and whose
2225 /// second field indicates whether the value number was newly created.
2226std::pair<uint32_t, bool>
2227GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
2228 uint32_t &e = expressionNumbering[Exp];
2229 bool CreateNewValNum = !e;
2230 if (CreateNewValNum) {
2231 Expressions.push_back(Exp);
2232 if (ExprIdx.size() < nextValueNumber + 1)
2233 ExprIdx.resize(nextValueNumber * 2);
2234 e = nextValueNumber;
2235 ExprIdx[nextValueNumber++] = nextExprNumber++;
2236 }
2237 return {e, CreateNewValNum};
2238}
2239
2240 /// Return whether all the values related to the same \p Num are
2241/// defined in \p BB.
2242bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
2243 GVNPass &Gvn) {
2244 return all_of(
2245 Gvn.LeaderTable.getLeaders(Num),
2246 [=](const LeaderMap::LeaderTableEntry &L) { return L.BB == BB; });
2247}
2248
2249/// Wrap phiTranslateImpl to provide caching functionality.
2250uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
2251 const BasicBlock *PhiBlock,
2252 uint32_t Num, GVNPass &Gvn) {
2253 auto FindRes = PhiTranslateTable.find({Num, Pred});
2254 if (FindRes != PhiTranslateTable.end())
2255 return FindRes->second;
2256 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
2257 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2258 return NewNum;
2259}
2260
2261 // Return true if the value numbers \p Num and \p NewNum are known to be equal.
2262// Return false if the result is unknown.
2263bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
2264 const BasicBlock *Pred,
2265 const BasicBlock *PhiBlock,
2266 GVNPass &Gvn) {
2267 CallInst *Call = nullptr;
2268 auto Leaders = Gvn.LeaderTable.getLeaders(Num);
2269 for (const auto &Entry : Leaders) {
2270 Call = dyn_cast<CallInst>(Entry.Val);
2271 if (Call && Call->getParent() == PhiBlock)
2272 break;
2273 }
2274
2275 if (AA->doesNotAccessMemory(Call))
2276 return true;
2277
2278 if (!MD || !AA->onlyReadsMemory(Call))
2279 return false;
2280
2281 MemDepResult local_dep = MD->getDependency(Call);
2282 if (!local_dep.isNonLocal())
2283 return false;
2284
2285 const MemoryDependenceResults::NonLocalDepInfo &deps =
2286 MD->getNonLocalCallDependency(Call);
2287
2288 // Check to see if the Call has no function local clobber.
2289 for (const NonLocalDepEntry &D : deps) {
2290 if (D.getResult().isNonFuncLocal())
2291 return true;
2292 }
2293 return false;
2294}
2295
2296/// Translate value number \p Num using phis, so that it has the values of
2297/// the phis in BB.
2298uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2299 const BasicBlock *PhiBlock,
2300 uint32_t Num, GVNPass &Gvn) {
2301 if (PHINode *PN = NumberingPhi[Num]) {
2302 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2303 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2304 if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2305 return TransVal;
2306 }
2307 return Num;
2308 }
2309
2310 // If any value related to Num is defined in a BB other than PhiBlock,
2311 // it cannot depend on a phi in PhiBlock without going through
2312 // a backedge. We can do an early exit in that case to save compile time.
2313 if (!areAllValsInBB(Num, PhiBlock, Gvn))
2314 return Num;
2315
2316 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2317 return Num;
2318 Expression Exp = Expressions[ExprIdx[Num]];
2319
2320 for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2321 // For InsertValue and ExtractValue, some varargs are index numbers
2322 // instead of value numbers. Those index numbers should not be
2323 // translated.
2324 if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2325 (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2326 (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2327 continue;
2328 Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2329 }
2330
2331 if (Exp.commutative) {
2332 assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2333 if (Exp.varargs[0] > Exp.varargs[1]) {
2334 std::swap(Exp.varargs[0], Exp.varargs[1]);
2335 uint32_t Opcode = Exp.opcode >> 8;
2336 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2337 Exp.opcode = (Opcode << 8) |
2338 CmpInst::getSwappedPredicate(
2339 static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2340 }
2341 }
2342
2343 if (uint32_t NewNum = expressionNumbering[Exp]) {
2344 if (Exp.opcode == Instruction::Call && NewNum != Num)
2345 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2346 return NewNum;
2347 }
2348 return Num;
2349}
2350
2351/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2352/// again.
2353void GVNPass::ValueTable::eraseTranslateCacheEntry(
2354 uint32_t Num, const BasicBlock &CurrBlock) {
2355 for (const BasicBlock *Pred : predecessors(&CurrBlock))
2356 PhiTranslateTable.erase({Num, Pred});
2357}
2358
2359// In order to find a leader for a given value number at a
2360// specific basic block, we first obtain the list of all Values for that number,
2361// and then scan the list to find one whose block dominates the block in
2362// question. This is fast because dominator tree queries consist of only
2363// a few comparisons of DFS numbers.
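// For example (hypothetical): if %a, defined in a block that dominates BB, and
// %b, defined in an unrelated block, both carry value number N, then
// findLeader(BB, N) returns %a; once a dominating Constant leader is seen it
// is returned immediately, since no better leader can exist.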
2364Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
2365 auto Leaders = LeaderTable.getLeaders(num);
2366 if (Leaders.empty())
2367 return nullptr;
2368
2369 Value *Val = nullptr;
2370 for (const auto &Entry : Leaders) {
2371 if (DT->dominates(Entry.BB, BB)) {
2372 Val = Entry.Val;
2373 if (isa<Constant>(Val))
2374 return Val;
2375 }
2376 }
2377
2378 return Val;
2379}
2380
2381/// There is an edge from 'Src' to 'Dst'. Return
2382/// true if every path from the entry block to 'Dst' passes via this edge. In
2383 /// particular 'Dst' must not be reachable via another edge from 'Src'.
2384 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2385 DominatorTree *DT) {
2386 // While in theory it is interesting to consider the case in which Dst has
2387 // more than one predecessor, because Dst might be part of a loop which is
2388 // only reachable from Src, in practice it is pointless since at the time
2389 // GVN runs all such loops have preheaders, which means that Dst will have
2390 // been changed to have only one predecessor, namely Src.
2391 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2392 assert((!Pred || Pred == E.getStart()) &&
2393 "No edge between these basic blocks!");
2394 return Pred != nullptr;
2395}
2396
2397void GVNPass::assignBlockRPONumber(Function &F) {
2398 BlockRPONumber.clear();
2399 uint32_t NextBlockNumber = 1;
2400 ReversePostOrderTraversal<Function *> RPOT(&F);
2401 for (BasicBlock *BB : RPOT)
2402 BlockRPONumber[BB] = NextBlockNumber++;
2403 InvalidBlockRPONumbers = false;
2404}
2405
2406bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2407 bool Changed = false;
2408 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2409 Value *Operand = Instr->getOperand(OpNum);
2410 auto it = ReplaceOperandsWithMap.find(Operand);
2411 if (it != ReplaceOperandsWithMap.end()) {
2412 LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2413 << *it->second << " in instruction " << *Instr << '\n');
2414 Instr->setOperand(OpNum, it->second);
2415 Changed = true;
2416 }
2417 }
2418 return Changed;
2419}
2420
2421/// The given values are known to be equal in every block
2422/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
2423/// 'RHS' everywhere in the scope. Returns whether a change was made.
2424/// If DominatesByEdge is false, then it means that we will propagate the RHS
2425/// value starting from the end of Root.Start.
2426bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
2427 const BasicBlockEdge &Root,
2428 bool DominatesByEdge) {
2429 SmallVector<std::pair<Value *, Value *>, 4> Worklist;
2430 Worklist.push_back(std::make_pair(LHS, RHS));
2431 bool Changed = false;
2432 // For speed, compute a conservative fast approximation to
2433 // DT->dominates(Root, Root.getEnd());
2434 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2435
2436 while (!Worklist.empty()) {
2437 std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2438 LHS = Item.first; RHS = Item.second;
2439
2440 if (LHS == RHS)
2441 continue;
2442 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2443
2444 // Don't try to propagate equalities between constants.
2445 if (isa<Constant>(LHS) && isa<Constant>(RHS))
2446 continue;
2447
2448 // Prefer a constant on the right-hand side, or an Argument if no constants.
2449 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2450 std::swap(LHS, RHS);
2451 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2452 const DataLayout &DL =
2453 isa<Argument>(LHS)
2454 ? cast<Argument>(LHS)->getParent()->getDataLayout()
2455 : cast<Instruction>(LHS)->getDataLayout();
2456
2457 // If there is no obvious reason to prefer the left-hand side over the
2458 // right-hand side, ensure the longest lived term is on the right-hand side,
2459 // so the shortest lived term will be replaced by the longest lived.
2460 // This tends to expose more simplifications.
2461 uint32_t LVN = VN.lookupOrAdd(LHS);
2462 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2463 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2464 // Move the 'oldest' value to the right-hand side, using the value number
2465 // as a proxy for age.
2466 uint32_t RVN = VN.lookupOrAdd(RHS);
2467 if (LVN < RVN) {
2468 std::swap(LHS, RHS);
2469 LVN = RVN;
2470 }
2471 }
2472
2473 // If value numbering later sees that an instruction in the scope is equal
2474 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
2475 // the invariant that instructions only occur in the leader table for their
2476 // own value number (this is used by removeFromLeaderTable), do not do this
2477 // if RHS is an instruction (if an instruction in the scope is morphed into
2478 // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2479 // using the leader table is about compiling faster, not optimizing better).
2480 // The leader table only tracks basic blocks, not edges. Only add to it if we
2481 // have the simple case where the edge dominates the end.
2482 if (RootDominatesEnd && !isa<Instruction>(RHS) &&
2483 canReplacePointersIfEqual(LHS, RHS, DL))
2484 LeaderTable.insert(LVN, RHS, Root.getEnd());
2485
2486 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
2487 // LHS always has at least one use that is not dominated by Root, this will
2488 // never do anything if LHS has only one use.
2489 if (!LHS->hasOneUse()) {
2490 // Create a callback that captures the DL.
2491 auto canReplacePointersCallBack = [&DL](const Use &U, const Value *To) {
2492 return canReplacePointersInUseIfEqual(U, To, DL);
2493 };
2494 unsigned NumReplacements =
2495 DominatesByEdge
2496 ? replaceDominatedUsesWithIf(LHS, RHS, *DT, Root,
2497 canReplacePointersCallBack)
2498 : replaceDominatedUsesWithIf(LHS, RHS, *DT, Root.getStart(),
2499 canReplacePointersCallBack);
2500
2501 if (NumReplacements > 0) {
2502 Changed = true;
2503 NumGVNEqProp += NumReplacements;
2504 // Cached information for anything that uses LHS will be invalid.
2505 if (MD)
2506 MD->invalidateCachedPointerInfo(LHS);
2507 }
2508 }
2509
2510 // Now try to deduce additional equalities from this one. For example, if
2511 // the known equality was "(A != B)" == "false" then it follows that A and B
2512 // are equal in the scope. Only boolean equalities with an explicit true or
2513 // false RHS are currently supported.
2514 if (!RHS->getType()->isIntegerTy(1))
2515 // Not a boolean equality - bail out.
2516 continue;
2517 ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2518 if (!CI)
2519 // RHS neither 'true' nor 'false' - bail out.
2520 continue;
2521 // Whether RHS equals 'true'. Otherwise it equals 'false'.
2522 bool isKnownTrue = CI->isMinusOne();
2523 bool isKnownFalse = !isKnownTrue;
2524
2525 // If "A && B" is known true then both A and B are known true. If "A || B"
2526 // is known false then both A and B are known false.
2527 Value *A, *B;
2528 if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2529 (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2530 Worklist.push_back(std::make_pair(A, RHS));
2531 Worklist.push_back(std::make_pair(B, RHS));
2532 continue;
2533 }
2534
2535 // If we are propagating an equality like "(A == B)" == "true" then also
2536 // propagate the equality A == B. When propagating a comparison such as
2537 // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2538 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2539 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2540
2541 // If "A == B" is known true, or "A != B" is known false, then replace
2542 // A with B everywhere in the scope. For floating point operations, we
2543 // have to be careful since equality does not always imply equivalence.
2544 if (Cmp->isEquivalence(isKnownFalse))
2545 Worklist.push_back(std::make_pair(Op0, Op1));
2546
2547 // If "A >= B" is known true, replace "A < B" with false everywhere.
2548 CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2549 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2550 // Since we don't have the instruction "A < B" immediately to hand, work
2551 // out the value number that it would have and use that to find an
2552 // appropriate instruction (if any).
2553 uint32_t NextNum = VN.getNextUnusedValueNumber();
2554 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2555 // If the number we were assigned was brand new then there is no point in
2556 // looking for an instruction realizing it: there cannot be one!
2557 if (Num < NextNum) {
2558 Value *NotCmp = findLeader(Root.getEnd(), Num);
2559 if (NotCmp && isa<Instruction>(NotCmp)) {
2560 unsigned NumReplacements =
2561 DominatesByEdge
2562 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2563 : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2564 Root.getStart());
2565 Changed |= NumReplacements > 0;
2566 NumGVNEqProp += NumReplacements;
2567 // Cached information for anything that uses NotCmp will be invalid.
2568 if (MD)
2569 MD->invalidateCachedPointerInfo(NotCmp);
2570 }
2571 }
2572 // Ensure that any instruction in scope that gets the "A < B" value number
2573 // is replaced with false.
2574 // The leader table only tracks basic blocks, not edges. Only add to it if we
2575 // have the simple case where the edge dominates the end.
2576 if (RootDominatesEnd)
2577 LeaderTable.insert(Num, NotVal, Root.getEnd());
2578
2579 continue;
2580 }
2581 }
2582
2583 return Changed;
2584}
2585
2586/// When calculating availability, handle an instruction
2587/// by inserting it into the appropriate sets
2588bool GVNPass::processInstruction(Instruction *I) {
2589 // Ignore dbg info intrinsics.
2590 if (isa<DbgInfoIntrinsic>(I))
2591 return false;
2592
2593 // If the instruction can be easily simplified then do so now in preference
2594 // to value numbering it. Value numbering often exposes redundancies, for
2595 // example if it determines that %y is equal to %x then the instruction
2596 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2597 const DataLayout &DL = I->getDataLayout();
2598 if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) {
2599 bool Changed = false;
2600 if (!I->use_empty()) {
2601 // Simplification can cause a special instruction to become not special.
2602 // For example, devirtualization to a willreturn function.
2603 ICF->removeUsersOf(I);
2604 I->replaceAllUsesWith(V);
2605 Changed = true;
2606 }
2607 if (isInstructionTriviallyDead(I, TLI)) {
2608 markInstructionForDeletion(I);
2609 Changed = true;
2610 }
2611 if (Changed) {
2612 if (MD && V->getType()->isPtrOrPtrVectorTy())
2613 MD->invalidateCachedPointerInfo(V);
2614 ++NumGVNSimpl;
2615 return true;
2616 }
2617 }
2618
2619 if (auto *Assume = dyn_cast<AssumeInst>(I))
2620 return processAssumeIntrinsic(Assume);
2621
2622 if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2623 if (processLoad(Load))
2624 return true;
2625
2626 unsigned Num = VN.lookupOrAdd(Load);
2627 LeaderTable.insert(Num, Load, Load->getParent());
2628 return false;
2629 }
2630
2631 // For conditional branches, we can perform simple conditional propagation on
2632 // the condition value itself.
2633 if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2634 if (!BI->isConditional())
2635 return false;
2636
2637 if (isa<Constant>(BI->getCondition()))
2638 return processFoldableCondBr(BI);
2639
2640 Value *BranchCond = BI->getCondition();
2641 BasicBlock *TrueSucc = BI->getSuccessor(0);
2642 BasicBlock *FalseSucc = BI->getSuccessor(1);
2643 // Avoid multiple edges early.
2644 if (TrueSucc == FalseSucc)
2645 return false;
2646
2647 BasicBlock *Parent = BI->getParent();
2648 bool Changed = false;
2649
2650 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2651 BasicBlockEdge TrueE(Parent, TrueSucc);
2652 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2653
2654 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2655 BasicBlockEdge FalseE(Parent, FalseSucc);
2656 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2657
2658 return Changed;
2659 }
2660
2661 // For switches, propagate the case values into the case destinations.
2662 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2663 Value *SwitchCond = SI->getCondition();
2664 BasicBlock *Parent = SI->getParent();
2665 bool Changed = false;
2666
2667 // Remember how many outgoing edges there are to every successor.
2668 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2669 for (BasicBlock *Succ : successors(Parent))
2670 ++SwitchEdges[Succ];
2671
2672 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2673 i != e; ++i) {
2674 BasicBlock *Dst = i->getCaseSuccessor();
2675 // If there is only a single edge, propagate the case value into it.
2676 if (SwitchEdges.lookup(Dst) == 1) {
2677 BasicBlockEdge E(Parent, Dst);
2678 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2679 }
2680 }
2681 return Changed;
2682 }
2683
2684 // Instructions with void type don't return a value, so there's
2685 // no point in trying to find redundancies in them.
2686 if (I->getType()->isVoidTy())
2687 return false;
2688
2689 uint32_t NextNum = VN.getNextUnusedValueNumber();
2690 unsigned Num = VN.lookupOrAdd(I);
2691
2692 // Allocations are always uniquely numbered, so we can save time and memory
2693 // by fast failing them.
2694 if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2695 LeaderTable.insert(Num, I, I->getParent());
2696 return false;
2697 }
2698
2699 // If the number we were assigned was a brand new VN, then we don't
2700 // need to do a lookup to see if the number already exists
2701 // somewhere in the domtree: it can't!
2702 if (Num >= NextNum) {
2703 LeaderTable.insert(Num, I, I->getParent());
2704 return false;
2705 }
2706
2707 // Perform fast-path value-number based elimination of values inherited from
2708 // dominators.
2709 Value *Repl = findLeader(I->getParent(), Num);
2710 if (!Repl) {
2711 // Failure, just remember this instance for future use.
2712 LeaderTable.insert(Num, I, I->getParent());
2713 return false;
2714 }
2715
2716 if (Repl == I) {
2717 // If I was the result of a shortcut PRE, it might already be in the table
2718 // and the best replacement for itself. Nothing to do.
2719 return false;
2720 }
2721
2722 // Remove it!
2723 patchAndReplaceAllUsesWith(I, Repl);
2724 if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2725 MD->invalidateCachedPointerInfo(Repl);
2726 markInstructionForDeletion(I);
2727 return true;
2728}
2729
2730 /// runImpl - This is the main transformation entry point for a function.
2731bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2732 const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2733 MemoryDependenceResults *RunMD, LoopInfo &LI,
2734 OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2735 AC = &RunAC;
2736 DT = &RunDT;
2737 VN.setDomTree(DT);
2738 TLI = &RunTLI;
2739 VN.setAliasAnalysis(&RunAA);
2740 MD = RunMD;
2741 ImplicitControlFlowTracking ImplicitCFT;
2742 ICF = &ImplicitCFT;
2743 this->LI = &LI;
2744 VN.setMemDep(MD);
2745 ORE = RunORE;
2746 InvalidBlockRPONumbers = true;
2747 MemorySSAUpdater Updater(MSSA);
2748 MSSAU = MSSA ? &Updater : nullptr;
2749
2750 bool Changed = false;
2751 bool ShouldContinue = true;
2752
2753 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
2754 // Merge unconditional branches, allowing PRE to catch more
2755 // optimization opportunities.
2756 for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
2757 bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, &LI, MSSAU, MD);
2758 if (removedBlock)
2759 ++NumGVNBlocks;
2760
2761 Changed |= removedBlock;
2762 }
2763 DTU.flush();
2764
2765 unsigned Iteration = 0;
2766 while (ShouldContinue) {
2767 LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2768 (void) Iteration;
2769 ShouldContinue = iterateOnFunction(F);
2770 Changed |= ShouldContinue;
2771 ++Iteration;
2772 }
2773
2774 if (isPREEnabled()) {
2775 // Fabricate val-num for dead-code in order to suppress assertion in
2776 // performPRE().
2777 assignValNumForDeadCode();
2778 bool PREChanged = true;
2779 while (PREChanged) {
2780 PREChanged = performPRE(F);
2781 Changed |= PREChanged;
2782 }
2783 }
2784
2785 // FIXME: Should perform GVN again after PRE does something. PRE can move
2786 // computations into blocks where they become fully redundant. Note that
2787 // we can't do this until PRE's critical edge splitting updates memdep.
2788 // Actually, when this happens, we should just fully integrate PRE into GVN.
2789
2790 cleanupGlobalSets();
2791 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2792 // iteration.
2793 DeadBlocks.clear();
2794
2795 if (MSSA && VerifyMemorySSA)
2796 MSSA->verifyMemorySSA();
2797
2798 return Changed;
2799}
2800
2801bool GVNPass::processBlock(BasicBlock *BB) {
2802 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2803 // (and incrementing BI before processing an instruction).
2804 assert(InstrsToErase.empty() &&
2805 "We expect InstrsToErase to be empty across iterations");
2806 if (DeadBlocks.count(BB))
2807 return false;
2808
2810 // Clear the map before every BB because it can only be used for a single BB.
2810 ReplaceOperandsWithMap.clear();
2811 bool ChangedFunction = false;
2812
2813 // Since we may not have visited the input blocks of the phis, we can't
2814 // use our normal hash approach for phis. Instead, simply look for
2815 // obvious duplicates. The first pass of GVN will tend to create
2816 // identical phis, and the second or later passes can eliminate them.
2817 SmallPtrSet<PHINode *, 8> PHINodesToRemove;
2818 ChangedFunction |= EliminateDuplicatePHINodes(BB, PHINodesToRemove);
2819 for (PHINode *PN : PHINodesToRemove) {
2820 VN.erase(PN);
2821 removeInstruction(PN);
2822 }
2823
2824 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2825 BI != BE;) {
2826 if (!ReplaceOperandsWithMap.empty())
2827 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2828 ChangedFunction |= processInstruction(&*BI);
2829
2830 if (InstrsToErase.empty()) {
2831 ++BI;
2832 continue;
2833 }
2834
2835 // If we need some instructions deleted, do it now.
2836 NumGVNInstr += InstrsToErase.size();
2837
2838 // Avoid iterator invalidation.
2839 bool AtStart = BI == BB->begin();
2840 if (!AtStart)
2841 --BI;
2842
2843 for (auto *I : InstrsToErase) {
2844 assert(I->getParent() == BB && "Removing instruction from wrong block?");
2845 LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2846 salvageKnowledge(I, AC);
2848 removeInstruction(I);
2849 }
2850 InstrsToErase.clear();
2851
2852 if (AtStart)
2853 BI = BB->begin();
2854 else
2855 ++BI;
2856 }
2857
2858 return ChangedFunction;
2859}
2860
2861// Instantiate an expression in a predecessor that lacked it.
2862bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2863 BasicBlock *Curr, unsigned int ValNo) {
2864 // Because we are going top-down through the block, all value numbers
2865 // will be available in the predecessor by the time we need them. Any
2866 // that weren't originally present will have been instantiated earlier
2867 // in this loop.
2868 bool success = true;
2869 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2870 Value *Op = Instr->getOperand(i);
2871 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2872 continue;
2873 // This could be a newly inserted instruction, in which case, we won't
2874 // find a value number, and should give up before we hurt ourselves.
2875 // FIXME: Rewrite the infrastructure to make it easier to value number
2876 // and process newly inserted instructions.
2877 if (!VN.exists(Op)) {
2878 success = false;
2879 break;
2880 }
2881 uint32_t TValNo =
2882 VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2883 if (Value *V = findLeader(Pred, TValNo)) {
2884 Instr->setOperand(i, V);
2885 } else {
2886 success = false;
2887 break;
2888 }
2889 }
2890
2891 // Fail out if we encounter an operand that is not available in
2892 // the PRE predecessor. This is typically because of loads which
2893 // are not value numbered precisely.
2894 if (!success)
2895 return false;
2896
2897 Instr->insertBefore(Pred->getTerminator()->getIterator());
2898 Instr->setName(Instr->getName() + ".pre");
2899 Instr->setDebugLoc(Instr->getDebugLoc());
2900
2901 ICF->insertInstructionTo(Instr, Pred);
2902
2903 unsigned Num = VN.lookupOrAdd(Instr);
2904 VN.add(Instr, Num);
2905
2906 // Update the availability map to include the new instruction.
2907 LeaderTable.insert(Num, Instr, Pred);
2908 return true;
2909}
2910
2911bool GVNPass::performScalarPRE(Instruction *CurInst) {
2912 if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2913 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2914 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2915 isa<DbgInfoIntrinsic>(CurInst))
2916 return false;
2917
2918 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2919 // sinking the compare again, and it would force the code generator to
2920 // move the i1 from processor flags or predicate registers into a general
2921 // purpose register.
2922 if (isa<CmpInst>(CurInst))
2923 return false;
2924
2925 // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2926 // sinking the addressing mode computation back to its uses. Extending the
2927 // GEP's live range increases the register pressure, and therefore it can
2928 // introduce unnecessary spills.
2929 //
2930 // This doesn't prevent Load PRE. PHI translation will make the GEP available
2931 // to the load by moving it to the predecessor block if necessary.
2932 if (isa<GetElementPtrInst>(CurInst))
2933 return false;
2934
2935 if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2936 // We don't currently value number ANY inline asm calls.
2937 if (CallB->isInlineAsm())
2938 return false;
2939 }
2940
2941 uint32_t ValNo = VN.lookup(CurInst);
2942
2943 // Look for the predecessors for PRE opportunities. We're
2944 // only trying to solve the basic diamond case, where
2945 // a value is computed in the successor and one predecessor,
2946 // but not the other. We also explicitly disallow cases
2947 // where the successor is its own predecessor, because they're
2948 // more complicated to get right.
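//
// Illustrative diamond (sketch only; names are made up):
//
//            entry
//           /     \
//       pred1     pred2
//   %t = add a, b  (no add)
//           \     /
//            merge
//       %u = add a, b    ; insert the add in pred2 and turn %u into a phi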
2949 unsigned NumWith = 0;
2950 unsigned NumWithout = 0;
2951 BasicBlock *PREPred = nullptr;
2952 BasicBlock *CurrentBlock = CurInst->getParent();
2953
2954 // Update the RPO numbers for this function.
2955 if (InvalidBlockRPONumbers)
2956 assignBlockRPONumber(*CurrentBlock->getParent());
2957
2958 SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2959 for (BasicBlock *P : predecessors(CurrentBlock)) {
2960 // We're not interested in PRE when a predecessor block is not
2961 // reachable.
2962 if (!DT->isReachableFromEntry(P)) {
2963 NumWithout = 2;
2964 break;
2965 }
2966 // It is not safe to do PRE when P->CurrentBlock is a loop backedge.
2967 assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2968 "Invalid BlockRPONumber map.");
2969 if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {
2970 NumWithout = 2;
2971 break;
2972 }
2973
2974 uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2975 Value *predV = findLeader(P, TValNo);
2976 if (!predV) {
2977 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2978 PREPred = P;
2979 ++NumWithout;
2980 } else if (predV == CurInst) {
2981 /* CurInst dominates this predecessor. */
2982 NumWithout = 2;
2983 break;
2984 } else {
2985 predMap.push_back(std::make_pair(predV, P));
2986 ++NumWith;
2987 }
2988 }
2989
2990 // Don't do PRE when it might increase code size, i.e. when
2991 // we would need to insert instructions in more than one pred.
2992 if (NumWithout > 1 || NumWith == 0)
2993 return false;
2994
2995 // We may have a case where all predecessors have the instruction,
2996 // and we just need to insert a phi node. Otherwise, perform
2997 // insertion.
2998 Instruction *PREInstr = nullptr;
2999
3000 if (NumWithout != 0) {
3001 if (!isSafeToSpeculativelyExecute(CurInst)) {
3002 // It is only valid to insert a new instruction if the current instruction
3003 // is always executed. An instruction with implicit control flow could
3004 // prevent us from doing it. If we cannot speculate the execution, then
3005 // PRE should be prohibited.
3006 if (ICF->isDominatedByICFIFromSameBlock(CurInst))
3007 return false;
3008 }
3009
3010 // Don't do PRE across indirect branch.
3011 if (isa<IndirectBrInst>(PREPred->getTerminator()))
3012 return false;
3013
3014 // We can't do PRE safely on a critical edge, so instead we schedule
3015 // the edge to be split and perform the PRE the next time we iterate
3016 // on the function.
3017 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
3018 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
3019 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
3020 return false;
3021 }
3022 // We need to insert somewhere, so let's give it a shot
3023 PREInstr = CurInst->clone();
3024 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
3025 // If we failed insertion, make sure we remove the instruction.
3026#ifndef NDEBUG
3027 verifyRemoved(PREInstr);
3028#endif
3029 PREInstr->deleteValue();
3030 return false;
3031 }
3032 }
3033
3034 // Either we should have filled in the PRE instruction, or we should
3035 // not have needed insertions.
3036 assert(PREInstr != nullptr || NumWithout == 0);
3037
3038 ++NumGVNPRE;
3039
3040 // Create a PHI to make the value available in this block.
3041 PHINode *Phi = PHINode::Create(CurInst->getType(), predMap.size(),
3042 CurInst->getName() + ".pre-phi");
3043 Phi->insertBefore(CurrentBlock->begin());
3044 for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
3045 if (Value *V = predMap[i].first) {
3046 // If we use an existing value in this phi, we have to patch the original
3047 // value because the phi will be used to replace a later value.
3048 patchReplacementInstruction(CurInst, V);
3049 Phi->addIncoming(V, predMap[i].second);
3050 } else
3051 Phi->addIncoming(PREInstr, PREPred);
3052 }
3053
3054 VN.add(Phi, ValNo);
3055 // After creating a new PHI for ValNo, the phi translate result for ValNo will
3056 // be changed, so erase the related stale entries in phi translate cache.
3057 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
3058 LeaderTable.insert(ValNo, Phi, CurrentBlock);
3059 Phi->setDebugLoc(CurInst->getDebugLoc());
3060 CurInst->replaceAllUsesWith(Phi);
3061 if (MD && Phi->getType()->isPtrOrPtrVectorTy())
3062 MD->invalidateCachedPointerInfo(Phi);
3063 VN.erase(CurInst);
3064 LeaderTable.erase(ValNo, CurInst, CurrentBlock);
3065
3066 LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
3067 removeInstruction(CurInst);
3068 ++NumGVNInstr;
3069
3070 return true;
3071}
3072
3073/// Perform a purely local form of PRE that looks for diamond
3074/// control flow patterns and attempts to perform simple PRE at the join point.
3075bool GVNPass::performPRE(Function &F) {
3076 bool Changed = false;
3077 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
3078 // Nothing to PRE in the entry block.
3079 if (CurrentBlock == &F.getEntryBlock())
3080 continue;
3081
3082 // Don't perform PRE on an EH pad.
3083 if (CurrentBlock->isEHPad())
3084 continue;
3085
3086 for (BasicBlock::iterator BI = CurrentBlock->begin(),
3087 BE = CurrentBlock->end();
3088 BI != BE;) {
3089 Instruction *CurInst = &*BI++;
3090 Changed |= performScalarPRE(CurInst);
3091 }
3092 }
3093
3094 if (splitCriticalEdges())
3095 Changed = true;
3096
3097 return Changed;
3098}
3099
3100/// Split the critical edge connecting the given two blocks, and return
3101/// the block inserted to the critical edge.
3102BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
3103 // GVN does not require loop-simplify, do not try to preserve it if it is not
3104 // possible.
3105 BasicBlock *BB = SplitCriticalEdge(
3106 Pred, Succ,
3107 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
3108 if (BB) {
3109 if (MD)
3110 MD->invalidateCachedPredecessors();
3111 InvalidBlockRPONumbers = true;
3112 }
3113 return BB;
3114}
3115
3116/// Split critical edges found during the previous
3117/// iteration that may enable further optimization.
3118bool GVNPass::splitCriticalEdges() {
3119 if (toSplit.empty())
3120 return false;
3121
3122 bool Changed = false;
3123 do {
3124 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
3125 Changed |= SplitCriticalEdge(Edge.first, Edge.second,
3126 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
3127 nullptr;
3128 } while (!toSplit.empty());
3129 if (Changed) {
3130 if (MD)
3131 MD->invalidateCachedPredecessors();
3132 InvalidBlockRPONumbers = true;
3133 }
3134 return Changed;
3135}
3136
3137/// Executes one iteration of GVN
3138bool GVNPass::iterateOnFunction(Function &F) {
3139 cleanupGlobalSets();
3140
3141 // Top-down walk of the dominator tree
3142 bool Changed = false;
3143 // Needed for value numbering with phi construction to work.
3144 // RPOT walks the graph in its constructor and will not be invalidated during
3145 // processBlock.
3146 ReversePostOrderTraversal<Function *> RPOT(&F);
3147
3148 for (BasicBlock *BB : RPOT)
3149 Changed |= processBlock(BB);
3150
3151 return Changed;
3152}
3153
3154void GVNPass::cleanupGlobalSets() {
3155 VN.clear();
3156 LeaderTable.clear();
3157 BlockRPONumber.clear();
3158 ICF->clear();
3159 InvalidBlockRPONumbers = true;
3160}
3161
3162void GVNPass::removeInstruction(Instruction *I) {
3163 if (MD) MD->removeInstruction(I);
3164 if (MSSAU)
3165 MSSAU->removeMemoryAccess(I);
3166#ifndef NDEBUG
3167 verifyRemoved(I);
3168#endif
3169 ICF->removeInstruction(I);
3170 I->eraseFromParent();
3171}
3172
3173/// Verify that the specified instruction does not occur in our
3174/// internal data structures.
3175void GVNPass::verifyRemoved(const Instruction *Inst) const {
3176 VN.verifyRemoved(Inst);
3177 LeaderTable.verifyRemoved(Inst);
3178}
3179
3180 /// BB is declared dead, which implies that other blocks become dead as well.
3181 /// This function adds all these blocks to "DeadBlocks". For the dead blocks'
3182 /// live successors, it updates their phi nodes by replacing the operands
3183 /// corresponding to dead blocks with a poison value.
3184 void GVNPass::addDeadBlock(BasicBlock *BB) {
3185 SmallVector<BasicBlock *, 4> NewDead;
3186 SmallSetVector<BasicBlock *, 4> DF;
3187
3188 NewDead.push_back(BB);
3189 while (!NewDead.empty()) {
3190 BasicBlock *D = NewDead.pop_back_val();
3191 if (DeadBlocks.count(D))
3192 continue;
3193
3194 // All blocks dominated by D are dead.
3195 SmallVector<BasicBlock *, 8> Dom;
3196 DT->getDescendants(D, Dom);
3197 DeadBlocks.insert(Dom.begin(), Dom.end());
3198
3199 // Figure out the dominance-frontier(D).
3200 for (BasicBlock *B : Dom) {
3201 for (BasicBlock *S : successors(B)) {
3202 if (DeadBlocks.count(S))
3203 continue;
3204
3205 bool AllPredDead = true;
3206 for (BasicBlock *P : predecessors(S))
3207 if (!DeadBlocks.count(P)) {
3208 AllPredDead = false;
3209 break;
3210 }
3211
3212 if (!AllPredDead) {
3213 // S could be proved dead later on. That is why we don't update phi
3214 // operands at this moment.
3215 DF.insert(S);
3216 } else {
3217 // Although S is not dominated by D, it is dead by now. This can happen
3218 // if S already had a dead predecessor before D was declared
3219 // dead.
3220 NewDead.push_back(S);
3221 }
3222 }
3223 }
3224 }
3225
3226 // For the dead blocks' live successors, update their phi nodes by replacing
3227 // the operands corresponding to dead blocks with poison.
3228 for (BasicBlock *B : DF) {
3229 if (DeadBlocks.count(B))
3230 continue;
3231
3232 // First, split the critical edges. This might also create additional blocks
3233 // to preserve LoopSimplify form and adjust edges accordingly.
3234 SmallVector<BasicBlock *, 4> Preds(predecessors(B));
3235 for (BasicBlock *P : Preds) {
3236 if (!DeadBlocks.count(P))
3237 continue;
3238
3239 if (llvm::is_contained(successors(P), B) &&
3240 isCriticalEdge(P->getTerminator(), B)) {
3241 if (BasicBlock *S = splitCriticalEdges(P, B))
3242 DeadBlocks.insert(P = S);
3243 }
3244 }
3245
3246 // Now poison the incoming values from the dead predecessors.
3247 for (BasicBlock *P : predecessors(B)) {
3248 if (!DeadBlocks.count(P))
3249 continue;
3250 for (PHINode &Phi : B->phis()) {
3251 Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
3252 if (MD)
3253 MD->invalidateCachedPointerInfo(&Phi);
3254 }
3255 }
3256 }
3257}
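The final loop above is the only place where addDeadBlock touches IR: every PHI in a live successor gets its incoming value from a dead predecessor rewritten to poison. A minimal sketch of that rewrite in isolation follows; it is not part of GVN.cpp, and the helper name is hypothetical.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Replace, in every PHI of LiveSucc, the value incoming from DeadPred with a
// poison value of the PHI's type.
static void poisonIncomingFrom(BasicBlock *LiveSucc, BasicBlock *DeadPred) {
  for (PHINode &Phi : LiveSucc->phis())
    Phi.setIncomingValueForBlock(DeadPred, PoisonValue::get(Phi.getType()));
}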
3258
3259 // If the given branch is recognized as a foldable branch (i.e. a conditional
3260 // branch with a constant condition), perform the following analyses and
3261 // transformation.
3262 // 1) If the dead outgoing edge is a critical edge, split it. Let
3263 // R be the target of the dead outgoing edge.
3264 // 2) Identify the set of dead blocks implied by the branch's dead outgoing
3265 // edge. The result of this step is {X | X is dominated by R}.
3266 // 3) Identify those blocks which have at least one dead predecessor. The
3267 // result of this step is dominance-frontier(R).
3268 // 4) Update the PHIs in DF(R) by replacing the operands corresponding to
3269 // dead blocks with poison, in the hope that these PHIs will be optimized away.
3270 //
3271 // Return true iff *NEW* dead code is found.
3272bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3273 if (!BI || BI->isUnconditional())
3274 return false;
3275
3276 // If a branch has two identical successors, we cannot declare either dead.
3277 if (BI->getSuccessor(0) == BI->getSuccessor(1))
3278 return false;
3279
3280 ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
3281 if (!Cond)
3282 return false;
3283
3284 BasicBlock *DeadRoot =
3285 Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
3286 if (DeadBlocks.count(DeadRoot))
3287 return false;
3288
3289 if (!DeadRoot->getSinglePredecessor())
3290 DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
3291
3292 addDeadBlock(DeadRoot);
3293 return true;
3294}
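The successor selection in processFoldableCondBr is easy to get backwards: when the constant condition is true, it is successor 1 (the false edge) that becomes dead. A small sketch of just that selection logic, mirroring the code above, follows; it is not part of GVN.cpp, and getDeadSuccessor is a hypothetical helper.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Return the successor that can never be taken, or nullptr if the branch is
// not foldable (unconditional, non-constant condition, or identical targets).
static BasicBlock *getDeadSuccessor(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return nullptr;
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return nullptr;
  auto *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return nullptr;
  return Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
}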
3295
3296 // performPRE() will trigger an assert if it comes across an instruction without
3297 // an associated value number. As a function normally has far more live instructions
3298 // than dead instructions, it makes more sense just to "fabricate" a value number
3299 // for the dead code than to check whether each instruction involved is dead.
3300void GVNPass::assignValNumForDeadCode() {
3301 for (BasicBlock *BB : DeadBlocks) {
3302 for (Instruction &Inst : *BB) {
3303 unsigned ValNum = VN.lookupOrAdd(&Inst);
3304 LeaderTable.insert(ValNum, &Inst, BB);
3305 }
3306 }
3307}
3308
3309 class llvm::gvn::GVNLegacyPass : public FunctionPass {
3310 public:
3311 static char ID; // Pass identification, replacement for typeid
3312
3313 explicit GVNLegacyPass(bool MemDepAnalysis = GVNEnableMemDep,
3314 bool MemSSAAnalysis = GVNEnableMemorySSA)
3315 : FunctionPass(ID), Impl(GVNOptions()
3316 .setMemDep(MemDepAnalysis)
3317 .setMemorySSA(MemSSAAnalysis)) {
3318 initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
3319 }
3320
3321 bool runOnFunction(Function &F) override {
3322 if (skipFunction(F))
3323 return false;
3324
3325 auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3326 if (Impl.isMemorySSAEnabled() && !MSSAWP)
3327 MSSAWP = &getAnalysis<MemorySSAWrapperPass>();
3328
3329 return Impl.runImpl(
3330 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
3331 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3332 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
3333 getAnalysis<AAResultsWrapperPass>().getAAResults(),
3334 Impl.isMemDepEnabled()
3335 ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3336 : nullptr,
3337 getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
3338 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3339 MSSAWP ? &MSSAWP->getMSSA() : nullptr);
3340 }
3341
3342 void getAnalysisUsage(AnalysisUsage &AU) const override {
3343 AU.addRequired<AssumptionCacheTracker>();
3344 AU.addRequired<DominatorTreeWrapperPass>();
3345 AU.addRequired<TargetLibraryInfoWrapperPass>();
3346 AU.addRequired<LoopInfoWrapperPass>();
3347 if (Impl.isMemDepEnabled())
3348 AU.addRequired<MemoryDependenceWrapperPass>();
3356 if (Impl.isMemorySSAEnabled())
3357 AU.addRequired<MemorySSAWrapperPass>();
3358 }
3359
3360private:
3361 GVNPass Impl;
3362};
3363
3364char GVNLegacyPass::ID = 0;
3365
3366INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3376
3377 // The public interface to this file...
3378 FunctionPass *llvm::createGVNPass() { return new GVNLegacyPass(); }
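For completeness, a usage sketch from the client side, not part of GVN.cpp: scheduling the legacy pass through createGVNPass() with the legacy pass manager. This assumes the declaration of createGVNPass() is visible, e.g. via llvm/Transforms/Scalar/GVN.h; the wrapper function name is hypothetical.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar/GVN.h"

// Run GVN (legacy pass manager) over every function in M.
static void runLegacyGVN(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createGVNPass());
  PM.run(M);
}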