LLVM 22.0.0git
LICM.cpp
Go to the documentation of this file.
1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs loop invariant code motion, attempting to remove as much
10// code from the body of a loop as possible. It does this by either hoisting
11// code into the preheader block, or by sinking code to the exit blocks if it is
12// safe. This pass also promotes must-aliased memory locations in the loop to
13// live in registers, thus hoisting and sinking "invariant" loads and stores.
14//
15// Hoisting operations out of loops is a canonicalization transform. It
16// enables and simplifies subsequent optimizations in the middle-end.
17// Rematerialization of hoisted instructions to reduce register pressure is the
18// responsibility of the back-end, which has more accurate information about
19// register pressure and also handles other optimizations than LICM that
20// increase live-ranges.
21//
22// This pass uses alias analysis for two purposes:
23//
24// 1. Moving loop invariant loads and calls out of loops. If we can determine
25// that a load or call inside of a loop never aliases anything stored to,
26// we can hoist it or sink it like any other instruction.
27// 2. Scalar Promotion of Memory - If there is a store instruction inside of
28// the loop, we try to move the store to happen AFTER the loop instead of
29// inside of the loop. This can only happen if a few conditions are true:
30// A. The pointer stored through is loop invariant
31// B. There are no stores or loads in the loop which _may_ alias the
32// pointer. There are no calls in the loop which mod/ref the pointer.
33// If these conditions are true, we can promote the loads and stores in the
34// loop of the pointer to use a temporary alloca'd variable. We then use
35// the SSAUpdater to construct the appropriate SSA form for the value.
36//
37//===----------------------------------------------------------------------===//
38
42#include "llvm/ADT/Statistic.h"
50#include "llvm/Analysis/Loads.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/Dominators.h"
69#include "llvm/IR/IRBuilder.h"
72#include "llvm/IR/LLVMContext.h"
73#include "llvm/IR/Metadata.h"
78#include "llvm/Support/Debug.h"
86#include <algorithm>
87#include <utility>
88using namespace llvm;
89
// Forward declaration only: the pass entry points below take LPMUpdater by
// reference, so the full definition is not needed in this file.
90namespace llvm {
91class LPMUpdater;
92} // namespace llvm
93
// All LLVM_DEBUG output and statistics in this file are keyed under "licm".
94#define DEBUG_TYPE "licm"
95
// Pass-wide counters reported with -stats; each STATISTIC below tracks one
// kind of transformation LICM performed.
96STATISTIC(NumCreatedBlocks, "Number of blocks created");
97STATISTIC(NumClonedBranches, "Number of branches cloned");
98STATISTIC(NumSunk, "Number of instructions sunk out of loop");
99STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
100STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
101STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
102STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
103STATISTIC(NumLoadPromoted, "Number of load-only promotions");
104STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
105STATISTIC(NumMinMaxHoisted,
106 "Number of min/max expressions hoisted out of the loop");
107STATISTIC(NumGEPsHoisted,
108 "Number of geps reassociated and hoisted out of the loop");
109STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
110 "and hoisted out of the loop");
111STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
112 "reassociated and hoisted out of the loop");
113STATISTIC(NumIntAssociationsHoisted,
114 "Number of invariant int expressions "
115 "reassociated and hoisted out of the loop");
116STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
117 "reassociated and hoisted out of the loop");
118
// Command-line tunables for LICM. NOTE(review): this excerpt is a text dump
// with embedded original line numbers; several declaration-opening lines
// (e.g. 124, 132, 137, 143, 157, 165) are missing, so the variable names of
// some options are not visible here.
119/// Memory promotion is enabled by default.
120static cl::opt<bool>
121 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
122 cl::desc("Disable memory promotion in LICM pass"));
123
// Declaration head missing from this dump; by the option string this is the
// flag that gates ControlFlowHoister below — presumably ControlFlowHoisting.
125 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
126 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
127
128static cl::opt<bool>
129 SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
130 cl::desc("Force thread model single in LICM pass"));
131
133 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
134 cl::desc("Max num uses visited for identifying load "
135 "invariance in loop using invariant start (default = 8)"));
136
137 "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
// Forward declarations of the static helpers implemented later in this file.
// NOTE(review): embedded line numbers show gaps (177, 181-182, 185-186, 191,
// 196, 203, 205, 212, 214, 218, 220-221), so some parameter lists and
// declaration heads are missing from this excerpt.
174static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
175static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
176 const LoopSafetyInfo *SafetyInfo,
178 bool &FoldableInLoop, bool LoopNestMode);
179static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
180 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
183static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
184 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
187 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
188 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
189 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
190 AssumptionCache *AC, bool AllowSpeculation);
192 AAResults *AA, Loop *CurLoop,
193 SinkAndHoistLICMFlags &Flags);
194static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
195 Loop *CurLoop, Instruction &I,
197 bool InvariantGroup);
198static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
199 MemoryUse &MU);
200/// Aggregates various functions for hoisting computations out of loop.
201static bool hoistArithmetics(Instruction &I, Loop &L,
202 ICFLoopSafetyInfo &SafetyInfo,
204 DominatorTree *DT);
206 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
207 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);
208
209static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
210 MemorySSAUpdater &MSSAU);
211
213 ICFLoopSafetyInfo &SafetyInfo,
215
216static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
217 function_ref<void(Instruction *)> Fn);
219 std::pair<SmallSetVector<Value *, 8>, bool>;
222
223namespace {
// Shared implementation driver for LICM: both the legacy pass and the new-PM
// LICMPass/LNICMPass wrappers construct one of these with the MemorySSA caps
// and speculation policy, then call runOnLoop. It holds no per-loop state.
224struct LoopInvariantCodeMotion {
// NOTE(review): embedded line numbers jump 225->228, so runOnLoop's middle
// parameters (AC/TLI/TTI by the call sites below) are missing in this dump.
225 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
228 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
229
230 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
231 unsigned LicmMssaNoAccForPromotionCap,
232 bool LicmAllowSpeculation)
233 : LicmMssaOptCap(LicmMssaOptCap),
234 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
235 LicmAllowSpeculation(LicmAllowSpeculation) {}
236
237private:
// Cap on MemorySSA getClobberingMemoryAccess walks (see option comment above).
238 unsigned LicmMssaOptCap;
// Max memory accesses in a loop before promotion is skipped entirely.
239 unsigned LicmMssaNoAccForPromotionCap;
// Whether hoisting may speculate instructions that are not guaranteed to run.
240 bool LicmAllowSpeculation;
241};
242
// Legacy pass-manager wrapper around LoopInvariantCodeMotion. Gathers the
// required analyses from the old PM and forwards to LICM.runOnLoop.
243struct LegacyLICMPass : public LoopPass {
244 static char ID; // Pass identification, replacement for typeid
245 LegacyLICMPass(
246 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
247 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
248 bool LicmAllowSpeculation = true)
249 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
250 LicmAllowSpeculation) {
// NOTE(review): original line 251 (typically the initialize*Pass call) is
// missing from this dump.
252 }
253
254 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
255 if (skipLoop(L))
256 return false;
257
258 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
259 << L->getHeader()->getNameOrAsOperand() << "\n");
260
261 Function *F = L->getHeader()->getParent();
262
// SCEV is optional for LICM; only fetch it if some earlier pass computed it.
263 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
264 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
265 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
266 // pass. Function analyses need to be preserved across loop transformations
267 // but ORE cannot be preserved (see comment before the pass definition).
268 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
269 return LICM.runOnLoop(
270 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
271 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
272 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
273 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
274 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
275 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
276 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
277 }
278
279 /// This transformation requires natural loop information & requires that
280 /// loop preheaders be inserted into the CFG...
281 ///
282 void getAnalysisUsage(AnalysisUsage &AU) const override {
283 AU.addPreserved<DominatorTreeWrapperPass>();
284 AU.addPreserved<LoopInfoWrapperPass>();
285 AU.addRequired<TargetLibraryInfoWrapperPass>();
286 AU.addRequired<MemorySSAWrapperPass>();
287 AU.addPreserved<MemorySSAWrapperPass>();
288 AU.addRequired<TargetTransformInfoWrapperPass>();
289 AU.addRequired<AssumptionCacheTracker>();
// NOTE(review): lines 290-291 are missing from this dump.
292 AU.addPreserved<LazyBlockFrequencyInfoPass>();
293 AU.addPreserved<LazyBranchProbabilityInfoPass>();
294 }
295
296private:
297 LoopInvariantCodeMotion LICM;
298};
299} // namespace
300
// New-PM per-loop entry point (the signature line, original 301-302, is
// missing from this dump; by the body this is LICMPass::run).
// Requires the loop pipeline to have been built with MemorySSA (loop-mssa).
303 if (!AR.MSSA)
304 reportFatalUsageError("LICM requires MemorySSA (loop-mssa)");
305
306 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
307 // pass. Function analyses need to be preserved across loop transformations
308 // but ORE cannot be preserved (see comment before the pass definition).
309 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
310
311 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
312 Opts.AllowSpeculation);
313 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
314 &AR.SE, AR.MSSA, &ORE))
315 return PreservedAnalyses::all();
316
// NOTE(review): line 317 (the PreservedAnalyses PA declaration) is missing
// from this dump; MemorySSA is explicitly kept valid after the transform.
318 PA.preserve<MemorySSAAnalysis>();
319
320 return PA;
321}
322
// Pipeline printing for LICMPass (the opening signature line, original 323,
// is missing from this dump). Emits "licm<allowspeculation>" or
// "licm<no-allowspeculation>" so -print-pipeline-passes round-trips.
324 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
325 static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
326 OS, MapClassName2PassName);
327
328 OS << '<';
329 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
330 OS << '>';
331}
332
// Loop-nest variant of LICM (signature lines 333-334 missing from this dump;
// by the body this is LNICMPass::run). Runs LICM once over the outermost
// loop with LoopNestMode=true instead of per-loop.
335 LPMUpdater &) {
336 if (!AR.MSSA)
337 reportFatalUsageError("LNICM requires MemorySSA (loop-mssa)");
338
339 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
340 // pass. Function analyses need to be preserved across loop transformations
341 // but ORE cannot be preserved (see comment before the pass definition).
// NOTE(review): line 342 (the ORE construction) is missing from this dump.
343
344 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
345 Opts.AllowSpeculation);
346
347 Loop &OutermostLoop = LN.getOutermostLoop();
348 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
349 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
350
351 if (!Changed)
352 return PreservedAnalyses::all();
353
// NOTE(review): line 354 (the PreservedAnalyses PA declaration) is missing
// from this dump.
355
356 PA.preserve<DominatorTreeAnalysis>();
357 PA.preserve<LoopAnalysis>();
358 PA.preserve<MemorySSAAnalysis>();
359
360 return PA;
361}
362
// Pipeline printing for LNICMPass (opening signature line, original 363,
// missing from this dump); mirrors LICMPass::printPipeline above.
364 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
365 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
366 OS, MapClassName2PassName);
367
368 OS << '<';
369 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
370 OS << '>';
371}
372
// Legacy pass registration. NOTE(review): lines 376-380 (the
// INITIALIZE_PASS_DEPENDENCY entries between BEGIN and END) are missing from
// this dump.
373char LegacyLICMPass::ID = 0;
374INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
375 false, false)
381INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
382 false)
383
// Factory used by the legacy pass manager; defaults come from the
// LegacyLICMPass constructor defaults above.
384Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
385
390
// Constructor for the sink/hoist flag bundle (declaration head at original
// line 391 and part of the member-init list at 394-395 are missing from this
// dump). Scans the loop's MemorySSA accesses and latches NoOfMemAccTooLarge
// as soon as the count exceeds the promotion cap, so promotion can be
// skipped cheaply later.
392 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
393 Loop &L, MemorySSA &MSSA)
396 IsSink(IsSink) {
397 unsigned AccessCapCount = 0;
398 for (auto *BB : L.getBlocks())
399 if (const auto *Accesses = MSSA.getBlockAccesses(BB))
400 for (const auto &MA : *Accesses) {
// Only the count matters; silence the unused-variable warning.
401 (void)MA;
402 ++AccessCapCount;
403 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
404 NoOfMemAccTooLarge = true;
// Early exit: once over the cap there is no reason to keep counting.
405 return;
406 }
407 }
408}
409
410/// Hoist expressions out of the specified loop. Note, alias info for inner
411/// loop is not preserved so it is not a good idea to run LICM multiple
412/// times on one loop.
///
/// Pipeline: (1) sink instructions toward exits, (2) hoist invariants into
/// the preheader, (3) promote must-aliased memory locations to registers.
/// Returns true if the IR changed. NOTE(review): this dump omits several
/// original lines (414-416, 418, 426, 497, 506, 548), including some
/// parameters, the licm.disable metadata check's condition, and the
/// InsertPts/PIC declarations referenced below.
413bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
417 ScalarEvolution *SE, MemorySSA *MSSA,
419 bool LoopNestMode) {
420 bool Changed = false;
421
422 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
423
424 // If this loop has metadata indicating that LICM is not to be performed then
425 // just exit.
427 return false;
428 }
429
430 // Don't sink stores from loops with coroutine suspend instructions.
431 // LICM would sink instructions into the default destination of
432 // the coroutine switch. The default destination of the switch is to
433 // handle the case where the coroutine is suspended, by which point the
434 // coroutine frame may have been destroyed. No instruction can be sunk there.
435 // FIXME: This would unfortunately hurt the performance of coroutines, however
436 // there is currently no general solution for this. Similar issues could also
437 // potentially happen in other passes where instructions are being moved
438 // across that edge.
439 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
440 using namespace PatternMatch;
441 return any_of(make_pointer_range(*BB),
442 match_fn(m_Intrinsic<Intrinsic::coro_suspend>()));
443 });
444
445 MemorySSAUpdater MSSAU(MSSA);
446 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
447 /*IsSink=*/true, *L, *MSSA);
448
449 // Get the preheader block to move instructions into...
450 BasicBlock *Preheader = L->getLoopPreheader();
451
452 // Compute loop safety information.
453 ICFLoopSafetyInfo SafetyInfo;
454 SafetyInfo.computeLoopSafetyInfo(L);
455
456 // We want to visit all of the instructions in this loop... that are not parts
457 // of our subloops (they have already had their invariants hoisted out of
458 // their loop, into this loop, so there is no need to process the BODIES of
459 // the subloops).
460 //
461 // Traverse the body of the loop in depth first order on the dominator tree so
462 // that we are guaranteed to see definitions before we see uses. This allows
463 // us to sink instructions in one pass, without iteration. After sinking
464 // instructions, we perform another pass to hoist them out of the loop.
465 if (L->hasDedicatedExits())
466 Changed |=
467 LoopNestMode
468 ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
469 TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
470 : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
471 MSSAU, &SafetyInfo, Flags, ORE);
// Flip the flag bundle from sink mode to hoist mode for the second phase.
472 Flags.setIsSink(false);
473 if (Preheader)
474 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
475 MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
476 LicmAllowSpeculation);
477
478 // Now that all loop invariants have been removed from the loop, promote any
479 // memory references to scalars that we can.
480 // Don't sink stores from loops without dedicated block exits. Exits
481 // containing indirect branches are not transformed by loop simplify,
482 // make sure we catch that. An additional load may be generated in the
483 // preheader for SSA updater, so also avoid sinking when no preheader
484 // is available.
485 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
486 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
487 // Figure out the loop exits and their insertion points
488 SmallVector<BasicBlock *, 8> ExitBlocks;
489 L->getUniqueExitBlocks(ExitBlocks);
490
491 // We can't insert into a catchswitch.
492 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
493 return isa<CatchSwitchInst>(Exit->getTerminator());
494 });
495
496 if (!HasCatchSwitch) {
498 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
499 InsertPts.reserve(ExitBlocks.size());
500 MSSAInsertPts.reserve(ExitBlocks.size());
501 for (BasicBlock *ExitBlock : ExitBlocks) {
502 InsertPts.push_back(ExitBlock->getFirstInsertionPt());
503 MSSAInsertPts.push_back(nullptr);
504 }
505
507
508 // Promoting one set of accesses may make the pointers for another set
509 // loop invariant, so run this in a loop.
510 bool Promoted = false;
511 bool LocalPromoted;
512 do {
513 LocalPromoted = false;
514 for (auto [PointerMustAliases, HasReadsOutsideSet] :
515 collectPromotionCandidates(MSSA, AA, L)) {
516 LocalPromoted |= promoteLoopAccessesToScalars(
517 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
518 DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
519 LicmAllowSpeculation, HasReadsOutsideSet);
520 }
521 Promoted |= LocalPromoted;
522 } while (LocalPromoted);
523
524 // Once we have promoted values across the loop body we have to
525 // recursively reform LCSSA as any nested loop may now have values defined
526 // within the loop used in the outer loop.
527 // FIXME: This is really heavy handed. It would be a bit better to use an
528 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
529 // it as it went.
530 if (Promoted)
531 formLCSSARecursively(*L, *DT, LI, SE);
532
533 Changed |= Promoted;
534 }
535 }
536
537 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
538 // specifically moving instructions across the loop boundary and so it is
539 // especially in need of basic functional correctness checking here.
540 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
541 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
542 "Parent loop not left in LCSSA form after LICM!");
543
544 if (VerifyMemorySSA)
545 MSSA->verifyMemorySSA();
546
// NOTE(review): the statement body for this condition (original line 548,
// presumably invalidating SCEV's cached loop dispositions) is missing here.
547 if (Changed && SE)
549 return Changed;
550}
551
552/// Walk the specified region of the CFG (defined by all blocks dominated by
553/// the specified block, and that are in the current loop) in reverse depth
554/// first order w.r.t the DominatorTree. This allows us to visit uses before
555/// definitions, allowing us to sink a loop body in one pass without iteration.
556///
/// Returns true if any instruction was sunk or deleted. NOTE(review): this
/// dump omits original lines 557-558 (the signature head and first params),
/// 561 (a params line), 572 (the Worklist declaration), and a few others
/// noted below.
559 TargetTransformInfo *TTI, Loop *CurLoop,
560 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
562 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
563
564 // Verify inputs.
565 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
566 CurLoop != nullptr && SafetyInfo != nullptr &&
567 "Unexpected input to sinkRegion.");
568
569 // We want to visit children before parents. We will enqueue all the parents
570 // before their children in the worklist and process the worklist in reverse
571 // order.
573 collectChildrenInLoop(DT, N, CurLoop);
574
575 bool Changed = false;
576 for (BasicBlock *BB : reverse(Worklist)) {
577 // subloop (which would already have been processed).
578 if (inSubLoop(BB, CurLoop, LI))
579 continue;
580
// Iterate backwards so sinking/erasing the current instruction does not
// invalidate the iterator for the remaining (earlier) instructions.
581 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
582 Instruction &I = *--II;
583
584 // The instruction is not used in the loop if it is dead. In this case,
585 // we just delete it instead of sinking it.
586 if (isInstructionTriviallyDead(&I, TLI)) {
587 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
// Step past I before erasing it so the loop iterator stays valid.
590 ++II;
591 eraseInstruction(I, *SafetyInfo, MSSAU);
592 Changed = true;
593 continue;
594 }
595
596 // Check to see if we can sink this instruction to the exit blocks
597 // of the loop. We can do this if the all users of the instruction are
598 // outside of the loop. In this case, it doesn't even matter if the
599 // operands of the instruction are loop invariant.
600 //
601 bool FoldableInLoop = false;
602 bool LoopNestMode = OutermostLoop != nullptr;
603 if (!I.mayHaveSideEffects() &&
604 isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
605 SafetyInfo, TTI, FoldableInLoop,
606 LoopNestMode) &&
607 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
608 if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
609 if (!FoldableInLoop) {
// sink() cloned I into the exits; remove the original from the loop.
610 ++II;
612 eraseInstruction(I, *SafetyInfo, MSSAU);
613 }
614 Changed = true;
615 }
616 }
617 }
618 }
619 if (VerifyMemorySSA)
620 MSSAU.getMemorySSA()->verifyMemorySSA();
621 return Changed;
622}
623
// Loop-nest driver for sinking: runs sinkRegion over CurLoop and every loop
// nested inside it, passing CurLoop as the OutermostLoop so "used outside the
// loop" is judged against the whole nest. NOTE(review): this dump omits the
// signature head (original 624-625), trailing params (629-630), and the
// Worklist declaration (633).
626 TargetTransformInfo *TTI, Loop *CurLoop,
627 MemorySSAUpdater &MSSAU,
628 ICFLoopSafetyInfo *SafetyInfo,
631
632 bool Changed = false;
633 Worklist.insert(CurLoop);
635 appendLoopsToWorklist(*CurLoop, Worklist);
636 while (!Worklist.empty()) {
637 Loop *L = Worklist.pop_back_val();
638 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
639 MSSAU, SafetyInfo, Flags, ORE, CurLoop);
640 }
641 return Changed;
642}
643
644namespace {
645// This is a helper class for hoistRegion to make it able to hoist control flow
646// in order to be able to hoist phis. The way this works is that we initially
647// start hoisting to the loop preheader, and when we see a loop invariant branch
648// we make note of this. When we then come to hoist an instruction that's
649// conditional on such a branch we duplicate the branch and the relevant control
650// flow, then hoist the instruction into the block corresponding to its original
651// block in the duplicated control flow.
652class ControlFlowHoister {
653private:
654 // Information about the loop we are hoisting from
655 LoopInfo *LI;
656 DominatorTree *DT;
657 Loop *CurLoop;
658 MemorySSAUpdater &MSSAU;
659
660 // A map of blocks in the loop to the block their instructions will be hoisted
661 // to.
662 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
663
664 // The branches that we can hoist, mapped to the block that marks a
665 // convergence point of their control flow.
666 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
667
668public:
 // All analyses are borrowed (non-owning); callers keep them alive for the
 // lifetime of this helper.
669 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
670 MemorySSAUpdater &MSSAU)
671 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
672
673 void registerPossiblyHoistableBranch(BranchInst *BI) {
674 // We can only hoist conditional branches with loop invariant operands.
675 if (!ControlFlowHoisting || !BI->isConditional() ||
676 !CurLoop->hasLoopInvariantOperands(BI))
677 return;
678
679 // The branch destinations need to be in the loop, and we don't gain
680 // anything by duplicating conditional branches with duplicate successors,
681 // as it's essentially the same as an unconditional branch.
682 BasicBlock *TrueDest = BI->getSuccessor(0);
683 BasicBlock *FalseDest = BI->getSuccessor(1);
684 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
685 TrueDest == FalseDest)
686 return;
687
688 // We can hoist BI if one branch destination is the successor of the other,
689 // or both have common successor which we check by seeing if the
690 // intersection of their successors is non-empty.
691 // TODO: This could be expanded to allowing branches where both ends
692 // eventually converge to a single block.
693 SmallPtrSet<BasicBlock *, 4> TrueDestSucc(llvm::from_range,
694 successors(TrueDest));
695 SmallPtrSet<BasicBlock *, 4> FalseDestSucc(llvm::from_range,
696 successors(FalseDest));
697 BasicBlock *CommonSucc = nullptr;
698 if (TrueDestSucc.count(FalseDest)) {
699 CommonSucc = FalseDest;
700 } else if (FalseDestSucc.count(TrueDest)) {
701 CommonSucc = TrueDest;
702 } else {
703 set_intersect(TrueDestSucc, FalseDestSucc);
704 // If there's one common successor use that.
705 if (TrueDestSucc.size() == 1)
706 CommonSucc = *TrueDestSucc.begin();
707 // If there's more than one pick whichever appears first in the block list
708 // (we can't use the value returned by TrueDestSucc.begin() as it's
709 // unpredicatable which element gets returned).
710 else if (!TrueDestSucc.empty()) {
711 Function *F = TrueDest->getParent();
712 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
713 auto It = llvm::find_if(*F, IsSucc);
714 assert(It != F->end() && "Could not find successor in function");
715 CommonSucc = &*It;
716 }
717 }
718 // The common successor has to be dominated by the branch, as otherwise
719 // there will be some other path to the successor that will not be
720 // controlled by this branch so any phi we hoist would be controlled by the
721 // wrong condition. This also takes care of avoiding hoisting of loop back
722 // edges.
723 // TODO: In some cases this could be relaxed if the successor is dominated
724 // by another block that's been hoisted and we can guarantee that the
725 // control flow has been replicated exactly.
726 if (CommonSucc && DT->dominates(BI, CommonSucc))
727 HoistableBranches[BI] = CommonSucc;
728 }
729
730 bool canHoistPHI(PHINode *PN) {
731 // The phi must have loop invariant operands.
732 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
733 return false;
734 // We can hoist phis if the block they are in is the target of hoistable
735 // branches which cover all of the predecessors of the block.
736 BasicBlock *BB = PN->getParent();
737 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks(llvm::from_range,
738 predecessors(BB));
739 // If we have less predecessor blocks than predecessors then the phi will
740 // have more than one incoming value for the same block which we can't
741 // handle.
742 // TODO: This could be handled be erasing some of the duplicate incoming
743 // values.
744 if (PredecessorBlocks.size() != pred_size(BB))
745 return false;
746 for (auto &Pair : HoistableBranches) {
747 if (Pair.second == BB) {
748 // Which blocks are predecessors via this branch depends on if the
749 // branch is triangle-like or diamond-like.
750 if (Pair.first->getSuccessor(0) == BB) {
751 PredecessorBlocks.erase(Pair.first->getParent());
752 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
753 } else if (Pair.first->getSuccessor(1) == BB) {
754 PredecessorBlocks.erase(Pair.first->getParent());
755 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
756 } else {
757 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
758 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
759 }
760 }
761 }
762 // PredecessorBlocks will now be empty if for every predecessor of BB we
763 // found a hoistable branch source.
764 return PredecessorBlocks.empty();
765 }
766
 // Return the block into which instructions from BB should be hoisted,
 // creating replicated control flow outside the loop if BB is guarded by a
 // registered hoistable branch. NOTE(review): this dump omits original line
 // 768 (the condition guarding the early preheader return) and line 847 (a
 // statement head, presumably an MSSAU edge update for the new preheader).
767 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
769 return CurLoop->getLoopPreheader();
770 // If BB has already been hoisted, return that
771 if (auto It = HoistDestinationMap.find(BB); It != HoistDestinationMap.end())
772 return It->second;
773
774 // Check if this block is conditional based on a pending branch
775 auto HasBBAsSuccessor =
776 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
777 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
778 Pair.first->getSuccessor(1) == BB);
779 };
780 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
781
782 // If not involved in a pending branch, hoist to preheader
783 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
784 if (It == HoistableBranches.end()) {
785 LLVM_DEBUG(dbgs() << "LICM using "
786 << InitialPreheader->getNameOrAsOperand()
787 << " as hoist destination for "
788 << BB->getNameOrAsOperand() << "\n");
789 HoistDestinationMap[BB] = InitialPreheader;
790 return InitialPreheader;
791 }
792 BranchInst *BI = It->first;
793 assert(std::none_of(std::next(It), HoistableBranches.end(),
794 HasBBAsSuccessor) &&
795 "BB is expected to be the target of at most one branch");
796
797 LLVMContext &C = BB->getContext();
798 BasicBlock *TrueDest = BI->getSuccessor(0);
799 BasicBlock *FalseDest = BI->getSuccessor(1);
800 BasicBlock *CommonSucc = HoistableBranches[BI];
 // Recurse: the hoisted copy of BI's own block anchors the new control flow.
801 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
802
803 // Create hoisted versions of blocks that currently don't have them
804 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
805 auto [It, Inserted] = HoistDestinationMap.try_emplace(Orig);
806 if (!Inserted)
807 return It->second;
808 BasicBlock *New =
809 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
810 It->second = New;
811 DT->addNewBlock(New, HoistTarget);
812 if (CurLoop->getParentLoop())
813 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
814 ++NumCreatedBlocks;
815 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
816 << " as hoist destination for " << Orig->getName()
817 << "\n");
818 return New;
819 };
820 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
821 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
822 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
823
824 // Link up these blocks with branches.
825 if (!HoistCommonSucc->getTerminator()) {
826 // The new common successor we've generated will branch to whatever that
827 // hoist target branched to.
828 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
829 assert(TargetSucc && "Expected hoist target to have a single successor");
830 HoistCommonSucc->moveBefore(TargetSucc);
831 BranchInst::Create(TargetSucc, HoistCommonSucc);
832 }
833 if (!HoistTrueDest->getTerminator()) {
834 HoistTrueDest->moveBefore(HoistCommonSucc);
835 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
836 }
837 if (!HoistFalseDest->getTerminator()) {
838 HoistFalseDest->moveBefore(HoistCommonSucc);
839 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
840 }
841
842 // If BI is being cloned to what was originally the preheader then
843 // HoistCommonSucc will now be the new preheader.
844 if (HoistTarget == InitialPreheader) {
845 // Phis in the loop header now need to use the new preheader.
846 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
848 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
849 // The new preheader dominates the loop header.
850 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
851 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
852 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
853 // The preheader hoist destination is now the new preheader, with the
854 // exception of the hoist destination of this branch.
855 for (auto &Pair : HoistDestinationMap)
856 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
857 Pair.second = HoistCommonSucc;
858 }
859
860 // Now finally clone BI.
861 auto *NewBI =
862 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition(),
863 HoistTarget->getTerminator()->getIterator());
864 HoistTarget->getTerminator()->eraseFromParent();
865 // md_prof should also come from the original branch - since the
866 // condition was hoisted, the branch probabilities shouldn't change.
868 NewBI->copyMetadata(*BI, {LLVMContext::MD_prof});
869 // FIXME: Issue #152767: debug info should also be the same as the
870 // original branch, **if** the user explicitly indicated that.
871 NewBI->setDebugLoc(HoistTarget->getTerminator()->getDebugLoc());
872
873 ++NumClonedBranches;
874
875 assert(CurLoop->getLoopPreheader() &&
876 "Hoisting blocks should not have destroyed preheader");
877 return HoistDestinationMap[BB];
878 }
879};
880} // namespace
881
882/// Walk the specified region of the CFG (defined by all blocks dominated by
883/// the specified block, and that are in the current loop) in depth first
884/// order w.r.t the DominatorTree. This allows us to visit definitions before
885/// uses, allowing us to hoist a loop body in one pass without iteration.
886///
889 TargetLibraryInfo *TLI, Loop *CurLoop,
891 ICFLoopSafetyInfo *SafetyInfo,
893 OptimizationRemarkEmitter *ORE, bool LoopNestMode,
894 bool AllowSpeculation) {
// NOTE(review): the opening line(s) of this signature (orig. 886-888, 890,
// 892) are elided in this excerpt. The assert message below names the
// function as hoistRegion; the DomTreeNode *N, AAResults *AA, LoopInfo *LI,
// DominatorTree *DT parameters referenced below are presumably declared
// there — confirm against upstream LICM.cpp.
895 // Verify inputs.
896 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
897 CurLoop != nullptr && SafetyInfo != nullptr &&
898 "Unexpected input to hoistRegion.");
899
900 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
901
902 // Keep track of instructions that have been hoisted, as they may need to be
903 // re-hoisted if they end up not dominating all of their uses.
904 SmallVector<Instruction *, 16> HoistedInstructions;
905
906 // For PHI hoisting to work we need to hoist blocks before their successors.
907 // We can do this by iterating through the blocks in the loop in reverse
908 // post-order.
909 LoopBlocksRPO Worklist(CurLoop);
910 Worklist.perform(LI);
911 bool Changed = false;
912 BasicBlock *Preheader = CurLoop->getLoopPreheader();
913 for (BasicBlock *BB : Worklist) {
914 // Only need to process the contents of this block if it is not part of a
915 // subloop (which would already have been processed).
916 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
917 continue;
918
// NOTE(review): the inner loop header that binds `I` to each instruction of
// BB (orig. line 919) is elided in this excerpt.
920 // Try hoisting the instruction out to the preheader. We can only do
921 // this if all of the operands of the instruction are loop invariant and
922 // if it is safe to hoist the instruction. We also check block frequency
923 // to make sure instruction only gets hoisted into colder blocks.
924 // TODO: It may be safe to hoist if we are hoisting to a conditional block
925 // and we have accurately duplicated the control flow from the loop header
926 // to that block.
927 if (CurLoop->hasLoopInvariantOperands(&I) &&
928 canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
929 isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo, ORE,
930 Preheader->getTerminator(), AC,
931 AllowSpeculation)) {
932 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
933 MSSAU, SE, ORE);
934 HoistedInstructions.push_back(&I);
935 Changed = true;
936 continue;
937 }
938
939 // Attempt to remove floating point division out of the loop by
940 // converting it to a reciprocal multiplication.
// The transform is only legal under the allow-reciprocal fast-math flag,
// and only when the divisor is loop-invariant.
941 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
942 CurLoop->isLoopInvariant(I.getOperand(1))) {
943 auto Divisor = I.getOperand(1);
944 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
945 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
946 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
947 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
948 ReciprocalDivisor->insertBefore(I.getIterator());
949 ReciprocalDivisor->setDebugLoc(I.getDebugLoc());
950
951 auto Product =
952 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
953 Product->setFastMathFlags(I.getFastMathFlags());
954 SafetyInfo->insertInstructionTo(Product, I.getParent());
955 Product->insertAfter(I.getIterator());
956 Product->setDebugLoc(I.getDebugLoc());
957 I.replaceAllUsesWith(Product);
958 eraseInstruction(I, *SafetyInfo, MSSAU);
959
// Only the loop-invariant reciprocal is hoisted; the multiply stays in
// the loop body where the original fdiv was.
960 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
961 SafetyInfo, MSSAU, SE, ORE);
962 HoistedInstructions.push_back(ReciprocalDivisor);
963 Changed = true;
964 continue;
965 }
966
967 auto IsInvariantStart = [&](Instruction &I) {
968 using namespace PatternMatch;
969 return I.use_empty() &&
// NOTE(review): the matched operand of this conjunction (orig. line 970)
// is elided; per the lambda name it presumably pattern-matches an
// invariant.start intrinsic — confirm against upstream.
971 };
972 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
973 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
974 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
975 };
976 if ((IsInvariantStart(I) || isGuard(&I)) &&
977 CurLoop->hasLoopInvariantOperands(&I) &&
978 MustExecuteWithoutWritesBefore(I)) {
979 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
980 MSSAU, SE, ORE);
981 HoistedInstructions.push_back(&I);
982 Changed = true;
983 continue;
984 }
985
986 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
987 if (CFH.canHoistPHI(PN)) {
988 // Redirect incoming blocks first to ensure that we create hoisted
989 // versions of those blocks before we hoist the phi.
990 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
// NOTE(review): the call opening (orig. line 991) is elided here;
// presumably it sets incoming block i of PN to the hoisted block —
// confirm against upstream LICM.cpp.
992 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
993 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
994 MSSAU, SE, ORE);
995 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
996 Changed = true;
997 continue;
998 }
999 }
1000
1001 // Try to reassociate instructions so that part of computations can be
1002 // done out of loop.
1003 if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
1004 Changed = true;
1005 continue;
1006 }
1007
1008 // Remember possibly hoistable branches so we can actually hoist them
1009 // later if needed.
1010 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
1011 CFH.registerPossiblyHoistableBranch(BI);
1012 }
1013 }
1014
1015 // If we hoisted instructions to a conditional block they may not dominate
1016 // their uses that weren't hoisted (such as phis where some operands are not
1017 // loop invariant). If so make them unconditional by moving them to their
1018 // immediate dominator. We iterate through the instructions in reverse order
1019 // which ensures that when we rehoist an instruction we rehoist its operands,
1020 // and also keep track of where in the block we are rehoisting to make sure
1021 // that we rehoist instructions before the instructions that use them.
1022 Instruction *HoistPoint = nullptr;
1023 if (ControlFlowHoisting) {
1024 for (Instruction *I : reverse(HoistedInstructions)) {
1025 if (!llvm::all_of(I->uses(),
1026 [&](Use &U) { return DT->dominates(I, U); })) {
1027 BasicBlock *Dominator =
1028 DT->getNode(I->getParent())->getIDom()->getBlock();
1029 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1030 if (HoistPoint)
1031 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1032 "New hoist point expected to dominate old hoist point");
1033 HoistPoint = Dominator->getTerminator();
1034 }
1035 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1036 << HoistPoint->getParent()->getNameOrAsOperand()
1037 << ": " << *I << "\n");
1038 moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
1039 SE);
1040 HoistPoint = I;
1041 Changed = true;
1042 }
1043 }
1044 }
1045 if (VerifyMemorySSA)
1046 MSSAU.getMemorySSA()->verifyMemorySSA();
1047
1048 // Now that we've finished hoisting make sure that LI and DT are still
1049 // valid.
1050#ifdef EXPENSIVE_CHECKS
1051 if (Changed) {
1052 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1053 "Dominator tree verification failed");
1054 LI->verify(*DT);
1055 }
1056#endif
1057
1058 return Changed;
1059}
1060
1061// Return true if LI is invariant within scope of the loop. LI is invariant if
1062// CurLoop is dominated by an invariant.start representing the same memory
1063// location and size as the memory location LI loads from, and also the
1064// invariant.start has no uses.
1066 Loop *CurLoop) {
// NOTE(review): the opening line of this signature (orig. 1065) is elided in
// this excerpt; the name and first parameters (a LoadInst *LI and a
// DominatorTree *DT, both used below) are presumably declared there.
1067 Value *Addr = LI->getPointerOperand();
1068 const DataLayout &DL = LI->getDataLayout();
1069 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1070
1071 // It is not currently possible for clang to generate an invariant.start
1072 // intrinsic with scalable vector types because we don't support thread local
1073 // sizeless types and we don't permit sizeless types in structs or classes.
1074 // Furthermore, even if support is added for this in future the intrinsic
1075 // itself is defined to have a size of -1 for variable sized objects. This
1076 // makes it impossible to verify if the intrinsic envelops our region of
1077 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1078 // types would have a -1 parameter, but the former is clearly double the size
1079 // of the latter.
1080 if (LocSizeInBits.isScalable())
1081 return false;
1082
1083 // If we've ended up at a global/constant, bail. We shouldn't be looking at
1084 // uselists for non-local Values in a loop pass.
1085 if (isa<Constant>(Addr))
1086 return false;
1087
1088 unsigned UsesVisited = 0;
1089 // Traverse all uses of the load operand value, to see if invariant.start is
1090 // one of the uses, and whether it dominates the load instruction.
1091 for (auto *U : Addr->users()) {
1092 // Avoid traversing for Load operand with high number of users.
1093 if (++UsesVisited > MaxNumUsesTraversed)
1094 return false;
// NOTE(review): the definition of `II` (orig. line 1095) is elided in this
// excerpt; presumably it casts the user U to an IntrinsicInst — confirm
// against upstream LICM.cpp.
1096 // If there are escaping uses of invariant.start instruction, the load maybe
1097 // non-invariant.
1098 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1099 !II->use_empty())
1100 continue;
1101 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1102 // The intrinsic supports having a -1 argument for variable sized objects
1103 // so we should check for that here.
1104 if (InvariantSize->isNegative())
1105 continue;
1106 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1107 // Confirm the invariant.start location size contains the load operand size
1108 // in bits. Also, the invariant.start should dominate the load, and we
1109 // should not hoist the load out of a loop that contains this dominating
1110 // invariant.start.
1111 if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
1112 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1113 return true;
1114 }
1115
1116 return false;
1117}
1118
1119namespace {
1120/// Return true if-and-only-if we know how to (mechanically) both hoist and
1121/// sink a given instruction out of a loop. Does not address legality
1122/// concerns such as aliasing or speculation safety.
1123bool isHoistableAndSinkableInst(Instruction &I) {
1124 // Only these instructions are hoistable/sinkable.
1125 return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
// NOTE(review): the remaining isa<> alternatives of this disjunction
// (orig. lines 1126-1131) are elided in this excerpt; only the first three
// instruction kinds are visible here.
1132}
1133
1134/// Return true if I is the only Instruction with a MemoryAccess in L.
1135bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1136 const MemorySSAUpdater &MSSAU) {
1137 for (auto *BB : L->getBlocks())
1138 if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
1139 int NotAPhi = 0;
1140 for (const auto &Acc : *Accs) {
1141 if (isa<MemoryPhi>(&Acc))
1142 continue;
1143 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1144 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1145 return false;
1146 }
1147 }
1148 return true;
1149}
1150}
1151
1153 BatchAAResults &BAA,
1154 SinkAndHoistLICMFlags &Flags,
1155 MemoryUseOrDef *MA) {
// NOTE(review): the first line of this signature (orig. 1152) is elided in
// this excerpt.
1156 // See declaration of SetLicmMssaOptCap for usage details.
// Once the walk budget is exhausted, fall back to the cheap (conservative)
// defining access instead of running the clobber walker again.
1157 if (Flags.tooManyClobberingCalls())
1158 return MA->getDefiningAccess();
1159
1160 MemoryAccess *Source =
// NOTE(review): the initializer expression (orig. line 1161) is elided;
// presumably a MemorySSA clobber-walker query for MA — confirm against
// upstream LICM.cpp.
1162 Flags.incrementClobberingCalls();
1163 return Source;
1164}
1165
1167 Loop *CurLoop, MemorySSAUpdater &MSSAU,
1168 bool TargetExecutesOncePerLoop,
1169 SinkAndHoistLICMFlags &Flags,
// NOTE(review): the opening line of this signature (orig. 1166) and its
// final parameter line (orig. 1170) are elided in this excerpt; the body
// below uses I, AA, DT and ORE, which are presumably declared there.
1171 // If we don't understand the instruction, bail early.
1172 if (!isHoistableAndSinkableInst(I))
1173 return false;
1174
1175 MemorySSA *MSSA = MSSAU.getMemorySSA();
1176 // Loads have extra constraints we have to verify before we can hoist them.
1177 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1178 if (!LI->isUnordered())
1179 return false; // Don't sink/hoist volatile or ordered atomic loads!
1180
1181 // Loads from constant memory are always safe to move, even if they end up
1182 // in the same alias set as something that ends up being modified.
1183 if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
1184 return true;
1185 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1186 return true;
1187
1188 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1189 return false; // Don't risk duplicating unordered loads
1190
1191 // This checks for an invariant.start dominating the load.
1192 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1193 return true;
1194
1195 auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));
1196
1197 bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);
1198
1199 bool Invalidated = pointerInvalidatedByLoop(
1200 MSSA, MU, CurLoop, I, Flags, InvariantGroup);
1201 // Check loop-invariant address because this may also be a sinkable load
1202 // whose address is not necessarily loop-invariant.
1203 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1204 ORE->emit([&]() {
// NOTE(review): the remark constructor opening (orig. line 1205) is elided
// here; presumably it returns an OptimizationRemarkMissed — confirm.
1206 DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1207 << "failed to move load with loop-invariant address "
1208 "because the loop may invalidate its value";
1209 });
1210
1211 return !Invalidated;
1212 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1213 // Don't sink calls which can throw.
1214 if (CI->mayThrow())
1215 return false;
1216
1217 // Convergent attribute has been used on operations that involve
1218 // inter-thread communication which results are implicitly affected by the
1219 // enclosing control flows. It is not safe to hoist or sink such operations
1220 // across control flow.
1221 if (CI->isConvergent())
1222 return false;
1223
1224 // FIXME: Current LLVM IR semantics don't work well with coroutines and
1225 // thread local globals. We currently treat getting the address of a thread
1226 // local global as not accessing memory, even though it may not be a
1227 // constant throughout a function with coroutines. Remove this check after
1228 // we better model semantics of thread local globals.
1229 if (CI->getFunction()->isPresplitCoroutine())
1230 return false;
1231
1232 using namespace PatternMatch;
// NOTE(review): a guard condition (orig. line 1233) is elided here;
// per the comment below it presumably matches an assume intrinsic —
// confirm against upstream LICM.cpp.
1234 // Assumes don't actually alias anything or throw
1235 return true;
1236
1237 // Handle simple cases by querying alias analysis.
1238 MemoryEffects Behavior = AA->getMemoryEffects(CI);
1239
1240 if (Behavior.doesNotAccessMemory())
1241 return true;
1242 if (Behavior.onlyReadsMemory()) {
1243 // Might have stale MemoryDef for call that was later inferred to be
1244 // read-only.
1245 auto *MU = dyn_cast<MemoryUse>(MSSA->getMemoryAccess(CI));
1246 if (!MU)
1247 return false;
1248
1249 // If we can prove there are no writes to the memory read by the call, we
1250 // can hoist or sink.
// NOTE(review): the return statement opening (orig. line 1251) is elided;
// presumably a negated pointerInvalidatedByLoop query — confirm upstream.
1252 MSSA, MU, CurLoop, I, Flags, /*InvariantGroup=*/false);
1253 }
1254
1255 if (Behavior.onlyWritesMemory()) {
1256 // can hoist or sink if there are no conflicting read/writes to the
1257 // memory location written to by the call.
1258 return noConflictingReadWrites(CI, MSSA, AA, CurLoop, Flags);
1259 }
1260
1261 return false;
1262 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1263 // Fences alias (most) everything to provide ordering. For the moment,
1264 // just give up if there are any other memory operations in the loop.
1265 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1266 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1267 if (!SI->isUnordered())
1268 return false; // Don't sink/hoist volatile or ordered atomic store!
1269
1270 // We can only hoist a store that we can prove writes a value which is not
1271 // read or overwritten within the loop. For those cases, we fallback to
1272 // load store promotion instead. TODO: We can extend this to cases where
1273 // there is exactly one write to the location and that write dominates an
1274 // arbitrary number of reads in the loop.
1275 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1276 return true;
1277 return noConflictingReadWrites(SI, MSSA, AA, CurLoop, Flags);
1278 }
1279
1280 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1281
1282 // We've established mechanical ability and aliasing, it's up to the caller
1283 // to check fault safety
1284 return true;
1285}
1286
1287/// Returns true if a PHINode is a trivially replaceable with an
1288/// Instruction.
1289/// This is true when all incoming values are that instruction.
1290/// This pattern occurs most often with LCSSA PHI nodes.
1291///
1292static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1293 for (const Value *IncValue : PN.incoming_values())
1294 if (IncValue != &I)
1295 return false;
1296
1297 return true;
1298}
1299
1300/// Return true if the instruction is foldable in the loop.
1301static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1302 const TargetTransformInfo *TTI) {
1303 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1304 InstructionCost CostI =
1305 TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1306 if (CostI != TargetTransformInfo::TCC_Free)
1307 return false;
1308 // For a GEP, we cannot simply use getInstructionCost because currently
1309 // it optimistically assumes that a GEP will fold into addressing mode
1310 // regardless of its users.
1311 const BasicBlock *BB = GEP->getParent();
1312 for (const User *U : GEP->users()) {
1313 const Instruction *UI = cast<Instruction>(U);
1314 if (CurLoop->contains(UI) &&
1315 (BB != UI->getParent() ||
1316 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1317 return false;
1318 }
1319 return true;
1320 }
1321
1322 return false;
1323}
1324
1325/// Return true if the only users of this instruction are outside of
1326/// the loop. If this is true, we can sink the instruction to the exit
1327/// blocks of the loop.
1328///
1329/// We also return true if the instruction could be folded away in lowering.
1330/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1331static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
1332 const LoopSafetyInfo *SafetyInfo,
// NOTE(review): one parameter line (orig. 1333) is elided in this excerpt;
// the body uses TTI, which is presumably declared there.
1334 bool &FoldableInLoop, bool LoopNestMode) {
1335 const auto &BlockColors = SafetyInfo->getBlockColors();
1336 bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
1337 for (const User *U : I.users()) {
1338 const Instruction *UI = cast<Instruction>(U);
1339 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1340 const BasicBlock *BB = PN->getParent();
1341 // We cannot sink uses in catchswitches.
// NOTE(review): the guard condition (orig. line 1342) is elided here;
// per the comment above it presumably checks for a catchswitch terminator
// of BB — confirm against upstream LICM.cpp.
1343 return false;
1344
1345 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1346 // phi use is too muddled.
1347 if (isa<CallInst>(I))
1348 if (!BlockColors.empty() &&
1349 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1350 return false;
1351
// In loop-nest mode, look through single-operand, single-user PHI chains
// inside the loop so the real consumer is what gets classified below.
1352 if (LoopNestMode) {
1353 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1354 UI->getNumOperands() == 1) {
1355 if (!CurLoop->contains(UI))
1356 break;
1357 UI = cast<Instruction>(UI->user_back());
1358 }
1359 }
1360 }
1361
1362 if (CurLoop->contains(UI)) {
1363 if (IsFoldable) {
1364 FoldableInLoop = true;
1365 continue;
1366 }
1367 return false;
1368 }
1369 }
1370 return true;
1371}
1372
1374 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1375 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
// NOTE(review): the opening line of this signature (orig. 1373) is elided in
// this excerpt; per the call sites below this is cloneInstructionInExitBlock.
1376 Instruction *New;
1377 if (auto *CI = dyn_cast<CallInst>(&I)) {
1378 const auto &BlockColors = SafetyInfo->getBlockColors();
1379
1380 // Sinking call-sites need to be handled differently from other
1381 // instructions. The cloned call-site needs a funclet bundle operand
1382 // appropriate for its location in the CFG.
// NOTE(review): the declaration of `OpBundles` (orig. line 1383) is elided
// in this excerpt; it is presumably a small vector of operand bundle defs.
// Copy every bundle except any existing funclet bundle; the correct
// funclet for the exit block is re-added below.
1384 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1385 BundleIdx != BundleEnd; ++BundleIdx) {
1386 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1387 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1388 continue;
1389
1390 OpBundles.emplace_back(Bundle);
1391 }
1392
1393 if (!BlockColors.empty()) {
1394 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1395 assert(CV.size() == 1 && "non-unique color for exit block!");
1396 BasicBlock *BBColor = CV.front();
1397 BasicBlock::iterator EHPad = BBColor->getFirstNonPHIIt();
1398 if (EHPad->isEHPad())
1399 OpBundles.emplace_back("funclet", &*EHPad);
1400 }
1401
1402 New = CallInst::Create(CI, OpBundles);
1403 New->copyMetadata(*CI);
1404 } else {
1405 New = I.clone();
1406 }
1407
1408 New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
1409 if (!I.getName().empty())
1410 New->setName(I.getName() + ".le");
1411
1412 if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
1413 // Create a new MemoryAccess and let MemorySSA set its defining access.
1414 // After running some passes, MemorySSA might be outdated, and the
1415 // instruction `I` may have become a non-memory touching instruction.
1416 MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
1417 New, nullptr, New->getParent(), MemorySSA::Beginning,
1418 /*CreationMustSucceed=*/false);
1419 if (NewMemAcc) {
1420 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1421 MSSAU.insertDef(MemDef, /*RenameUses=*/true);
1422 else {
1423 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1424 MSSAU.insertUse(MemUse, /*RenameUses=*/true);
1425 }
1426 }
1427 }
1428
1429 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1430 // this is particularly cheap because we can rip off the PHI node that we're
1431 // replacing for the number and blocks of the predecessors.
1432 // OPT: If this shows up in a profile, we can instead finish sinking all
1433 // invariant instructions, and then walk their operands to re-establish
1434 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1435 // sinking bottom-up.
1436 for (Use &Op : New->operands())
1437 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1438 auto *OInst = cast<Instruction>(Op.get());
1439 PHINode *OpPN =
1440 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1441 OInst->getName() + ".lcssa");
1442 OpPN->insertBefore(ExitBlock.begin());
1443 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1444 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1445 Op = OpPN;
1446 }
1447 return New;
1448}
1449
1451 MemorySSAUpdater &MSSAU) {
// NOTE(review): the opening line of this signature (orig. 1450) is elided in
// this excerpt; per the call sites this is eraseInstruction(Instruction &I,
// ICFLoopSafetyInfo &SafetyInfo, ...). The body detaches I from MemorySSA
// and the safety info before erasing it from its parent block.
1452 MSSAU.removeMemoryAccess(&I);
1453 SafetyInfo.removeInstruction(&I);
1454 I.eraseFromParent();
1455}
1456
1458 ICFLoopSafetyInfo &SafetyInfo,
1459 MemorySSAUpdater &MSSAU,
1460 ScalarEvolution *SE) {
// NOTE(review): the opening line of this signature (orig. 1457) is elided in
// this excerpt; per the call sites this is moveInstructionBefore(Instruction
// &I, BasicBlock::iterator Dest, ...).
// Keep the safety info in sync with the move before relocating I itself.
1461 SafetyInfo.removeInstruction(&I);
1462 SafetyInfo.insertInstructionTo(&I, Dest->getParent());
1463 I.moveBefore(*Dest->getParent(), Dest);
// NOTE(review): the condition opening that binds `OldMemAcc` (orig. line
// 1464) is elided; presumably a cast of I's MemorySSA access — confirm.
1465 MSSAU.getMemorySSA()->getMemoryAccess(&I)))
1466 MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
// NOTE(review): the final argument of moveToPlace (orig. line 1467) and the
// SE update statement (orig. line 1469) are elided in this excerpt.
1468 if (SE)
1470}
1471
1473 PHINode *TPN, Instruction *I, LoopInfo *LI,
1475 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1476 MemorySSAUpdater &MSSAU) {
// NOTE(review): the signature opening (orig. 1472), a parameter line
// (orig. 1474, presumably the SunkCopies map used below) and the assert
// opening (orig. 1477) are elided in this excerpt.
1478 "Expect only trivially replaceable PHI");
1479 BasicBlock *ExitBlock = TPN->getParent();
// Memoize one clone per exit block: only clone I the first time this exit
// block is seen, then reuse the cached copy.
1480 auto [It, Inserted] = SunkCopies.try_emplace(ExitBlock);
1481 if (Inserted)
1482 It->second = cloneInstructionInExitBlock(*I, *ExitBlock, *TPN, LI,
1483 SafetyInfo, MSSAU);
1484 return It->second;
1485}
1486
1487static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1488 BasicBlock *BB = PN->getParent();
1489 if (!BB->canSplitPredecessors())
1490 return false;
1491 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1492 // it require updating BlockColors for all offspring blocks accordingly. By
1493 // skipping such corner case, we can make updating BlockColors after splitting
1494 // predecessor fairly simple.
1495 if (!SafetyInfo->getBlockColors().empty() &&
1496 BB->getFirstNonPHIIt()->isEHPad())
1497 return false;
1498 for (BasicBlock *BBPred : predecessors(BB)) {
1499 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1500 return false;
1501 }
1502 return true;
1503}
1504
1506 LoopInfo *LI, const Loop *CurLoop,
1507 LoopSafetyInfo *SafetyInfo,
1508 MemorySSAUpdater *MSSAU) {
// NOTE(review): the opening line of this signature (orig. 1505) is elided in
// this excerpt; per the call sites this is splitPredecessorsOfLoopExit(
// PHINode *PN, DominatorTree *DT, ...).
1509#ifndef NDEBUG
// NOTE(review): the declaration of `ExitBlocks` (orig. line 1510) is elided
// in this excerpt.
1511 CurLoop->getUniqueExitBlocks(ExitBlocks);
1512 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1513#endif
1514 BasicBlock *ExitBB = PN->getParent();
1515 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1516
1517 // Split predecessors of the loop exit to make instructions in the loop are
1518 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1519 // loop in the canonical form where each predecessor of each exit block should
1520 // be contained within the loop. For example, this will convert the loop below
1521 // from
1522 //
1523 // LB1:
1524 // %v1 =
1525 // br %LE, %LB2
1526 // LB2:
1527 // %v2 =
1528 // br %LE, %LB1
1529 // LE:
1530 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1531 //
1532 // to
1533 //
1534 // LB1:
1535 // %v1 =
1536 // br %LE.split, %LB2
1537 // LB2:
1538 // %v2 =
1539 // br %LE.split2, %LB1
1540 // LE.split:
1541 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1542 // br %LE
1543 // LE.split2:
1544 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1545 // br %LE
1546 // LE:
1547 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1548 //
1549 const auto &BlockColors = SafetyInfo->getBlockColors();
1550 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1551 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1552 while (!PredBBs.empty()) {
1553 BasicBlock *PredBB = *PredBBs.begin();
1554 assert(CurLoop->contains(PredBB) &&
1555 "Expect all predecessors are in the loop");
1556 if (PN->getBasicBlockIndex(PredBB) >= 0) {
// NOTE(review): the call opening that binds `NewPred` (orig. line 1557) is
// elided in this excerpt; presumably a SplitBlockPredecessors call taking
// the arguments on the next line — confirm against upstream LICM.cpp.
1558 ExitBB, PredBB, ".split.loop.exit", &DTU, LI, MSSAU, true);
1559 // Since we do not allow splitting EH-block with BlockColors in
1560 // canSplitPredecessors(), we can simply assign predecessor's color to
1561 // the new block.
1562 if (!BlockColors.empty())
1563 // Grab a reference to the ColorVector to be inserted before getting the
1564 // reference to the vector we are copying because inserting the new
1565 // element in BlockColors might cause the map to be reallocated.
1566 SafetyInfo->copyColors(NewPred, PredBB);
1567 }
1568 PredBBs.remove(PredBB);
1569 }
1570}
1571
1572/// When an instruction is found to only be used outside of the loop, this
1573/// function moves it to the exit blocks and patches up SSA form as needed.
1574/// This method is guaranteed to remove the original instruction from its
1575/// position, and may either delete it or move it to outside of the loop.
1576///
1577static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1578 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
// NOTE(review): the final parameter line of this signature (orig. 1579) is
// elided in this excerpt; the body uses MSSAU and ORE, which are presumably
// declared there.
1580 bool Changed = false;
1581 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1582
1583 // Iterate over users to be ready for actual sinking. Replace users via
1584 // unreachable blocks with undef and make all user PHIs trivially replaceable.
1585 SmallPtrSet<Instruction *, 8> VisitedUsers;
1586 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1587 auto *User = cast<Instruction>(*UI);
1588 Use &U = UI.getUse();
1589 ++UI;
1590
1591 if (VisitedUsers.count(User) || CurLoop->contains(User))
1592 continue;
1593
1594 if (!DT->isReachableFromEntry(User->getParent())) {
1595 U = PoisonValue::get(I.getType());
1596 Changed = true;
1597 continue;
1598 }
1599
1600 // The user must be a PHI node.
1601 PHINode *PN = cast<PHINode>(User);
1602
1603 // Surprisingly, instructions can be used outside of loops without any
1604 // exits. This can only happen in PHI nodes if the incoming block is
1605 // unreachable.
1606 BasicBlock *BB = PN->getIncomingBlock(U);
1607 if (!DT->isReachableFromEntry(BB)) {
1608 U = PoisonValue::get(I.getType());
1609 Changed = true;
1610 continue;
1611 }
1612
1613 VisitedUsers.insert(PN);
1614 if (isTriviallyReplaceablePHI(*PN, I))
1615 continue;
1616
1617 if (!canSplitPredecessors(PN, SafetyInfo))
1618 return Changed;
1619
1620 // Split predecessors of the PHI so that we can make users trivially
1621 // replaceable.
1622 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1623
1624 // Should rebuild the iterators, as they may be invalidated by
1625 // splitPredecessorsOfLoopExit().
1626 UI = I.user_begin();
1627 UE = I.user_end();
1628 }
1629
1630 if (VisitedUsers.empty())
1631 return Changed;
1632
1633 ORE->emit([&]() {
1634 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1635 << "sinking " << ore::NV("Inst", &I);
1636 });
1637 if (isa<LoadInst>(I))
1638 ++NumMovedLoads;
1639 else if (isa<CallInst>(I))
1640 ++NumMovedCalls;
1641 ++NumSunk;
1642
1643#ifndef NDEBUG
// NOTE(review): the declaration of `ExitBlocks` (orig. line 1644) is elided
// in this excerpt.
1645 CurLoop->getUniqueExitBlocks(ExitBlocks);
1646 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1647#endif
1648
1649 // Clones of this instruction. Don't create more than one per exit block!
// NOTE(review): the declaration of `SunkCopies` (orig. line 1650) is elided
// in this excerpt; it is presumably the per-exit-block clone map passed to
// sinkThroughTriviallyReplaceablePHI below.
1651
1652 // If this instruction is only used outside of the loop, then all users are
1653 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1654 // the instruction.
1655 // First check if I is worth sinking for all uses. Sink only when it is worth
1656 // across all uses.
1657 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1658 for (auto *UI : Users) {
1659 auto *User = cast<Instruction>(UI);
1660
1661 if (CurLoop->contains(User))
1662 continue;
1663
1664 PHINode *PN = cast<PHINode>(User);
1665 assert(ExitBlockSet.count(PN->getParent()) &&
1666 "The LCSSA PHI is not in an exit block!");
1667
1668 // The PHI must be trivially replaceable.
// NOTE(review): the call opening that binds `New` (orig. line 1669) is
// elided; presumably a sinkThroughTriviallyReplaceablePHI call taking the
// arguments on the next line — confirm against upstream LICM.cpp.
1670 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1671 // As we sink the instruction out of the BB, drop its debug location.
1672 New->dropLocation();
1673 PN->replaceAllUsesWith(New);
1674 eraseInstruction(*PN, *SafetyInfo, MSSAU);
1675 Changed = true;
1676 }
1677 return Changed;
1678}
1679
1680/// When an instruction is found to only use loop invariant operands that
1681/// is safe to hoist, this instruction is called to do the dirty work.
1682///
1683static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1684 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
// NOTE(review): the final parameter line(s) of this signature (orig.
// 1685-1686) are elided in this excerpt; the body uses MSSAU, SE and ORE,
// which are presumably declared there.
1687 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1688 << I << "\n");
1689 ORE->emit([&]() {
1690 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1691 << ore::NV("Inst", &I);
1692 });
1693
1694 // Metadata can be dependent on conditions we are hoisting above.
1695 // Conservatively strip all metadata on the instruction unless we were
1696 // guaranteed to execute I if we entered the loop, in which case the metadata
1697 // is valid in the loop preheader.
1698 // Similarly, If I is a call and it is not guaranteed to execute in the loop,
1699 // then moving to the preheader means we should strip attributes on the call
1700 // that can cause UB since we may be hoisting above conditions that allowed
1701 // inferring those attributes. They may not be valid at the preheader.
1702 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1703 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1704 // time in isGuaranteedToExecute if we don't actually have anything to
1705 // drop. It is a compile time optimization, not required for correctness.
1706 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop)) {
1707 I.dropUBImplyingAttrsAndMetadata();
1708 }
1709
1710 if (isa<PHINode>(I))
1711 // Move the new node to the end of the phi list in the destination block.
1712 moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
1713 else
1714 // Move the new node to the destination block, before its terminator.
1715 moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
1716 MSSAU, SE);
1717
1718 I.updateLocationAfterHoist();
1719
1720 if (isa<LoadInst>(I))
1721 ++NumMovedLoads;
1722 else if (isa<CallInst>(I))
1723 ++NumMovedCalls;
1724 ++NumHoisted;
1725}
1726
1727/// Only sink or hoist an instruction if it is not a trapping instruction,
1728/// or if the instruction is known not to trap when moved to the preheader,
1729/// or if it is a trapping instruction and is guaranteed to execute.
// NOTE(review): original line 1730 (the function's return type and name —
// presumably `static bool isSafeToExecuteUnconditionally(`) is missing from
// this listing; confirm against upstream.
1731 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1732 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1733 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1734 AssumptionCache *AC, bool AllowSpeculation) {
// Cheap positive case: if speculation is allowed and the instruction cannot
// trap when evaluated at CtxI, no execution guarantee is required.
1735 if (AllowSpeculation &&
1736 isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1737 return true;
1738
1739 bool GuaranteedToExecute =
1740 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1741
// Emit a missed-optimization remark for loads from loop-invariant addresses
// that we could not move only because they are conditionally executed.
1742 if (!GuaranteedToExecute) {
1743 auto *LI = dyn_cast<LoadInst>(&Inst);
1744 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1745 ORE->emit([&]() {
// NOTE(review): original line 1746 (presumably the
// `return OptimizationRemarkMissed(` construction these arguments belong
// to) is missing from this listing; confirm against upstream.
1747 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1748 << "failed to hoist load with loop-invariant address "
1749 "because load is conditionally executed";
1750 });
1751 }
1752
1753 return GuaranteedToExecute;
1754}
1755
1756namespace {
// Helper for promoteLoopAccessesToScalars: rewrites the in-loop loads/stores
// of a promoted, must-aliased location into SSA form and, when legal,
// re-materializes stores of the live-out value in the loop exit blocks.
1757class LoopPromoter : public LoadAndStorePromoter {
1758 Value *SomePtr; // Designated pointer to store to.
1759 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1760 SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1761 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1762 PredIteratorCache &PredCache;
1763 MemorySSAUpdater &MSSAU;
1764 LoopInfo &LI;
1765 DebugLoc DL;
1766 Align Alignment;
1767 bool UnorderedAtomic;
1768 AAMDNodes AATags;
1769 ICFLoopSafetyInfo &SafetyInfo;
1770 bool CanInsertStoresInExitBlocks;
// NOTE(review): original line 1771 — presumably the declaration of the
// `Uses` member that the constructor below initializes from `Insts` — is
// missing from this listing; confirm against upstream.
1772
1773 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1774 // (if legal) if doing so would add an out-of-loop use to an instruction
1775 // defined in-loop.
1776 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1777 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1778 return V;
1779
// NOTE(review): original line 1780 — presumably
// `Instruction *I = cast<Instruction>(V);`, which the lines below use — is
// missing from this listing; confirm against upstream.
1781 // We need to create an LCSSA PHI node for the incoming value and
1782 // store that.
1783 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1784 I->getName() + ".lcssa");
1785 PN->insertBefore(BB->begin());
1786 for (BasicBlock *Pred : PredCache.get(BB))
1787 PN->addIncoming(I, Pred);
1788 return PN;
1789 }
1790
1791public:
1792 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1793 SmallVectorImpl<BasicBlock *> &LEB,
1794 SmallVectorImpl<BasicBlock::iterator> &LIP,
1795 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1796 MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1797 Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1798 ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1799 : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1800 LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1801 LI(li), DL(std::move(dl)), Alignment(Alignment),
1802 UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1803 SafetyInfo(SafetyInfo),
1804 CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1805
// Materialize one store of the promoted value per loop exit block, using the
// SSA updater to find the live-out value at each exit.
1806 void insertStoresInLoopExitBlocks() {
1807 // Insert stores after in the loop exit blocks. Each exit block gets a
1808 // store of the live-out values that feed them. Since we've already told
1809 // the SSA updater about the defs in the loop and the preheader
1810 // definition, it is all set and we can start using it.
1811 DIAssignID *NewID = nullptr;
1812 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1813 BasicBlock *ExitBlock = LoopExitBlocks[i];
1814 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1815 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1816 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1817 BasicBlock::iterator InsertPos = LoopInsertPts[i];
1818 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1819 if (UnorderedAtomic)
1820 NewSI->setOrdering(AtomicOrdering::Unordered);
1821 NewSI->setAlignment(Alignment);
1822 NewSI->setDebugLoc(DL);
1823 // Attach DIAssignID metadata to the new store, generating it on the
1824 // first loop iteration.
1825 if (i == 0) {
1826 // NewSI will have its DIAssignID set here if there are any stores in
1827 // Uses with a DIAssignID attachment. This merged ID will then be
1828 // attached to the other inserted stores (in the branch below).
1829 NewSI->mergeDIAssignID(Uses);
// NOTE(review): original line 1830 — presumably the start of an assignment
// such as `NewID = cast_or_null<DIAssignID>(` that the next line completes —
// is missing from this listing; confirm against upstream.
1831 NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1832 } else {
1833 // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1834 // above.
1835 NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
1836 }
1837
1838 if (AATags)
1839 NewSI->setAAMetadata(AATags);
1840
1841 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1842 MemoryAccess *NewMemAcc;
1843 if (!MSSAInsertPoint) {
1844 NewMemAcc = MSSAU.createMemoryAccessInBB(
1845 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1846 } else {
1847 NewMemAcc =
1848 MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1849 }
// Remember this access so a later store in the same exit block is chained
// after it.
1850 MSSAInsertPts[i] = NewMemAcc;
1851 MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
1852 // FIXME: true for safety, false may still be correct.
1853 }
1854 }
1855
1856 void doExtraRewritesBeforeFinalDeletion() override {
1857 if (CanInsertStoresInExitBlocks)
1858 insertStoresInLoopExitBlocks();
1859 }
1860
// Keep the safety info and MemorySSA in sync as the base class deletes the
// original in-loop accesses.
1861 void instructionDeleted(Instruction *I) const override {
1862 SafetyInfo.removeInstruction(I);
1863 MSSAU.removeMemoryAccess(I);
1864 }
1865
// Stores may only be deleted when we are allowed to re-create them in the
// exit blocks; every other rewritten access is always deleted.
1866 bool shouldDelete(Instruction *I) const override {
1867 if (isa<StoreInst>(I))
1868 return CanInsertStoresInExitBlocks;
1869 return true;
1870 }
1871};
1872
// Return true when V's provenance is not captured before or inside loop L,
// so no alias of V created by a capture can be used to access it from
// elsewhere during the loop.
1873bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1874 DominatorTree *DT) {
1875 // We can perform the captured-before check against any instruction in the
1876 // loop header, as the loop header is reachable from any instruction inside
1877 // the loop.
1878 // TODO: ReturnCaptures=true shouldn't be necessary here.
// NOTE(review): original line 1879 — presumably the `return
// capturesNothing(PointerMayBeCapturedBefore(` call that the following
// lines are arguments of — is missing from this listing; confirm against
// upstream.
1880 V, /*ReturnCaptures=*/true, L->getHeader()->getTerminator(), DT,
1881 /*IncludeI=*/false, CaptureComponents::Provenance));
1882}
1883
1884/// Return true if we can prove that a caller cannot inspect the object if an
1885/// unwind occurs inside the loop.
1886bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1887 DominatorTree *DT) {
1888 bool RequiresNoCaptureBeforeUnwind;
1889 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1890 return false;
1891
1892 return !RequiresNoCaptureBeforeUnwind ||
1893 isNotCapturedBeforeOrInLoop(Object, L, DT);
1894}
1895
// Return true when Object can be treated as thread-local for the duration of
// loop L: either it is an identified function-local allocation that is never
// captured before/in the loop, or the target/program is known to be
// single-threaded.
// NOTE(review): original line 1897 — the tail of the parameter list
// (presumably `TargetTransformInfo *TTI) {`) — is missing from this listing;
// confirm against upstream.
1896bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
1898 // The object must be function-local to start with, and then not captured
1899 // before/in the loop.
1900 return (isIdentifiedFunctionLocal(Object) &&
1901 isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
// NOTE(review): `SingleThread` is not declared in this listing — presumably
// a file-level cl::opt flag; confirm against upstream.
1902 (TTI->isSingleThreaded() || SingleThread);
1903}
1904
1905} // namespace
1906
1907/// Try to promote memory values to scalars by sinking stores out of the
1908/// loop and moving loads to before the loop. We do this by looping over
1909/// the stores in the loop, looking for stores to Must pointers which are
1910/// loop invariant.
1911///
// NOTE(review): original line 1912 (the function header — the assert below
// names it `promoteLoopAccessesToScalars`) and lines 1914-1917 (several
// reference parameters used in the body, e.g. ExitBlocks, InsertPts,
// MSSAInsertPts, PIC, LI, DT, AC) are missing from this listing; confirm
// against upstream.
1913 const SmallSetVector<Value *, 8> &PointerMustAliases,
1918 const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1919 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1920 OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1921 bool HasReadsOutsideSet) {
1922 // Verify inputs.
1923 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1924 SafetyInfo != nullptr &&
1925 "Unexpected Input to promoteLoopAccessesToScalars");
1926
1927 LLVM_DEBUG({
1928 dbgs() << "Trying to promote set of must-aliased pointers:\n";
1929 for (Value *Ptr : PointerMustAliases)
1930 dbgs() << " " << *Ptr << "\n";
1931 });
1932 ++NumPromotionCandidates;
1933
// All pointers in the set must-alias, so any representative will do.
1934 Value *SomePtr = *PointerMustAliases.begin();
1935 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1936
1937 // It is not safe to promote a load/store from the loop if the load/store is
1938 // conditional. For example, turning:
1939 //
1940 // for () { if (c) *P += 1; }
1941 //
1942 // into:
1943 //
1944 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1945 //
1946 // is not safe, because *P may only be valid to access if 'c' is true.
1947 //
1948 // The safety property divides into two parts:
1949 // p1) The memory may not be dereferenceable on entry to the loop. In this
1950 // case, we can't insert the required load in the preheader.
1951 // p2) The memory model does not allow us to insert a store along any dynamic
1952 // path which did not originally have one.
1953 //
1954 // If at least one store is guaranteed to execute, both properties are
1955 // satisfied, and promotion is legal.
1956 //
1957 // This, however, is not a necessary condition. Even if no store/load is
1958 // guaranteed to execute, we can still establish these properties.
1959 // We can establish (p1) by proving that hoisting the load into the preheader
1960 // is safe (i.e. proving dereferenceability on all paths through the loop). We
1961 // can use any access within the alias set to prove dereferenceability,
1962 // since they're all must alias.
1963 //
1964 // There are two ways establish (p2):
1965 // a) Prove the location is thread-local. In this case the memory model
1966 // requirement does not apply, and stores are safe to insert.
1967 // b) Prove a store dominates every exit block. In this case, if an exit
1968 // blocks is reached, the original dynamic path would have taken us through
1969 // the store, so inserting a store into the exit block is safe. Note that this
1970 // is different from the store being guaranteed to execute. For instance,
1971 // if an exception is thrown on the first iteration of the loop, the original
1972 // store is never executed, but the exit blocks are not executed either.
1973
// NOTE(review): `StoreIsGuanteedToExecute` (sic) — misspelled identifier
// kept byte-identical; renaming would be a separate cleanup.
1974 bool DereferenceableInPH = false;
1975 bool StoreIsGuanteedToExecute = false;
1976 bool LoadIsGuaranteedToExecute = false;
1977 bool FoundLoadToPromote = false;
1978
1979 // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
1980 enum {
1981 StoreSafe,
1982 StoreUnsafe,
1983 StoreSafetyUnknown,
1984 } StoreSafety = StoreSafetyUnknown;
1985
// NOTE(review): original line 1986 — presumably the declaration of the
// `LoopUses` container that is filled and consumed below — is missing from
// this listing; confirm against upstream.
1987
1988 // We start with an alignment of one and try to find instructions that allow
1989 // us to prove better alignment.
1990 Align Alignment;
1991 // Keep track of which types of access we see
1992 bool SawUnorderedAtomic = false;
1993 bool SawNotAtomic = false;
1994 AAMDNodes AATags;
1995
1996 const DataLayout &MDL = Preheader->getDataLayout();
1997
1998 // If there are reads outside the promoted set, then promoting stores is
1999 // definitely not safe.
2000 if (HasReadsOutsideSet)
2001 StoreSafety = StoreUnsafe;
2002
2003 if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2004 // If a loop can throw, we have to insert a store along each unwind edge.
2005 // That said, we can't actually make the unwind edge explicit. Therefore,
2006 // we have to prove that the store is dead along the unwind edge. We do
2007 // this by proving that the caller can't have a reference to the object
2008 // after return and thus can't possibly load from the object.
2009 Value *Object = getUnderlyingObject(SomePtr);
2010 if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2011 StoreSafety = StoreUnsafe;
2012 }
2013
2014 // Check that all accesses to pointers in the alias set use the same type.
2015 // We cannot (yet) promote a memory location that is loaded and stored in
2016 // different sizes. While we are at it, collect alignment and AA info.
2017 Type *AccessTy = nullptr;
2018 for (Value *ASIV : PointerMustAliases) {
2019 for (Use &U : ASIV->uses()) {
2020 // Ignore instructions that are outside the loop.
2021 Instruction *UI = dyn_cast<Instruction>(U.getUser());
2022 if (!UI || !CurLoop->contains(UI))
2023 continue;
2024
2025 // If there is an non-load/store instruction in the loop, we can't promote
2026 // it.
2027 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
// Ordered (acquire+) loads cannot be promoted at all; bail out.
2028 if (!Load->isUnordered())
2029 return false;
2030
2031 SawUnorderedAtomic |= Load->isAtomic();
2032 SawNotAtomic |= !Load->isAtomic();
2033 FoundLoadToPromote = true;
2034
2035 Align InstAlignment = Load->getAlign();
2036
2037 if (!LoadIsGuaranteedToExecute)
2038 LoadIsGuaranteedToExecute =
2039 SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2040
2041 // Note that proving a load safe to speculate requires proving
2042 // sufficient alignment at the target location. Proving it guaranteed
2043 // to execute does as well. Thus we can increase our guaranteed
2044 // alignment as well.
2045 if (!DereferenceableInPH || (InstAlignment > Alignment))
// NOTE(review): original line 2046 — presumably the call head
// `if (isSafeToExecuteUnconditionally(` whose arguments follow — is missing
// from this listing; confirm against upstream.
2047 *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2048 Preheader->getTerminator(), AC, AllowSpeculation)) {
2049 DereferenceableInPH = true;
2050 Alignment = std::max(Alignment, InstAlignment);
2051 }
2052 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2053 // Stores *of* the pointer are not interesting, only stores *to* the
2054 // pointer.
2055 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2056 continue;
2057 if (!Store->isUnordered())
2058 return false;
2059
2060 SawUnorderedAtomic |= Store->isAtomic();
2061 SawNotAtomic |= !Store->isAtomic();
2062
2063 // If the store is guaranteed to execute, both properties are satisfied.
2064 // We may want to check if a store is guaranteed to execute even if we
2065 // already know that promotion is safe, since it may have higher
2066 // alignment than any other guaranteed stores, in which case we can
2067 // raise the alignment on the promoted store.
2068 Align InstAlignment = Store->getAlign();
2069 bool GuaranteedToExecute =
2070 SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2071 StoreIsGuanteedToExecute |= GuaranteedToExecute;
2072 if (GuaranteedToExecute) {
2073 DereferenceableInPH = true;
2074 if (StoreSafety == StoreSafetyUnknown)
2075 StoreSafety = StoreSafe;
2076 Alignment = std::max(Alignment, InstAlignment);
2077 }
2078
2079 // If a store dominates all exit blocks, it is safe to sink.
2080 // As explained above, if an exit block was executed, a dominating
2081 // store must have been executed at least once, so we are not
2082 // introducing stores on paths that did not have them.
2083 // Note that this only looks at explicit exit blocks. If we ever
2084 // start sinking stores into unwind edges (see above), this will break.
2085 if (StoreSafety == StoreSafetyUnknown &&
2086 llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2087 return DT->dominates(Store->getParent(), Exit);
2088 }))
2089 StoreSafety = StoreSafe;
2090
2091 // If the store is not guaranteed to execute, we may still get
2092 // deref info through it.
2093 if (!DereferenceableInPH) {
2094 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2095 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2096 Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2097 }
2098 } else
2099 continue; // Not a load or store.
2100
// All promoted accesses must agree on the access type; mixed sizes/types
// cannot (yet) be promoted.
2101 if (!AccessTy)
2102 AccessTy = getLoadStoreType(UI);
2103 else if (AccessTy != getLoadStoreType(UI))
2104 return false;
2105
2106 // Merge the AA tags.
2107 if (LoopUses.empty()) {
2108 // On the first load/store, just take its AA tags.
2109 AATags = UI->getAAMetadata();
2110 } else if (AATags) {
2111 AATags = AATags.merge(UI->getAAMetadata());
2112 }
2113
2114 LoopUses.push_back(UI);
2115 }
2116 }
2117
2118 // If we found both an unordered atomic instruction and a non-atomic memory
2119 // access, bail. We can't blindly promote non-atomic to atomic since we
2120 // might not be able to lower the result. We can't downgrade since that
2121 // would violate memory model. Also, align 0 is an error for atomics.
2122 if (SawUnorderedAtomic && SawNotAtomic)
2123 return false;
2124
2125 // If we're inserting an atomic load in the preheader, we must be able to
2126 // lower it. We're only guaranteed to be able to lower naturally aligned
2127 // atomics.
2128 if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2129 return false;
2130
2131 // If we couldn't prove we can hoist the load, bail.
2132 if (!DereferenceableInPH) {
2133 LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2134 return false;
2135 }
2136
2137 // We know we can hoist the load, but don't have a guaranteed store.
2138 // Check whether the location is writable and thread-local. If it is, then we
2139 // can insert stores along paths which originally didn't have them without
2140 // violating the memory model.
2141 if (StoreSafety == StoreSafetyUnknown) {
2142 Value *Object = getUnderlyingObject(SomePtr);
2143 bool ExplicitlyDereferenceableOnly;
2144 if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2145 (!ExplicitlyDereferenceableOnly ||
2146 isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
2147 isThreadLocalObject(Object, CurLoop, DT, TTI))
2148 StoreSafety = StoreSafe;
2149 }
2150
2151 // If we've still failed to prove we can sink the store, hoist the load
2152 // only, if possible.
2153 if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2154 // If we cannot hoist the load either, give up.
2155 return false;
2156
2157 // Lets do the promotion!
2158 if (StoreSafety == StoreSafe) {
2159 LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2160 << '\n');
2161 ++NumLoadStorePromoted;
2162 } else {
2163 LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2164 << '\n');
2165 ++NumLoadPromoted;
2166 }
2167
2168 ORE->emit([&]() {
2169 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2170 LoopUses[0])
2171 << "Moving accesses to memory location out of the loop";
2172 });
2173
2174 // Look at all the loop uses, and try to merge their locations.
2175 std::vector<DebugLoc> LoopUsesLocs;
2176 for (auto U : LoopUses)
2177 LoopUsesLocs.push_back(U->getDebugLoc());
2178 auto DL = DebugLoc::getMergedLocations(LoopUsesLocs);
2179
2180 // We use the SSAUpdater interface to insert phi nodes as required.
// NOTE(review): original line 2181 — presumably the declaration of the
// `NewPHIs` vector passed to the SSAUpdater below — is missing from this
// listing; confirm against upstream.
2182 SSAUpdater SSA(&NewPHIs);
2183 LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2184 MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2185 SawUnorderedAtomic,
2186 StoreIsGuanteedToExecute ? AATags : AAMDNodes(),
2187 *SafetyInfo, StoreSafety == StoreSafe);
2188
2189 // Set up the preheader to have a definition of the value. It is the live-out
2190 // value from the preheader that uses in the loop will use.
2191 LoadInst *PreheaderLoad = nullptr;
2192 if (FoundLoadToPromote || !StoreIsGuanteedToExecute) {
2193 PreheaderLoad =
2194 new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2195 Preheader->getTerminator()->getIterator());
2196 if (SawUnorderedAtomic)
2197 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2198 PreheaderLoad->setAlignment(Alignment);
2199 PreheaderLoad->setDebugLoc(DebugLoc::getDropped());
2200 if (AATags && LoadIsGuaranteedToExecute)
2201 PreheaderLoad->setAAMetadata(AATags);
2202
2203 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2204 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2205 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2206 MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2207 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2208 } else {
// No load is needed before the loop: every path through the loop stores
// before reading, so the incoming value is never observed.
2209 SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2210 }
2211
2212 if (VerifyMemorySSA)
2213 MSSAU.getMemorySSA()->verifyMemorySSA();
2214 // Rewrite all the loads in the loop and remember all the definitions from
2215 // stores in the loop.
2216 Promoter.run(LoopUses);
2217
2218 if (VerifyMemorySSA)
2219 MSSAU.getMemorySSA()->verifyMemorySSA();
2220 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2221 if (PreheaderLoad && PreheaderLoad->use_empty())
2222 eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2223
2224 return true;
2225}
2226
2227static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2228 function_ref<void(Instruction *)> Fn) {
2229 for (const BasicBlock *BB : L->blocks())
2230 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2231 for (const auto &Access : *Accesses)
2232 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2233 Fn(MUD->getMemoryInst());
2234}
2235
2236// The bool indicates whether there might be reads outside the set, in which
2237// case only loads may be promoted.
// NOTE(review): original lines 2238-2239 (the function header — return type,
// name and parameters; the body uses MSSA, AA and L) are missing from this
// listing; confirm against upstream.
2240 BatchAAResults BatchAA(*AA);
2241 AliasSetTracker AST(BatchAA);
2242
// A load/store is a promotion candidate only if its address is loop
// invariant and not ConstantData.
2243 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2244 if (const auto *SI = dyn_cast<StoreInst>(I)) {
2245 const Value *PtrOp = SI->getPointerOperand();
2246 return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2247 }
2248 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2249 const Value *PtrOp = LI->getPointerOperand();
2250 return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2251 }
2252 return false;
2253 };
2254
2255 // Populate AST with potentially promotable accesses.
2256 SmallPtrSet<Value *, 16> AttemptingPromotion;
2257 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2258 if (IsPotentiallyPromotable(I)) {
2259 AttemptingPromotion.insert(I);
2260 AST.add(I);
2261 }
2262 });
2263
2264 // We're only interested in must-alias sets that contain a mod.
// NOTE(review): original line 2265 — presumably the declaration of `Sets`
// (alias set pointers paired with a has-reads-outside bit) — is missing
// from this listing; confirm against upstream.
2266 for (AliasSet &AS : AST)
2267 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2268 Sets.push_back({&AS, false});
2269
2270 if (Sets.empty())
2271 return {}; // Nothing to promote...
2272
2273 // Discard any sets for which there is an aliasing non-promotable access.
2274 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2275 if (AttemptingPromotion.contains(I))
2276 return;
2277
// NOTE(review): original line 2278 — presumably the head of an
// `erase_if(Sets, [&](... &Pair) {` call that the lambda body below and the
// `});` at line 2291 belong to — is missing from this listing; confirm
// against upstream.
2279 ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
2280 // Cannot promote if there are writes outside the set.
2281 if (isModSet(MR))
2282 return true;
2283 if (isRefSet(MR)) {
2284 // Remember reads outside the set.
2285 Pair.setInt(true);
2286 // If this is a mod-only set and there are reads outside the set,
2287 // we will not be able to promote, so bail out early.
2288 return !Pair.getPointer()->isRef();
2289 }
2290 return false;
2291 });
2292 });
2293
// NOTE(review): original line 2294 — presumably the declaration of the
// `Result` container returned below — is missing from this listing; confirm
// against upstream.
2295 for (auto [Set, HasReadsOutsideSet] : Sets) {
2296 SmallSetVector<Value *, 8> PointerMustAliases;
2297 for (const auto &MemLoc : *Set)
2298 PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr));
2299 Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
2300 }
2301
2302 return Result;
2303}
2304
2305// For a given store instruction or writeonly call instruction, this function
2306// checks that there are no read or writes that conflict with the memory
2307// access in the instruction
// NOTE(review): original line 2308 (the function header — return type, name
// and leading parameters; the body uses I and MSSA) and line 2311
// (presumably an assert on the kind of instruction accepted) are missing
// from this listing; confirm against upstream.
2309 AAResults *AA, Loop *CurLoop,
2310 SinkAndHoistLICMFlags &Flags) {
2312 // If there are more accesses than the Promotion cap, then give up as we're
2313 // not walking a list that long.
2314 if (Flags.tooManyMemoryAccesses())
2315 return false;
2316
2317 auto *IMD = MSSA->getMemoryAccess(I);
2318 BatchAAResults BAA(*AA);
2319 auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, IMD);
2320 // Make sure there are no clobbers inside the loop.
2321 if (!MSSA->isLiveOnEntryDef(Source) && CurLoop->contains(Source->getBlock()))
2322 return false;
2323
2324 // If there are interfering Uses (i.e. their defining access is in the
2325 // loop), or ordered loads (stored as Defs!), don't move this store.
2326 // Could do better here, but this is conservatively correct.
2327 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
2328 // moving accesses. Can also extend to dominating uses.
2329 for (auto *BB : CurLoop->getBlocks()) {
2330 auto *Accesses = MSSA->getBlockAccesses(BB);
2331 if (!Accesses)
2332 continue;
2333 for (const auto &MA : *Accesses)
2334 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
2335 auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
2336 const_cast<MemoryUse *>(MU));
2337 if (!MSSA->isLiveOnEntryDef(MD) && CurLoop->contains(MD->getBlock()))
2338 return false;
2339 // Disable hoisting past potentially interfering loads. Optimized
2340 // Uses may point to an access outside the loop, as getClobbering
2341 // checks the previous iteration when walking the backedge.
2342 // FIXME: More precise: no Uses that alias I.
2343 if (!Flags.getIsSink() && !MSSA->dominates(IMD, MU))
2344 return false;
2345 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
2346 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
2347 (void)LI; // Silence warning.
// NOTE(review): the condition asserts the load is NOT unordered (a load
// modeled as a MemoryDef must be ordered), but the message text says
// "Expected unordered load" — the message wording looks inverted; confirm
// and fix upstream.
2348 assert(!LI->isUnordered() && "Expected unordered load");
2349 return false;
2350 }
2351 // Any call, while it may not be clobbering I, it may be a use.
2352 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
2353 // Check if the call may read from the memory location written
2354 // to by I. Check CI's attributes and arguments; the number of
2355 // such checks performed is limited above by NoOfMemAccTooLarge.
2356 if (auto *SI = dyn_cast<StoreInst>(I)) {
// NOTE(review): original line 2357 — presumably the computation of `MRI`
// for the store case, e.g. `ModRefInfo MRI = BAA.getModRefInfo(CI,
// MemoryLocation::get(SI));` — is missing from this listing; confirm
// against upstream.
2358 if (isModOrRefSet(MRI))
2359 return false;
2360 } else {
2361 auto *SCI = cast<CallInst>(I);
2362 // If the instruction we are wanting to hoist is also a call
2363 // instruction then we need not check mod/ref info with itself
2364 if (SCI == CI)
2365 continue;
2366 ModRefInfo MRI = BAA.getModRefInfo(CI, SCI);
2367 if (isModOrRefSet(MRI))
2368 return false;
2369 }
2370 }
2371 }
2372 }
2373 return true;
2374}
2375
// Return true when the location read by the MemoryUse may be invalidated by
// the loop, i.e. moving the access out of CurLoop would be unsafe.
// NOTE(review): original line 2376 (the function header — return type, name
// and leading parameters; the body uses MSSA and MU) is missing from this
// listing; confirm against upstream.
2377 Loop *CurLoop, Instruction &I,
2378 SinkAndHoistLICMFlags &Flags,
2379 bool InvariantGroup) {
2380 // For hoisting, use the walker to determine safety
2381 if (!Flags.getIsSink()) {
2382 // If hoisting an invariant group, we only need to check that there
2383 // is no store to the loaded pointer between the start of the loop,
2384 // and the load (since all values must be the same).
2385
2386 // This can be checked in two conditions:
2387 // 1) if the memoryaccess is outside the loop
2388 // 2) the earliest access is at the loop header,
2389 // if the memory loaded is the phi node
2390
2391 BatchAAResults BAA(MSSA->getAA());
2392 MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
// Invalidated only if the clobber is a real in-loop access (a MemoryPhi at
// the header is tolerated for invariant groups).
2393 return !MSSA->isLiveOnEntryDef(Source) &&
2394 CurLoop->contains(Source->getBlock()) &&
2395 !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
2396 }
2397
2398 // For sinking, we'd need to check all Defs below this use. The getClobbering
2399 // call will look on the backedge of the loop, but will check aliasing with
2400 // the instructions on the previous iteration.
2401 // For example:
2402 // for (i ... )
2403 // load a[i] ( Use (LoE)
2404 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2405 // i++;
2406 // The load sees no clobbering inside the loop, as the backedge alias check
2407 // does phi translation, and will check aliasing against store a[i-1].
2408 // However sinking the load outside the loop, below the store is incorrect.
2409
2410 // For now, only sink if there are no Defs in the loop, and the existing ones
2411 // precede the use and are in the same block.
2412 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2413 // needs PostDominatorTreeAnalysis.
2414 // FIXME: More precise: no Defs that alias this Use.
// Past the walk cap, conservatively report the pointer as invalidated.
2415 if (Flags.tooManyMemoryAccesses())
2416 return true;
2417 for (auto *BB : CurLoop->getBlocks())
2418 if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2419 return true;
2420 // When sinking, the source block may not be part of the loop so check it.
2421 if (!CurLoop->contains(&I))
2422 return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2423
2424 return false;
2425}
2426
// Return true if some MemoryDef in BB could clobber the location of MU: any
// Def in a different block than MU, or a Def in MU's block that does not
// locally dominate (i.e. precede) MU.
// NOTE(review): original line 2427 (the function header — presumably
// `static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
// MemoryUse &MU) {`, matching the call sites above) is missing from this
// listing; confirm against upstream.
2428 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2429 for (const auto &MA : *Accesses)
2430 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2431 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2432 return true;
2433 return false;
2434}
2435
2436/// Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A <
2437/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
2438/// minimum can be computed outside of loop, and A is not loop-invariant.
2439 static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2440 MemorySSAUpdater &MSSAU) {
// A logical-or is handled by inverting: !(a<x || a<y) == a>=x && a>=y, so
// the same min/max machinery applies with inverted predicates.
2441 bool Inverse = false;
2442 using namespace PatternMatch;
2443 Value *Cond1, *Cond2;
2444 if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2445 Inverse = true;
2446 } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
2447 // Do nothing
2448 } else
2449 return false;
2450
// Match C as a one-use icmp of a non-invariant LHS against an invariant
// RHS, canonicalizing operand order (and predicate) as needed.
2451 auto MatchICmpAgainstInvariant = [&](Value *C, CmpPredicate &P, Value *&LHS,
2452 Value *&RHS) {
2453 if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2454 return false;
2455 if (!LHS->getType()->isIntegerTy())
2456 return false;
// NOTE(review): original line 2457 — presumably a guard such as
// `if (!ICmpInst::isRelational(P))` whose `return false;` follows — is
// missing from this listing; confirm against upstream.
2458 return false;
2459 if (L.isLoopInvariant(LHS)) {
2460 std::swap(LHS, RHS);
// NOTE(review): original line 2461 — presumably the predicate swap that
// accompanies the operand swap (e.g. `P = ICmpInst::getSwappedCmpPredicate(P);`)
// — is missing from this listing; confirm against upstream.
2462 }
2463 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2464 return false;
2465 if (Inverse)
// NOTE(review): original line 2466 — presumably the predicate inversion for
// the logical-or case (e.g. `P = ICmpInst::getInverseCmpPredicate(P);`) — is
// missing from this listing; confirm against upstream.
2467 return true;
2468 };
2469 CmpPredicate P1, P2;
2470 Value *LHS1, *LHS2, *RHS1, *RHS2;
2471 if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2472 !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2473 return false;
// Both compares must use the same predicate and the same variant LHS.
2474 auto MatchingPred = CmpPredicate::getMatching(P1, P2);
2475 if (!MatchingPred || LHS1 != LHS2)
2476 return false;
2477
2478 // Everything is fine, we can do the transform.
2479 bool UseMin = ICmpInst::isLT(*MatchingPred) || ICmpInst::isLE(*MatchingPred);
2480 assert(
2481 (UseMin || ICmpInst::isGT(*MatchingPred) ||
2482 ICmpInst::isGE(*MatchingPred)) &&
2483 "Relational predicate is either less (or equal) or greater (or equal)!");
2484 Intrinsic::ID id = ICmpInst::isSigned(*MatchingPred)
2485 ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2486 : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2487 auto *Preheader = L.getLoopPreheader();
2488 assert(Preheader && "Loop is not in simplify form?");
// The invariant min/max is computed once in the preheader.
2489 IRBuilder<> Builder(Preheader->getTerminator());
2490 // We are about to create a new guaranteed use for RHS2 which might not exist
2491 // before (if it was a non-taken input of logical and/or instruction). If it
2492 // was poison, we need to freeze it. Note that no new use for LHS and RHS1 are
2493 // introduced, so they don't need this.
2494 if (isa<SelectInst>(I))
2495 RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
2496 Value *NewRHS = Builder.CreateBinaryIntrinsic(
2497 id, RHS1, RHS2, nullptr,
2498 StringRef("invariant.") +
2499 (ICmpInst::isSigned(*MatchingPred) ? "s" : "u") +
2500 (UseMin ? "min" : "max"));
2501 Builder.SetInsertPoint(&I);
2502 ICmpInst::Predicate P = *MatchingPred;
2503 if (Inverse)
// NOTE(review): original line 2504 — presumably the inversion of P back for
// the logical-or case (e.g. `P = ICmpInst::getInversePredicate(P);`) — is
// missing from this listing; confirm against upstream.
2505 Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
2506 NewCond->takeName(&I);
2507 I.replaceAllUsesWith(NewCond);
2508 eraseInstruction(I, SafetyInfo, MSSAU);
// The original one-use compares are now dead; salvage their debug info
// before erasing them.
2509 Instruction &CondI1 = *cast<Instruction>(Cond1);
2510 Instruction &CondI2 = *cast<Instruction>(Cond2);
2511 salvageDebugInfo(CondI1);
2512 salvageDebugInfo(CondI2);
2513 eraseInstruction(CondI1, SafetyInfo, MSSAU);
2514 eraseInstruction(CondI2, SafetyInfo, MSSAU);
2515 return true;
2516}
2517
2518/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2519/// this allows hoisting the inner GEP.
2520static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2522 DominatorTree *DT) {
2524 if (!GEP)
2525 return false;
2526
2527 // Do not try to hoist a constant GEP out of the loop via reassociation.
2528 // Constant GEPs can often be folded into addressing modes, and reassociating
2529 // them may inhibit CSE of a common base.
2530 if (GEP->hasAllConstantIndices())
2531 return false;
2532
2533 auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
2534 if (!Src || !Src->hasOneUse() || !L.contains(Src))
2535 return false;
2536
2537 Value *SrcPtr = Src->getPointerOperand();
2538 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2539 if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
2540 return false;
2541
2542 // This can only happen if !AllowSpeculation, otherwise this would already be
2543 // handled.
2544 // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2545 // The flag exists to prevent metadata dropping, which is not relevant here.
2546 if (all_of(Src->indices(), LoopInvariant))
2547 return false;
2548
2549 // The swapped GEPs are inbounds if both original GEPs are inbounds
2550 // and the sign of the offsets is the same. For simplicity, only
2551 // handle both offsets being non-negative.
2552 const DataLayout &DL = GEP->getDataLayout();
2553 auto NonNegative = [&](Value *V) {
2554 return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
2555 };
2556 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2557 all_of(Src->indices(), NonNegative) &&
2558 all_of(GEP->indices(), NonNegative);
2559
2560 BasicBlock *Preheader = L.getLoopPreheader();
2561 IRBuilder<> Builder(Preheader->getTerminator());
2562 Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
2563 SmallVector<Value *>(GEP->indices()),
2564 "invariant.gep", IsInBounds);
2565 Builder.SetInsertPoint(GEP);
2566 Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
2567 SmallVector<Value *>(Src->indices()), "gep",
2568 IsInBounds);
2569 GEP->replaceAllUsesWith(NewGEP);
2570 eraseInstruction(*GEP, SafetyInfo, MSSAU);
2571 salvageDebugInfo(*Src);
2572 eraseInstruction(*Src, SafetyInfo, MSSAU);
2573 return true;
2574}
2575
2576/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2577/// C1 and C2 are loop invariants and LV is a loop-variant.
2578static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2579 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2580 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2581 AssumptionCache *AC, DominatorTree *DT) {
2582 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2583 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2584
2585 bool IsSigned = ICmpInst::isSigned(Pred);
2586
2587 // Try to represent VariantLHS as sum of invariant and variant operands.
2588 using namespace PatternMatch;
2589 Value *VariantOp, *InvariantOp;
2590 if (IsSigned &&
2591 !match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2592 return false;
2593 if (!IsSigned &&
2594 !match(VariantLHS, m_NUWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2595 return false;
2596
2597 // LHS itself is a loop-variant, try to represent it in the form:
2598 // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
2599 if (L.isLoopInvariant(VariantOp))
2600 std::swap(VariantOp, InvariantOp);
2601 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2602 return false;
2603
2604 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2605 // freely move values from left side of inequality to right side (just as in
2606 // normal linear arithmetics). Overflows make things much more complicated, so
2607 // we want to avoid this.
2608 auto &DL = L.getHeader()->getDataLayout();
2609 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2610 if (IsSigned && computeOverflowForSignedSub(InvariantRHS, InvariantOp, SQ) !=
2612 return false;
2613 if (!IsSigned &&
2614 computeOverflowForUnsignedSub(InvariantRHS, InvariantOp, SQ) !=
2616 return false;
2617 auto *Preheader = L.getLoopPreheader();
2618 assert(Preheader && "Loop is not in simplify form?");
2619 IRBuilder<> Builder(Preheader->getTerminator());
2620 Value *NewCmpOp =
2621 Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
2622 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2623 ICmp.setPredicate(Pred);
2624 ICmp.setOperand(0, VariantOp);
2625 ICmp.setOperand(1, NewCmpOp);
2626
2627 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2628 salvageDebugInfo(DeadI);
2629 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2630 return true;
2631}
2632
2633/// Try to reassociate and hoist the following two patterns:
2634/// LV - C1 < C2 --> LV < C1 + C2,
2635/// C1 - LV < C2 --> LV > C1 - C2.
2636static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
2637 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2638 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2639 AssumptionCache *AC, DominatorTree *DT) {
2640 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2641 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2642
2643 bool IsSigned = ICmpInst::isSigned(Pred);
2644
2645 // Try to represent VariantLHS as sum of invariant and variant operands.
2646 using namespace PatternMatch;
2647 Value *VariantOp, *InvariantOp;
2648 if (IsSigned &&
2649 !match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2650 return false;
2651 if (!IsSigned &&
2652 !match(VariantLHS, m_NUWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2653 return false;
2654
2655 bool VariantSubtracted = false;
2656 // LHS itself is a loop-variant, try to represent it in the form:
2657 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2658 // the variant operand goes with minus, we use a slightly different scheme.
2659 if (L.isLoopInvariant(VariantOp)) {
2660 std::swap(VariantOp, InvariantOp);
2661 VariantSubtracted = true;
2662 Pred = ICmpInst::getSwappedPredicate(Pred);
2663 }
2664 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2665 return false;
2666
2667 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2668 // freely move values from left side of inequality to right side (just as in
2669 // normal linear arithmetics). Overflows make things much more complicated, so
2670 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2671 // "C1 - C2" does not overflow.
2672 auto &DL = L.getHeader()->getDataLayout();
2673 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2674 if (VariantSubtracted && IsSigned) {
2675 // C1 - LV < C2 --> LV > C1 - C2
2676 if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
2678 return false;
2679 } else if (VariantSubtracted && !IsSigned) {
2680 // C1 - LV < C2 --> LV > C1 - C2
2681 if (computeOverflowForUnsignedSub(InvariantOp, InvariantRHS, SQ) !=
2683 return false;
2684 } else if (!VariantSubtracted && IsSigned) {
2685 // LV - C1 < C2 --> LV < C1 + C2
2686 if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
2688 return false;
2689 } else { // !VariantSubtracted && !IsSigned
2690 // LV - C1 < C2 --> LV < C1 + C2
2691 if (computeOverflowForUnsignedAdd(InvariantOp, InvariantRHS, SQ) !=
2693 return false;
2694 }
2695 auto *Preheader = L.getLoopPreheader();
2696 assert(Preheader && "Loop is not in simplify form?");
2697 IRBuilder<> Builder(Preheader->getTerminator());
2698 Value *NewCmpOp =
2699 VariantSubtracted
2700 ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
2701 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned)
2702 : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
2703 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2704 ICmp.setPredicate(Pred);
2705 ICmp.setOperand(0, VariantOp);
2706 ICmp.setOperand(1, NewCmpOp);
2707
2708 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2709 salvageDebugInfo(DeadI);
2710 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2711 return true;
2712}
2713
2714/// Reassociate and hoist add/sub expressions.
2715static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2717 DominatorTree *DT) {
2718 using namespace PatternMatch;
2719 CmpPredicate Pred;
2720 Value *LHS, *RHS;
2721 if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
2722 return false;
2723
2724 // Put variant operand to LHS position.
2725 if (L.isLoopInvariant(LHS)) {
2726 std::swap(LHS, RHS);
2727 Pred = ICmpInst::getSwappedPredicate(Pred);
2728 }
2729 // We want to delete the initial operation after reassociation, so only do it
2730 // if it has no other uses.
2731 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
2732 return false;
2733
2734 // TODO: We could go with smarter context, taking common dominator of all I's
2735 // users instead of I itself.
2736 if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2737 return true;
2738
2739 if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2740 return true;
2741
2742 return false;
2743}
2744
2745static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
2746 unsigned FPOpcode) {
2747 if (I->getOpcode() == IntOpcode)
2748 return true;
2749 if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
2750 I->hasNoSignedZeros())
2751 return true;
2752 return false;
2753}
2754
2755/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2756/// A1, A2, ... and C are loop invariants into expressions like
2757/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2758/// invariant expressions. This functions returns true only if any hoisting has
/// actually occurred.
2761 ICFLoopSafetyInfo &SafetyInfo,
2763 DominatorTree *DT) {
2764 if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
2765 return false;
2766 Value *VariantOp = I.getOperand(0);
2767 Value *InvariantOp = I.getOperand(1);
2768 if (L.isLoopInvariant(VariantOp))
2769 std::swap(VariantOp, InvariantOp);
2770 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2771 return false;
2772 Value *Factor = InvariantOp;
2773
2774 // First, we need to make sure we should do the transformation.
2775 SmallVector<Use *> Changes;
2778 if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
2779 Worklist.push_back(VariantBinOp);
2780 while (!Worklist.empty()) {
2781 BinaryOperator *BO = Worklist.pop_back_val();
2782 if (!BO->hasOneUse())
2783 return false;
2784 if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
2787 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
2788 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
2789 Adds.push_back(BO);
2790 continue;
2791 }
2792 if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
2793 L.isLoopInvariant(BO))
2794 return false;
2795 Use &U0 = BO->getOperandUse(0);
2796 Use &U1 = BO->getOperandUse(1);
2797 if (L.isLoopInvariant(U0))
2798 Changes.push_back(&U0);
2799 else if (L.isLoopInvariant(U1))
2800 Changes.push_back(&U1);
2801 else
2802 return false;
2803 unsigned Limit = I.getType()->isIntOrIntVectorTy()
2806 if (Changes.size() > Limit)
2807 return false;
2808 }
2809 if (Changes.empty())
2810 return false;
2811
2812 // Drop the poison flags for any adds we looked through.
2813 if (I.getType()->isIntOrIntVectorTy()) {
2814 for (auto *Add : Adds)
2815 Add->dropPoisonGeneratingFlags();
2816 }
2817
2818 // We know we should do it so let's do the transformation.
2819 auto *Preheader = L.getLoopPreheader();
2820 assert(Preheader && "Loop is not in simplify form?");
2821 IRBuilder<> Builder(Preheader->getTerminator());
2822 for (auto *U : Changes) {
2823 assert(L.isLoopInvariant(U->get()));
2824 auto *Ins = cast<BinaryOperator>(U->getUser());
2825 Value *Mul;
2826 if (I.getType()->isIntOrIntVectorTy()) {
2827 Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
2828 // Drop the poison flags on the original multiply.
2829 Ins->dropPoisonGeneratingFlags();
2830 } else
2831 Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");
2832
2833 // Rewrite the reassociable instruction.
2834 unsigned OpIdx = U->getOperandNo();
2835 auto *LHS = OpIdx == 0 ? Mul : Ins->getOperand(0);
2836 auto *RHS = OpIdx == 1 ? Mul : Ins->getOperand(1);
2837 auto *NewBO =
2838 BinaryOperator::Create(Ins->getOpcode(), LHS, RHS,
2839 Ins->getName() + ".reass", Ins->getIterator());
2840 NewBO->setDebugLoc(DebugLoc::getDropped());
2841 NewBO->copyIRFlags(Ins);
2842 if (VariantOp == Ins)
2843 VariantOp = NewBO;
2844 Ins->replaceAllUsesWith(NewBO);
2845 eraseInstruction(*Ins, SafetyInfo, MSSAU);
2846 }
2847
2848 I.replaceAllUsesWith(VariantOp);
2849 eraseInstruction(I, SafetyInfo, MSSAU);
2850 return true;
2851}
2852
2853/// Reassociate associative binary expressions of the form
2854///
2855/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
2856/// 2. "(C1 op LV) op C2" ==> "LV op (C1 op C2)"
2857/// 3. "C2 op (C1 op LV)" ==> "LV op (C1 op C2)"
2858/// 4. "C2 op (LV op C1)" ==> "LV op (C1 op C2)"
2859///
2860/// where op is an associative BinOp, LV is a loop variant, and C1 and C2 are
2861/// loop invariants that we want to hoist, noting that associativity implies
2862/// commutativity.
2864 ICFLoopSafetyInfo &SafetyInfo,
2866 DominatorTree *DT) {
2867 auto *BO = dyn_cast<BinaryOperator>(&I);
2868 if (!BO || !BO->isAssociative())
2869 return false;
2870
2871 Instruction::BinaryOps Opcode = BO->getOpcode();
2872 bool LVInRHS = L.isLoopInvariant(BO->getOperand(0));
2873 auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(LVInRHS));
2874 if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
2875 BO0->hasNUsesOrMore(BO0->getType()->isIntegerTy() ? 2 : 3))
2876 return false;
2877
2878 Value *LV = BO0->getOperand(0);
2879 Value *C1 = BO0->getOperand(1);
2880 Value *C2 = BO->getOperand(!LVInRHS);
2881
2882 assert(BO->isCommutative() && BO0->isCommutative() &&
2883 "Associativity implies commutativity");
2884 if (L.isLoopInvariant(LV) && !L.isLoopInvariant(C1))
2885 std::swap(LV, C1);
2886 if (L.isLoopInvariant(LV) || !L.isLoopInvariant(C1) || !L.isLoopInvariant(C2))
2887 return false;
2888
2889 auto *Preheader = L.getLoopPreheader();
2890 assert(Preheader && "Loop is not in simplify form?");
2891
2892 IRBuilder<> Builder(Preheader->getTerminator());
2893 auto *Inv = Builder.CreateBinOp(Opcode, C1, C2, "invariant.op");
2894
2895 auto *NewBO = BinaryOperator::Create(
2896 Opcode, LV, Inv, BO->getName() + ".reass", BO->getIterator());
2897 NewBO->setDebugLoc(DebugLoc::getDropped());
2898
2899 if (Opcode == Instruction::FAdd || Opcode == Instruction::FMul) {
2900 // Intersect FMF flags for FADD and FMUL.
2901 FastMathFlags Intersect = BO->getFastMathFlags() & BO0->getFastMathFlags();
2902 if (auto *I = dyn_cast<Instruction>(Inv))
2903 I->setFastMathFlags(Intersect);
2904 NewBO->setFastMathFlags(Intersect);
2905 } else {
2906 OverflowTracking Flags;
2907 Flags.AllKnownNonNegative = false;
2908 Flags.AllKnownNonZero = false;
2909 Flags.mergeFlags(*BO);
2910 Flags.mergeFlags(*BO0);
2911 // If `Inv` was not constant-folded, a new Instruction has been created.
2912 if (auto *I = dyn_cast<Instruction>(Inv))
2913 Flags.applyFlags(*I);
2914 Flags.applyFlags(*NewBO);
2915 }
2916
2917 BO->replaceAllUsesWith(NewBO);
2918 eraseInstruction(*BO, SafetyInfo, MSSAU);
2919
2920 // (LV op C1) might not be erased if it has more uses than the one we just
2921 // replaced.
2922 if (BO0->use_empty()) {
2923 salvageDebugInfo(*BO0);
2924 eraseInstruction(*BO0, SafetyInfo, MSSAU);
2925 }
2926
2927 return true;
2928}
2929
2931 ICFLoopSafetyInfo &SafetyInfo,
2933 DominatorTree *DT) {
2934 // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
2935 // into (x < min(INV1, INV2)), and hoisting the invariant part of this
2936 // expression out of the loop.
2937 if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
2938 ++NumHoisted;
2939 ++NumMinMaxHoisted;
2940 return true;
2941 }
2942
2943 // Try to hoist GEPs by reassociation.
2944 if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
2945 ++NumHoisted;
2946 ++NumGEPsHoisted;
2947 return true;
2948 }
2949
2950 // Try to hoist add/sub's by reassociation.
2951 if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
2952 ++NumHoisted;
2953 ++NumAddSubHoisted;
2954 return true;
2955 }
2956
2957 bool IsInt = I.getType()->isIntOrIntVectorTy();
2958 if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2959 ++NumHoisted;
2960 if (IsInt)
2961 ++NumIntAssociationsHoisted;
2962 else
2963 ++NumFPAssociationsHoisted;
2964 return true;
2965 }
2966
2967 if (hoistBOAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2968 ++NumHoisted;
2969 ++NumBOAssociationsHoisted;
2970 return true;
2971 }
2972
2973 return false;
2974}
2975
2976/// Little predicate that returns true if the specified basic block is in
2977/// a subloop of the current one, not the current one itself.
2978///
2979static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2980 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2981 return LI->getLoopFor(BB) != CurLoop;
2982}
unsigned const MachineRegisterInfo * MRI
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Forward Handle Accesses
DXIL Resource Access
early cse Early CSE w MemorySSA
#define DEBUG_TYPE
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool isReassociableOp(Instruction *I, unsigned IntOpcode, unsigned FPOpcode)
Definition LICM.cpp:2745
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, TargetTransformInfo *TTI, bool &FoldableInLoop, bool LoopNestMode)
Return true if the only users of this instruction are outside of the loop.
Definition LICM.cpp:1331
static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if this allows hoisting the inner ...
Definition LICM.cpp:2520
static cl::opt< bool > SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false), cl::desc("Force thread model single in LICM pass"))
static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, LoopInfo *LI, const Loop *CurLoop, LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU)
Definition LICM.cpp:1505
static cl::opt< unsigned > FPAssociationUpperLimit("licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop, const TargetTransformInfo *TTI)
Return true if the instruction is foldable in the loop.
Definition LICM.cpp:1301
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Try to simplify things like (A < INV_1 AND icmp A < INV_2) into (A < min(INV_1, INV_2)),...
Definition LICM.cpp:2439
static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE)
Definition LICM.cpp:1457
static Instruction * cloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1373
static cl::opt< bool > ControlFlowHoisting("licm-control-flow-hoisting", cl::Hidden, cl::init(false), cl::desc("Enable control flow (and PHI) hoisting in LICM"))
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU, Loop *CurLoop, Instruction &I, SinkAndHoistLICMFlags &Flags, bool InvariantGroup)
Definition LICM.cpp:2376
static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to turn things like "LV + C1 < C2" into "LV < C2 - C1".
Definition LICM.cpp:2578
static MemoryAccess * getClobberingMemoryAccess(MemorySSA &MSSA, BatchAAResults &BAA, SinkAndHoistLICMFlags &Flags, MemoryUseOrDef *MA)
Definition LICM.cpp:1152
static SmallVector< PointersAndHasReadsOutsideSet, 0 > collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L)
Definition LICM.cpp:2239
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
When an instruction is found to only use loop invariant operands that is safe to hoist,...
Definition LICM.cpp:1683
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo)
Definition LICM.cpp:1487
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE)
When an instruction is found to only be used outside of the loop, this function moves it to the exit ...
Definition LICM.cpp:1577
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate and hoist add/sub expressions.
Definition LICM.cpp:2715
static bool hoistMulAddAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where A1, A2,...
Definition LICM.cpp:2760
static cl::opt< uint32_t > MaxNumUsesTraversed("licm-max-num-uses-traversed", cl::Hidden, cl::init(8), cl::desc("Max num uses visited for identifying load " "invariance in loop using invariant start (default = 8)"))
static cl::opt< unsigned > IntAssociationUpperLimit("licm-max-num-int-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L, function_ref< void(Instruction *)> Fn)
Definition LICM.cpp:2227
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, Loop *CurLoop)
Definition LICM.cpp:1065
static Instruction * sinkThroughTriviallyReplaceablePHI(PHINode *TPN, Instruction *I, LoopInfo *LI, SmallDenseMap< BasicBlock *, Instruction *, 32 > &SunkCopies, const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1472
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI)
Little predicate that returns true if the specified basic block is in a subloop of the current one,...
Definition LICM.cpp:2979
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate and hoist the following two patterns: LV - C1 < C2 --> LV < C1 + C2,...
Definition LICM.cpp:2636
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Definition LICM.cpp:1450
static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, const Instruction *CtxI, AssumptionCache *AC, bool AllowSpeculation)
Only sink or hoist an instruction if it is not a trapping instruction, or if the instruction is known...
Definition LICM.cpp:1730
static bool hoistArithmetics(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Aggregates various functions for hoisting computations out of loop.
Definition LICM.cpp:2930
static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA, AAResults *AA, Loop *CurLoop, SinkAndHoistLICMFlags &Flags)
Definition LICM.cpp:2308
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I)
Returns true if a PHINode is a trivially replaceable with an Instruction.
Definition LICM.cpp:1292
std::pair< SmallSetVector< Value *, 8 >, bool > PointersAndHasReadsOutsideSet
Definition LICM.cpp:218
static cl::opt< bool > DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass"))
Memory promotion is enabled by default.
static bool hoistBOAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate associative binary expressions of the form.
Definition LICM.cpp:2863
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU)
Definition LICM.cpp:2427
This file defines the interface for the loop nest analysis.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Memory SSA
Definition MemorySSA.cpp:72
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
if(PassOpts->AAPipeline)
PassInstrumentationCallbacks PIC
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
This file provides a priority worklist.
Remove Loads Into Fake Uses
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
This file defines generic set operations that may be used on set's of different types,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
static cl::opt< bool > DisablePromotion("disable-type-promotion", cl::Hidden, cl::init(false), cl::desc("Disable type promotion pass"))
Value * RHS
Value * LHS
BinaryOperator * Mul
LLVM_ABI void add(const MemoryLocation &Loc)
These methods are used to add different types of instructions to the alias sets.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition BasicBlock.h:386
LLVM_ABI bool canSplitPredecessors() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:770
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
bool isSigned() const
Definition InstrTypes.h:932
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:829
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isNegative() const
Definition Constants.h:209
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:169
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition DataLayout.h:466
static LLVM_ABI DebugLoc getMergedLocations(ArrayRef< DebugLoc > Locs)
Try to combine the vector of locations passed as input in a single one.
Definition DebugLoc.cpp:170
static DebugLoc getDropped()
Definition DebugLoc.h:164
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:229
iterator end()
Definition DenseMap.h:81
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop) const
Returns true if we could not execute a memory-modifying instruction before we enter BB under assumpti...
void removeInstruction(const Instruction *Inst)
Inform safety info that we are planning to remove the instruction Inst from its block.
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is calculated contains an instruction that may throw ...
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop: checks loop body & header for the possibility of may throw exc...
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Inform the safety info that we are planning to insert a new instruction Inst into the basic block BB.
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
LLVM_ABI void mergeDIAssignID(ArrayRef< const Instruction * > SourceInstructions)
Merge the DIAssignID metadata from this instruction and those attached to instructions in SourceInstr...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition LICM.cpp:323
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition LICM.cpp:301
PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition LICM.cpp:333
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition LICM.cpp:363
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
Helper class for promoting a collection of loads and stores into SSA Form using the SSAUpdater.
Definition SSAUpdater.h:147
An instruction for reading from memory.
void setAlignment(Align Align)
Value * getPointerOperand()
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
LLVM_ABI bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V, const BasicBlock *ExitBB) const
Definition LoopInfo.cpp:943
This class represents a loop nest and can be used to query its properties.
Function * getParent() const
Return the function to which the loop-nest belongs.
Loop & getOutermostLoop() const
Return the outermost loop in the loop nest.
Captures loop safety information.
Definition MustExecute.h:60
LLVM_ABI void copyColors(BasicBlock *New, BasicBlock *Old)
Copy colors of block Old into the block New.
LLVM_ABI const DenseMap< BasicBlock *, ColorVector > & getBlockColors() const
Returns block colors map that is used to update funclet operand bundles.
virtual bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const =0
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:67
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:61
BasicBlock * getBlock() const
Definition MemorySSA.h:162
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:221
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:215
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:218
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
An analysis that produces MemorySSA for a function.
Definition MemorySSA.h:936
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
LLVM_ABI void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
LLVM_ABI void insertUse(MemoryUse *Use, bool RenameUses=false)
LLVM_ABI MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
LLVM_ABI MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
LLVM_ABI void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
LLVM_ABI void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition MemorySSA.h:1053
Legacy analysis pass which computes MemorySSA.
Definition MemorySSA.h:993
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition MemorySSA.h:702
AliasAnalysis & getAA()
Definition MemorySSA.h:802
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition MemorySSA.h:760
LLVM_ABI MemorySSAWalker * getSkipSelfWalker()
LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition MemorySSA.h:720
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition MemorySSA.h:768
LLVM_ABI bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition MemorySSA.h:740
Class that has the common methods + fields of memory uses/defs.
Definition MemorySSA.h:250
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition MemorySSA.h:260
Represents read-only accesses to memory.
Definition MemorySSA.h:310
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
PointerIntPair - This class implements a pair of a pointer and small integer.
void setInt(IntType IntVal) &
PointerTy getPointer() const
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
PredIteratorCache - This class is an extremely trivial cache for predecessor iterator queries.
size_t size(BasicBlock *BB)
ArrayRef< BasicBlock * > get(BasicBlock *BB)
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
bool empty() const
Determine if the PriorityWorklist is empty or not.
bool insert(const T &X)
Insert a new element into the PriorityWorklist.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition SSAUpdater.h:39
The main scalar evolution driver.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition SetVector.h:198
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:99
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:109
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:168
Flags controlling how much is checked when sinking or hoisting instructions.
Definition LoopUtils.h:124
LLVM_ABI SinkAndHoistLICMFlags(unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, Loop &L, MemorySSA &MSSA)
Definition LICM.cpp:391
unsigned LicmMssaNoAccForPromotionCap
Definition LoopUtils.h:143
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:356
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
static unsigned getPointerOperandIndex()
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Provides information about what library functions are available for the current target.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
EltTy front() const
unsigned size() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
const Use & getOperandUse(unsigned i) const
Definition User.h:245
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI std::string getNameOrAsOperand() const
Definition Value.cpp:457
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
iterator_range< use_iterator > uses()
Definition Value.h:380
user_iterator_impl< User > user_iterator
Definition Value.h:391
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:130
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
Abstract Attribute helper functions.
Definition Attributor.h:165
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
@ NeverOverflows
Never overflows.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
LLVM_ABI bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, MemorySSAUpdater &MSSAU, bool TargetExecutesOncePerLoop, SinkAndHoistLICMFlags &LICMFlags, OptimizationRemarkEmitter *ORE=nullptr)
Returns true if is legal to hoist or sink this instruction disregarding the possible introduction of ...
Definition LICM.cpp:1166
auto pred_end(const MachineBasicBlock *BB)
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B Identical to set_intersection, except that it works on set<>...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1725
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
Definition Loads.cpp:229
constexpr from_range_t from_range
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
auto cast_or_null(const Y &Val)
Definition Casting.h:720
auto pred_size(const MachineBasicBlock *BB)
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:296
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
LLVM_ABI Pass * createLICMPass()
Definition LICM.cpp:384
LLVM_ABI SmallVector< BasicBlock *, 16 > collectChildrenInLoop(DominatorTree *DT, DomTreeNode *N, const Loop *CurLoop)
Does a BFS from a given node to all of its children inside a given loop.
DomTreeNodeBase< BasicBlock > DomTreeNode
Definition Dominators.h:95
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
LLVM_ABI bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, AssumptionCache *, TargetLibraryInfo *, Loop *, MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool, bool AllowSpeculation)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition LICM.cpp:887
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI void initializeLegacyLICMPassPass(PassRegistry &)
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_TEMPLATE_ABI void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
bool isModOrRefSet(const ModRefInfo MRI)
Definition ModRef.h:43
LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
LLVM_ABI BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
TargetTransformInfo TTI
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition MemorySSA.cpp:84
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool hasDisableLICMTransformsHint(const Loop *L)
Look for the loop attribute that disables the LICM transformation heuristics.
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
@ Add
Sum of integers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition Loads.cpp:249
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1847
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
TinyPtrVector< BasicBlock * > ColorVector
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2100
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, Loop *OutermostLoop=nullptr)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition LICM.cpp:557
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
cl::opt< unsigned > SetLicmMssaNoAccForPromotionCap
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
bool capturesNothing(CaptureComponents CC)
Definition ModRef.h:315
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI bool promoteLoopAccessesToScalars(const SmallSetVector< Value *, 8 > &, SmallVectorImpl< BasicBlock * > &, SmallVectorImpl< BasicBlock::iterator > &, SmallVectorImpl< MemoryAccess * > &, PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC, const TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *, bool AllowSpeculation, bool HasReadsOutsideSet)
Try to promote memory values to scalars by sinking stores out of the loop and moving loads to before ...
Definition LICM.cpp:1912
cl::opt< unsigned > SetLicmMssaOptCap
LLVM_ABI bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *)
Call sinkRegion on loops contained within the specified loop in order from innermost to outermost.
Definition LICM.cpp:624
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly)
Return true if the Object is writable, in the sense that any location based on this pointer that can ...
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:760
LLVM_ABI AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A lightweight accessor for an operand bundle meant to be passed around by value.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70