//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it
// is safe. This pass also promotes must-aliased memory locations in the loop
// to live in registers, thus hoisting and sinking "invariant" loads and
// stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is
// the responsibility of the back-end, which has more accurate information
// about register pressure and also handles optimizations other than LICM
// that increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant.
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in
//     the loop of the pointer to use a temporary alloca'd variable. We then
//     use the SSAUpdater to construct the appropriate SSA form for the value.
//
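// As an illustrative sketch (not taken from any particular test), given
// loop-invariant values a and b and a loop-invariant pointer p that is not
// otherwise read or written in the loop:
//
//   for (i = 0; i < n; ++i)          t = a + b;   // add hoisted to preheader
//     *p = a + b;              =>    for (i = 0; i < n; ++i)
//                                      ;
//                                    if (n > 0)
//                                      *p = t;    // store promoted past loop
//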
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <utility>
using namespace llvm;

namespace llvm {
class LPMUpdater;
} // namespace llvm

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
STATISTIC(NumLoadPromoted, "Number of load-only promotions");
STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
STATISTIC(NumMinMaxHoisted,
          "Number of min/max expressions hoisted out of the loop");
STATISTIC(NumGEPsHoisted,
          "Number of geps reassociated and hoisted out of the loop");
STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
                            "and hoisted out of the loop");
STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
                                    "reassociated and hoisted out of the loop");
STATISTIC(NumIntAssociationsHoisted,
          "Number of invariant int expressions "
          "reassociated and hoisted out of the loop");
STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
                                    "reassociated and hoisted out of the loop");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<bool>
    SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
                 cl::desc("Force thread model single in LICM pass"));

static cl::opt<unsigned> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

static cl::opt<unsigned> FPAssociationUpperLimit(
    "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

static cl::opt<unsigned> IntAssociationUpperLimit(
    "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      const TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation);
static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA,
                                    AAResults *AA, Loop *CurLoop,
                                    SinkAndHoistLICMFlags &Flags);
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup);
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
                                      MemoryUse &MU);
/// Aggregates various functions for hoisting computations out of loop.
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU);

static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
using PointersAndHasReadsOutsideSet =
    std::pair<SmallSetVector<Value *, 8>, bool>;
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 AssumptionCache *AC, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap,
                          bool LicmAllowSpeculation)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
        LicmAllowSpeculation(LicmAllowSpeculation) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool LicmAllowSpeculation;
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
      bool LicmAllowSpeculation = true)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                           LicmAllowSpeculation) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    Function *F = L->getHeader()->getParent();

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    getLoopAnalysisUsage(AU);
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  if (!AR.MSSA)
    reportFatalUsageError("LICM requires MemorySSA (loop-mssa)");

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {
  if (!AR.MSSA)
    reportFatalUsageError("LNICM requires MemorySSA (loop-mssa)");

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(LN.getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);

  Loop &OutermostLoop = LN.getOutermostLoop();
  bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
                                &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);

  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LNICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }

SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop &L,
                                             MemorySSA &MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop &L, MemorySSA &MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  unsigned AccessCapCount = 0;
  for (auto *BB : L.getBlocks())
    if (const auto *Accesses = MSSA.getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
                                        DominatorTree *DT, AssumptionCache *AC,
                                        TargetLibraryInfo *TLI,
                                        TargetTransformInfo *TTI,
                                        ScalarEvolution *SE, MemorySSA *MSSA,
                                        OptimizationRemarkEmitter *ORE,
                                        bool LoopNestMode) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This would unfortunately hurt the performance of coroutines;
  // however, there is currently no general solution for this. Similar issues
  // could also potentially happen in other passes where instructions are being
  // moved across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  MemorySSAUpdater MSSAU(MSSA);
  SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                              /*IsSink=*/true, *L, *MSSA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not parts
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |=
        LoopNestMode
            ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
                                    TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
            : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                         MSSAU, &SafetyInfo, Flags, ORE);
  Flags.setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
                           MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
                           LicmAllowSpeculation, HasCoroSuspendInst);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points.
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<BasicBlock::iterator, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(ExitBlock->getFirstInsertionPt());
        MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      // Promoting one set of accesses may make the pointers for another set
      // loop invariant, so run this in a loop.
      bool Promoted = false;
      bool LocalPromoted;
      do {
        LocalPromoted = false;
        for (auto [PointerMustAliases, HasReadsOutsideSet] :
             collectPromotionCandidates(MSSA, AA, L)) {
          LocalPromoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
              LicmAllowSpeculation, HasReadsOutsideSet);
        }
        Promoted |= LocalPromoted;
      } while (LocalPromoted);

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of basic functional correctness checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions();
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist =
      collectChildrenInLoop(DT, N, CurLoop);

  bool Changed = false;
  for (BasicBlock *BB : reverse(Worklist)) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all the users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FoldableInLoop = false;
      bool LoopNestMode = OutermostLoop != nullptr;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
                                    SafetyInfo, TTI, FoldableInLoop,
                                    LoopNestMode) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FoldableInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  return Changed;
}

bool llvm::sinkRegionForLoopNest(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                                 DominatorTree *DT, TargetLibraryInfo *TLI,
                                 TargetTransformInfo *TTI, Loop *CurLoop,
                                 MemorySSAUpdater &MSSAU,
                                 ICFLoopSafetyInfo *SafetyInfo,
                                 SinkAndHoistLICMFlags &Flags,
                                 OptimizationRemarkEmitter *ORE) {

  bool Changed = false;
  SmallPriorityWorklist<Loop *, 4> Worklist;
  Worklist.insert(CurLoop);
  appendLoopsToWorklist(*CurLoop, Worklist);
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          MSSAU, SafetyInfo, Flags, ORE, CurLoop);
  }
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
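//
// As an illustrative sketch (block names invented for this example), given a
// loop-invariant condition %c:
//
//   loop.body:                      preheader:
//     br %c, bb.t, bb.f               br %c, bb.t.licm, bb.f.licm
//   bb.t:                           bb.t.licm:
//     %a = ...                        %a = ...            ; hoisted
//   bb.f:                           bb.f.licm:
//     %b = ...                        %b = ...            ; hoisted
//   merge:                          merge.licm:
//     %p = phi [%a], [%b]             %p = phi [%a], [%b] ; hoisted
//
// so the phi and its inputs become available in the duplicated control flow
// feeding into the loop.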
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater &MSSAU;

  // A map of blocks in the loop to the block their instructions will be hoisted
  // to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater &MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc(llvm::from_range,
                                              successors(TrueDest));
    SmallPtrSet<BasicBlock *, 4> FalseDestSucc(llvm::from_range,
                                               successors(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one pick whichever appears first in the block list
      // (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    BasicBlock *BB = PN->getParent();
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks(llvm::from_range,
                                                   predecessors(BB));
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on if the
        // branch is triangle-like or diamond-like.
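        // Illustrative sketch: with P the branch's block and S0/S1 its
        // successors,
        //   triangle: P -> BB and P -> S1 -> BB  (BB == S0; preds: P and S1)
        //   diamond:  P -> S0 -> BB and P -> S1 -> BB  (preds: S0 and S1)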
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that.
    if (auto It = HoistDestinationMap.find(BB); It != HoistDestinationMap.end())
      return It->second;

    // Check if this block is conditional based on a pending branch.
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader.
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::none_of(std::next(It), HoistableBranches.end(),
                        HasBBAsSuccessor) &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them.
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      auto [It, Inserted] = HoistDestinationMap.try_emplace(Orig);
      if (!Inserted)
        return It->second;
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      It->second = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
          HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    auto *NewBI =
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition(),
                           HoistTarget->getTerminator()->getIterator());
    // md_prof should also come from the original branch - since the
    // condition was hoisted, the branch probabilities shouldn't change.
    NewBI->copyMetadata(*BI, {LLVMContext::MD_prof});
    // FIXME: Issue #152767: debug info should also be the same as the
    // original branch, **if** the user explicitly indicated that.
    NewBI->setDebugLoc(HoistTarget->getTerminator()->getDebugLoc());
    HoistTarget->getTerminator()->eraseFromParent();

    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, AssumptionCache *AC,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation, bool HasCoroSuspendInst) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(*BB)) {
      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I, HasCoroSuspendInst) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
          isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo, ORE,
                                         Preheader->getTerminator(), AC,
                                         AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
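      // Illustrative sketch: with the 'arcp' fast-math flag on the fdiv,
      //   %x = fdiv fast double %a, %d   ; inside the loop, %d loop invariant
      // becomes
      //   %r = fdiv fast double 1.0, %d  ; hoisted to the preheader
      //   %x = fmul fast double %a, %r   ; stays in the loop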
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(I.getIterator());
        ReciprocalDivisor->setDebugLoc(I.getDebugLoc());

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(I.getIterator());
        Product->setDebugLoc(I.getDebugLoc());
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I, HasCoroSuspendInst) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Try to reassociate instructions so that part of computations can be
      // done out of loop.
      if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
        Changed = true;
        continue;
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to make sure
  // that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
                              SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
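//
// Illustrative IR sketch (the intrinsic's first argument is the size of the
// invariant region in bytes):
//
//   %inv = call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
//   ...                              ; %inv has no other uses
//   loop:
//     %v = load i32, ptr %p          ; candidate for hoisting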
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getPointerOperand();
  const DataLayout &DL = LI->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in future the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater &MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

static MemoryAccess *getClobberingMemoryAccess(MemorySSA &MSSA,
                                               BatchAAResults &BAA,
                                               SinkAndHoistLICMFlags &Flags,
                                               MemoryUseOrDef *MA) {
  // See declaration of SetLicmMssaOptCap for usage details.
  if (Flags.tooManyClobberingCalls())
    return MA->getDefiningAccess();

  MemoryAccess *Source =
      MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MA, BAA);
  Flags.incrementClobberingCalls();
  return Source;
}

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, MemorySSAUpdater &MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags &Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU.getMemorySSA();
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));

    bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);

    bool Invalidated = pointerInvalidatedByLoop(
        MSSA, MU, CurLoop, I, Flags, InvariantGroup);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // Convergent attribute has been used on operations that involve
    // inter-thread communication which results are implicitly affected by the
    // enclosing control flows. It is not safe to hoist or sink such operations
    // across control flow.
    if (CI->isConvergent())
      return false;

    // FIXME: Current LLVM IR semantics don't work well with coroutines and
    // thread local globals. We currently treat getting the address of a thread
    // local global as not accessing memory, even though it may not be a
    // constant throughout a function with coroutines. Remove this check after
    // we better model semantics of thread local globals.
    if (CI->getFunction()->isPresplitCoroutine())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    MemoryEffects Behavior = AA->getMemoryEffects(CI);

    if (Behavior.doesNotAccessMemory())
      return true;
    if (Behavior.onlyReadsMemory()) {
      // Might have stale MemoryDef for call that was later inferred to be
      // read-only.
      auto *MU = dyn_cast<MemoryUse>(MSSA->getMemoryAccess(CI));
      if (!MU)
        return false;

      // If we can prove there are no writes to the memory read by the call, we
      // can hoist or sink.
      return !pointerInvalidatedByLoop(MSSA, MU, CurLoop, I, Flags,
                                       /*InvariantGroup=*/false);
    }

    if (Behavior.onlyWritesMemory()) {
      // We can hoist or sink if there are no conflicting read/writes to the
      // memory location written to by the call.
      return noConflictingReadWrites(CI, MSSA, AA, CurLoop, Flags);
    }

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
      return true;
    return noConflictingReadWrites(SI, MSSA, AA, CurLoop, Flags);
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
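///
/// Illustrative IR sketch: after sinking %inst into the exit block, the LCSSA
/// phi below can simply be replaced by the sunk copy:
///
///   exit:
///     %p = phi i32 [ %inst, %loop.body ]  ; every incoming value is %inst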
///
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is foldable in the loop.
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                             const TargetTransformInfo *TTI) {
  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    InstructionCost CostI =
        TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
    if (CostI != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getInstructionCost because currently
    // it optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  }

  return false;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      const TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;

      if (LoopNestMode) {
        while (isa<PHINode>(UI) && UI->hasOneUser() &&
               UI->getNumOperands() == 1) {
          if (!CurLoop->contains(UI))
            break;
          UI = cast<Instruction>(UI->user_back());
        }
      }
    }

    if (CurLoop->contains(UI)) {
      if (IsFoldable) {
        FoldableInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      BasicBlock::iterator EHPad = BBColor->getFirstNonPHIIt();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", &*EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
    New->copyMetadata(*CI);
  } else {
    New = I.clone();
  }

  New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    // After running some passes, MemorySSA might be outdated, and the
    // instruction `I` may have become a non-memory touching instruction.
    MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning,
        /*CreationMustSucceed=*/false);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU.insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU.insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
  // this is particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (Use &Op : New->operands())
    if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
      auto *OInst = cast<Instruction>(Op.get());
      PHINode *OpPN =
          PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                          OInst->getName() + ".lcssa");
      OpPN->insertBefore(ExitBlock.begin());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
      Op = OpPN;
    }
  return New;
}

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU) {
  MSSAU.removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest->getParent());
  I.moveBefore(*Dest->getParent(), Dest);
  if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU.getMemorySSA()->getMemoryAccess(&I)))
    MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
                      MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetBlockAndLoopDispositions(&I);
}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater &MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  auto [It, Inserted] = SunkCopies.try_emplace(ExitBlock);
  if (Inserted)
    It->second = cloneInstructionInExitBlock(*I, *ExitBlock, *TPN, LI,
                                             SafetyInfo, MSSAU);
  return It->second;
}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exist it requires updating BlockColors for all offspring blocks
  // accordingly. By skipping such corner cases, we can make updating
  // BlockColors after splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() &&
      BB->getFirstNonPHIIt()->isEHPad())
    return false;
  for (BasicBlock *BBPred : predecessors(BB)) {
    if (isa<IndirectBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

1518 // Split predecessors of the loop exit so that instructions in the loop are
1519 // exposed to exit blocks through trivially replaceable PHIs, while keeping the
1520 // loop in the canonical form where each predecessor of each exit block is
1521 // contained within the loop. For example, this will convert the loop below
1522 // from
1523 //
1524 // LB1:
1525 // %v1 =
1526 // br %LE, %LB2
1527 // LB2:
1528 // %v2 =
1529 // br %LE, %LB1
1530 // LE:
1531 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1532 //
1533 // to
1534 //
1535 // LB1:
1536 // %v1 =
1537 // br %LE.split, %LB2
1538 // LB2:
1539 // %v2 =
1540 // br %LE.split2, %LB1
1541 // LE.split:
1542 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1543 // br %LE
1544 // LE.split2:
1545 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1546 // br %LE
1547 // LE:
1548 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1549 //
1550 const auto &BlockColors = SafetyInfo->getBlockColors();
1551 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1552 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1553 while (!PredBBs.empty()) {
1554 BasicBlock *PredBB = *PredBBs.begin();
1555 assert(CurLoop->contains(PredBB) &&
1556 "Expect all predecessors are in the loop");
1557 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1558 BasicBlock *NewPred = SplitBlockPredecessors(
1559 ExitBB, PredBB, ".split.loop.exit", &DTU, LI, MSSAU, true);
1560 // Since we do not allow splitting EH-block with BlockColors in
1561 // canSplitPredecessors(), we can simply assign predecessor's color to
1562 // the new block.
1563 if (!BlockColors.empty())
1564 // Grab a reference to the ColorVector to be inserted before getting the
1565 // reference to the vector we are copying because inserting the new
1566 // element in BlockColors might cause the map to be reallocated.
1567 SafetyInfo->copyColors(NewPred, PredBB);
1568 }
1569 PredBBs.remove(PredBB);
1570 }
1571}
1572
1573/// When an instruction is found to only be used outside of the loop, this
1574/// function moves it to the exit blocks and patches up SSA form as needed.
1575/// This method is guaranteed to remove the original instruction from its
1576/// position, and may either delete it or move it to outside of the loop.
1577///
1578static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1579 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1580 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE) {
1581 bool Changed = false;
1582 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1583
1584 // Iterate over users to be ready for actual sinking. Replace users via
1585 // unreachable blocks with poison and make all user PHIs trivially replaceable.
1586 SmallPtrSet<Instruction *, 8> VisitedUsers;
1587 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1588 auto *User = cast<Instruction>(*UI);
1589 Use &U = UI.getUse();
1590 ++UI;
1591
1592 if (VisitedUsers.count(User) || CurLoop->contains(User))
1593 continue;
1594
1595 if (!DT->isReachableFromEntry(User->getParent())) {
1596 U = PoisonValue::get(I.getType());
1597 Changed = true;
1598 continue;
1599 }
1600
1601 // The user must be a PHI node.
1602 PHINode *PN = cast<PHINode>(User);
1603
1604 // Surprisingly, instructions can be used outside of loops without any
1605 // exits. This can only happen in PHI nodes if the incoming block is
1606 // unreachable.
1607 BasicBlock *BB = PN->getIncomingBlock(U);
1608 if (!DT->isReachableFromEntry(BB)) {
1609 U = PoisonValue::get(I.getType());
1610 Changed = true;
1611 continue;
1612 }
1613
1614 VisitedUsers.insert(PN);
1615 if (isTriviallyReplaceablePHI(*PN, I))
1616 continue;
1617
1618 if (!canSplitPredecessors(PN, SafetyInfo))
1619 return Changed;
1620
1621 // Split predecessors of the PHI so that we can make users trivially
1622 // replaceable.
1623 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1624
1625 // Should rebuild the iterators, as they may be invalidated by
1626 // splitPredecessorsOfLoopExit().
1627 UI = I.user_begin();
1628 UE = I.user_end();
1629 }
1630
1631 if (VisitedUsers.empty())
1632 return Changed;
1633
1634 ORE->emit([&]() {
1635 return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1636 << "sinking " << ore::NV("Inst", &I);
1637 });
1638 if (isa<LoadInst>(I))
1639 ++NumMovedLoads;
1640 else if (isa<CallInst>(I))
1641 ++NumMovedCalls;
1642 ++NumSunk;
1643
1644#ifndef NDEBUG
1645 SmallVector<BasicBlock *, 32> ExitBlocks;
1646 CurLoop->getUniqueExitBlocks(ExitBlocks);
1647 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(llvm::from_range, ExitBlocks);
1648#endif
1649
1650 // Clones of this instruction. Don't create more than one per exit block!
1651 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1652
1653 // If this instruction is only used outside of the loop, then all users are
1654 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1655 // the instruction.
1656 // First check if I is worth sinking for all uses; sink only when it is
1657 // worthwhile across all uses.
1658 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1659 for (auto *UI : Users) {
1660 auto *User = cast<Instruction>(UI);
1661
1662 if (CurLoop->contains(User))
1663 continue;
1664
1665 PHINode *PN = cast<PHINode>(User);
1666 assert(ExitBlockSet.count(PN->getParent()) &&
1667 "The LCSSA PHI is not in an exit block!");
1668
1669 // The PHI must be trivially replaceable.
1670 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1671 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1672 // As we sink the instruction out of the BB, drop its debug location.
1673 New->dropLocation();
1674 PN->replaceAllUsesWith(New);
1675 eraseInstruction(*PN, *SafetyInfo, MSSAU);
1676 Changed = true;
1677 }
1678 return Changed;
1679}
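// (Illustrative sketch, not part of the original source: with hypothetical
// values, sinking an instruction whose only user is a trivially replaceable
// LCSSA PHI rewrites
//
//   loop:
//     %v = add i32 %inv1, %inv2
//     br i1 %c, label %loop, label %exit
//   exit:
//     %p = phi i32 [ %v, %loop ]
//
// by cloning the add into the exit block, replacing all uses of %p with the
// clone, and leaving the original %v to be deleted as dead.)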
1680
1681/// When an instruction is found to only use loop invariant operands that are
1682/// safe to hoist, this function is called to do the dirty work.
1683///
1684static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1685 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1686 MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
1687 OptimizationRemarkEmitter *ORE) {
1688 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1689 << I << "\n");
1690 ORE->emit([&]() {
1691 return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1692 << ore::NV("Inst", &I);
1693 });
1694
1695 // Metadata can be dependent on conditions we are hoisting above.
1696 // Conservatively strip all metadata on the instruction unless we were
1697 // guaranteed to execute I if we entered the loop, in which case the metadata
1698 // is valid in the loop preheader.
1699 // Similarly, If I is a call and it is not guaranteed to execute in the loop,
1700 // then moving to the preheader means we should strip attributes on the call
1701 // that can cause UB since we may be hoisting above conditions that allowed
1702 // inferring those attributes. They may not be valid at the preheader.
1703 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1704 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1705 // time in isGuaranteedToExecute if we don't actually have anything to
1706 // drop. It is a compile time optimization, not required for correctness.
1707 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop)) {
1708 if (ProfcheckDisableMetadataFixes)
1709 I.dropUBImplyingAttrsAndMetadata();
1710 else
1711 I.dropUBImplyingAttrsAndMetadata({LLVMContext::MD_prof});
1712 }
1713
1714 if (isa<PHINode>(I))
1715 // Move the new node to the end of the phi list in the destination block.
1716 moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
1717 else
1718 // Move the new node to the destination block, before its terminator.
1719 moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
1720 MSSAU, SE);
1721
1722 I.updateLocationAfterHoist();
1723
1724 if (isa<LoadInst>(I))
1725 ++NumMovedLoads;
1726 else if (isa<CallInst>(I))
1727 ++NumMovedCalls;
1728 ++NumHoisted;
1729}
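// (Illustrative sketch, not part of the original source: the metadata
// stripping above matters for a case like a load tagged !nonnull that only
// executed under a guard, e.g. "if (p != 0) v = *p;" in the loop body.
// Hoisted to the preheader, the load no longer sits below the guard, so
// keeping !nonnull would be unsound unless the load was guaranteed to
// execute on every iteration.)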
1730
1731/// Only sink or hoist an instruction if it is not a trapping instruction,
1732/// if it is known not to trap when moved to the preheader,
1733/// or if it is a trapping instruction that is guaranteed to execute.
1734static bool isSafeToExecuteUnconditionally(
1735 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1736 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1737 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1738 AssumptionCache *AC, bool AllowSpeculation) {
1739 if (AllowSpeculation &&
1740 isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1741 return true;
1742
1743 bool GuaranteedToExecute =
1744 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1745
1746 if (!GuaranteedToExecute) {
1747 auto *LI = dyn_cast<LoadInst>(&Inst);
1748 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1749 ORE->emit([&]() {
1750 return OptimizationRemarkMissed(
1751 DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1752 << "failed to hoist load with loop-invariant address "
1753 "because load is conditionally executed";
1754 });
1755 }
1756
1757 return GuaranteedToExecute;
1758}
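// (Illustrative sketch, not part of the original source: with invariant
// operands, "%q = udiv i32 %a, %d" guarded by "if (%d != 0)" inside the loop
// is not safe to speculate into the preheader since udiv traps on zero, but
// it becomes hoistable when SafetyInfo proves it is guaranteed to execute on
// every iteration that enters the loop.)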
1759
1760namespace {
1761class LoopPromoter : public LoadAndStorePromoter {
1762 Value *SomePtr; // Designated pointer to store to.
1763 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1764 SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1765 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1766 PredIteratorCache &PredCache;
1767 MemorySSAUpdater &MSSAU;
1768 LoopInfo &LI;
1769 DebugLoc DL;
1770 Align Alignment;
1771 bool UnorderedAtomic;
1772 AAMDNodes AATags;
1773 ICFLoopSafetyInfo &SafetyInfo;
1774 bool CanInsertStoresInExitBlocks;
1775 ArrayRef<const Instruction *> Uses;
1776
1777 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1778 // (if legal) if doing so would add an out-of-loop use to an instruction
1779 // defined in-loop.
1780 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1781 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1782 return V;
1783
1784 Instruction *I = cast<Instruction>(V);
1785 // We need to create an LCSSA PHI node for the incoming value and
1786 // store that.
1787 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1788 I->getName() + ".lcssa");
1789 PN->insertBefore(BB->begin());
1790 for (BasicBlock *Pred : PredCache.get(BB))
1791 PN->addIncoming(I, Pred);
1792 return PN;
1793 }
1794
1795public:
1796 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1797 SmallVectorImpl<BasicBlock *> &LEB,
1798 SmallVectorImpl<BasicBlock::iterator> &LIP,
1799 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1800 MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1801 Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1802 ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1803 : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1804 LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1805 LI(li), DL(std::move(dl)), Alignment(Alignment),
1806 UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1807 SafetyInfo(SafetyInfo),
1808 CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1809
1810 void insertStoresInLoopExitBlocks() {
1811 // Insert stores in the loop exit blocks. Each exit block gets a
1812 // store of the live-out value that feeds it. Since we've already told
1813 // the SSA updater about the defs in the loop and the preheader
1814 // definition, it is all set and we can start using it.
1815 DIAssignID *NewID = nullptr;
1816 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1817 BasicBlock *ExitBlock = LoopExitBlocks[i];
1818 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1819 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1820 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1821 BasicBlock::iterator InsertPos = LoopInsertPts[i];
1822 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1823 if (UnorderedAtomic)
1824 NewSI->setOrdering(AtomicOrdering::Unordered);
1825 NewSI->setAlignment(Alignment);
1826 NewSI->setDebugLoc(DL);
1827 // Attach DIAssignID metadata to the new store, generating it on the
1828 // first loop iteration.
1829 if (i == 0) {
1830 // NewSI will have its DIAssignID set here if there are any stores in
1831 // Uses with a DIAssignID attachment. This merged ID will then be
1832 // attached to the other inserted stores (in the branch below).
1833 NewSI->mergeDIAssignID(Uses);
1834 NewID = cast_or_null<DIAssignID>(
1835 NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1836 } else {
1837 // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1838 // above.
1839 NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
1840 }
1841
1842 if (AATags)
1843 NewSI->setAAMetadata(AATags);
1844
1845 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1846 MemoryAccess *NewMemAcc;
1847 if (!MSSAInsertPoint) {
1848 NewMemAcc = MSSAU.createMemoryAccessInBB(
1849 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1850 } else {
1851 NewMemAcc =
1852 MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1853 }
1854 MSSAInsertPts[i] = NewMemAcc;
1855 MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
1856 // FIXME: true for safety, false may still be correct.
1857 }
1858 }
1859
1860 void doExtraRewritesBeforeFinalDeletion() override {
1861 if (CanInsertStoresInExitBlocks)
1862 insertStoresInLoopExitBlocks();
1863 }
1864
1865 void instructionDeleted(Instruction *I) const override {
1866 SafetyInfo.removeInstruction(I);
1867 MSSAU.removeMemoryAccess(I);
1868 }
1869
1870 bool shouldDelete(Instruction *I) const override {
1871 if (isa<StoreInst>(I))
1872 return CanInsertStoresInExitBlocks;
1873 return true;
1874 }
1875};
1876
1877bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1878 DominatorTree *DT) {
1879 // We can perform the captured-before check against any instruction in the
1880 // loop header, as the loop header is reachable from any instruction inside
1881 // the loop.
1882 // TODO: ReturnCaptures=true shouldn't be necessary here.
1883 return !capturesAnything(PointerMayBeCapturedBefore(
1884 V, /*ReturnCaptures=*/true, L->getHeader()->getTerminator(), DT,
1885 /*IncludeI=*/false, CaptureComponents::Provenance));
1886}
1887
1888/// Return true if we can prove that a caller cannot inspect the object if an
1889/// unwind occurs inside the loop.
1890bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1891 DominatorTree *DT) {
1892 bool RequiresNoCaptureBeforeUnwind;
1893 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1894 return false;
1895
1896 return !RequiresNoCaptureBeforeUnwind ||
1897 isNotCapturedBeforeOrInLoop(Object, L, DT);
1898}
1899
1900bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
1901 TargetTransformInfo *TTI) {
1902 // The object must be function-local to start with, and then not captured
1903 // before/in the loop.
1904 return (isIdentifiedFunctionLocal(Object) &&
1905 isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
1906 (TTI->isSingleThreaded() || SingleThread);
1907}
1908
1909} // namespace
1910
1911/// Try to promote memory values to scalars by sinking stores out of the
1912/// loop and moving loads to before the loop. We do this by looping over
1913/// the stores in the loop, looking for stores to Must pointers which are
1914/// loop invariant.
1915///
1916bool llvm::promoteLoopAccessesToScalars(
1917 const SmallSetVector<Value *, 8> &PointerMustAliases,
1918 SmallVectorImpl<BasicBlock *> &ExitBlocks,
1919 SmallVectorImpl<BasicBlock::iterator> &InsertPts,
1920 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1921 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
1922 const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
1923 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1924 OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
1925 bool HasReadsOutsideSet) {
1926 // Verify inputs.
1927 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1928 SafetyInfo != nullptr &&
1929 "Unexpected Input to promoteLoopAccessesToScalars");
1930
1931 LLVM_DEBUG({
1932 dbgs() << "Trying to promote set of must-aliased pointers:\n";
1933 for (Value *Ptr : PointerMustAliases)
1934 dbgs() << " " << *Ptr << "\n";
1935 });
1936 ++NumPromotionCandidates;
1937
1938 Value *SomePtr = *PointerMustAliases.begin();
1939 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1940
1941 // It is not safe to promote a load/store from the loop if the load/store is
1942 // conditional. For example, turning:
1943 //
1944 // for () { if (c) *P += 1; }
1945 //
1946 // into:
1947 //
1948 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1949 //
1950 // is not safe, because *P may only be valid to access if 'c' is true.
1951 //
1952 // The safety property divides into two parts:
1953 // p1) The memory may not be dereferenceable on entry to the loop. In this
1954 // case, we can't insert the required load in the preheader.
1955 // p2) The memory model does not allow us to insert a store along any dynamic
1956 // path which did not originally have one.
1957 //
1958 // If at least one store is guaranteed to execute, both properties are
1959 // satisfied, and promotion is legal.
1960 //
1961 // This, however, is not a necessary condition. Even if no store/load is
1962 // guaranteed to execute, we can still establish these properties.
1963 // We can establish (p1) by proving that hoisting the load into the preheader
1964 // is safe (i.e. proving dereferenceability on all paths through the loop). We
1965 // can use any access within the alias set to prove dereferenceability,
1966 // since they're all must alias.
1967 //
1968 // There are two ways to establish (p2):
1969 // a) Prove the location is thread-local. In this case the memory model
1970 // requirement does not apply, and stores are safe to insert.
1971 // b) Prove a store dominates every exit block. In this case, if an exit
1972 // block is reached, the original dynamic path would have taken us through
1973 // the store, so inserting a store into the exit block is safe. Note that this
1974 // is different from the store being guaranteed to execute. For instance,
1975 // if an exception is thrown on the first iteration of the loop, the original
1976 // store is never executed, but the exit blocks are not executed either.
1977
1978 bool DereferenceableInPH = false;
1979 bool StoreIsGuanteedToExecute = false;
1980 bool LoadIsGuaranteedToExecute = false;
1981 bool FoundLoadToPromote = false;
1982
1983 // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
1984 enum {
1985 StoreSafe,
1986 StoreUnsafe,
1987 StoreSafetyUnknown,
1988 } StoreSafety = StoreSafetyUnknown;
1989
1990 SmallVector<Instruction *, 64> LoopUses;
1991
1992 // We start with an alignment of one and try to find instructions that allow
1993 // us to prove better alignment.
1994 Align Alignment;
1995 // Keep track of which types of access we see
1996 bool SawUnorderedAtomic = false;
1997 bool SawNotAtomic = false;
1998 AAMDNodes AATags;
1999
2000 const DataLayout &MDL = Preheader->getDataLayout();
2001
2002 // If there are reads outside the promoted set, then promoting stores is
2003 // definitely not safe.
2004 if (HasReadsOutsideSet)
2005 StoreSafety = StoreUnsafe;
2006
2007 if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2008 // If a loop can throw, we have to insert a store along each unwind edge.
2009 // That said, we can't actually make the unwind edge explicit. Therefore,
2010 // we have to prove that the store is dead along the unwind edge. We do
2011 // this by proving that the caller can't have a reference to the object
2012 // after return and thus can't possibly load from the object.
2013 Value *Object = getUnderlyingObject(SomePtr);
2014 if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2015 StoreSafety = StoreUnsafe;
2016 }
2017
2018 // Check that all accesses to pointers in the alias set use the same type.
2019 // We cannot (yet) promote a memory location that is loaded and stored in
2020 // different sizes. While we are at it, collect alignment and AA info.
2021 Type *AccessTy = nullptr;
2022 for (Value *ASIV : PointerMustAliases) {
2023 for (Use &U : ASIV->uses()) {
2024 // Ignore instructions that are outside the loop.
2025 Instruction *UI = dyn_cast<Instruction>(U.getUser());
2026 if (!UI || !CurLoop->contains(UI))
2027 continue;
2028
2029 // If there is a non-load/store instruction in the loop, we can't promote
2030 // it.
2031 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2032 if (!Load->isUnordered())
2033 return false;
2034
2035 SawUnorderedAtomic |= Load->isAtomic();
2036 SawNotAtomic |= !Load->isAtomic();
2037 FoundLoadToPromote = true;
2038
2039 Align InstAlignment = Load->getAlign();
2040
2041 if (!LoadIsGuaranteedToExecute)
2042 LoadIsGuaranteedToExecute =
2043 SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2044
2045 // Note that proving a load safe to speculate requires proving
2046 // sufficient alignment at the target location. Proving it guaranteed
2047 // to execute does as well. Thus we can increase our guaranteed
2048 // alignment as well.
2049 if (!DereferenceableInPH || (InstAlignment > Alignment))
2050 if (isSafeToExecuteUnconditionally(
2051 *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2052 Preheader->getTerminator(), AC, AllowSpeculation)) {
2053 DereferenceableInPH = true;
2054 Alignment = std::max(Alignment, InstAlignment);
2055 }
2056 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2057 // Stores *of* the pointer are not interesting, only stores *to* the
2058 // pointer.
2059 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2060 continue;
2061 if (!Store->isUnordered())
2062 return false;
2063
2064 SawUnorderedAtomic |= Store->isAtomic();
2065 SawNotAtomic |= !Store->isAtomic();
2066
2067 // If the store is guaranteed to execute, both properties are satisfied.
2068 // We may want to check if a store is guaranteed to execute even if we
2069 // already know that promotion is safe, since it may have higher
2070 // alignment than any other guaranteed stores, in which case we can
2071 // raise the alignment on the promoted store.
2072 Align InstAlignment = Store->getAlign();
2073 bool GuaranteedToExecute =
2074 SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2075 StoreIsGuanteedToExecute |= GuaranteedToExecute;
2076 if (GuaranteedToExecute) {
2077 DereferenceableInPH = true;
2078 if (StoreSafety == StoreSafetyUnknown)
2079 StoreSafety = StoreSafe;
2080 Alignment = std::max(Alignment, InstAlignment);
2081 }
2082
2083 // If a store dominates all exit blocks, it is safe to sink.
2084 // As explained above, if an exit block was executed, a dominating
2085 // store must have been executed at least once, so we are not
2086 // introducing stores on paths that did not have them.
2087 // Note that this only looks at explicit exit blocks. If we ever
2088 // start sinking stores into unwind edges (see above), this will break.
2089 if (StoreSafety == StoreSafetyUnknown &&
2090 llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2091 return DT->dominates(Store->getParent(), Exit);
2092 }))
2093 StoreSafety = StoreSafe;
2094
2095 // If the store is not guaranteed to execute, we may still get
2096 // deref info through it.
2097 if (!DereferenceableInPH) {
2098 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2099 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2100 Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2101 }
2102 } else
2103 continue; // Not a load or store.
2104
2105 if (!AccessTy)
2106 AccessTy = getLoadStoreType(UI);
2107 else if (AccessTy != getLoadStoreType(UI))
2108 return false;
2109
2110 // Merge the AA tags.
2111 if (LoopUses.empty()) {
2112 // On the first load/store, just take its AA tags.
2113 AATags = UI->getAAMetadata();
2114 } else if (AATags) {
2115 AATags = AATags.merge(UI->getAAMetadata());
2116 }
2117
2118 LoopUses.push_back(UI);
2119 }
2120 }
2121
2122 // If we found both an unordered atomic instruction and a non-atomic memory
2123 // access, bail. We can't blindly promote non-atomic to atomic since we
2124 // might not be able to lower the result. We can't downgrade since that
2125 // would violate the memory model. Also, align 0 is an error for atomics.
2126 if (SawUnorderedAtomic && SawNotAtomic)
2127 return false;
2128
2129 // If we're inserting an atomic load in the preheader, we must be able to
2130 // lower it. We're only guaranteed to be able to lower naturally aligned
2131 // atomics.
2132 if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2133 return false;
2134
2135 // If we couldn't prove we can hoist the load, bail.
2136 if (!DereferenceableInPH) {
2137 LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2138 return false;
2139 }
2140
2141 // We know we can hoist the load, but don't have a guaranteed store.
2142 // Check whether the location is writable and thread-local. If it is, then we
2143 // can insert stores along paths which originally didn't have them without
2144 // violating the memory model.
2145 if (StoreSafety == StoreSafetyUnknown) {
2146 Value *Object = getUnderlyingObject(SomePtr);
2147 bool ExplicitlyDereferenceableOnly;
2148 if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2149 (!ExplicitlyDereferenceableOnly ||
2150 isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
2151 isThreadLocalObject(Object, CurLoop, DT, TTI))
2152 StoreSafety = StoreSafe;
2153 }
2154
2155 // If we've still failed to prove we can sink the store, hoist the load
2156 // only, if possible.
2157 if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2158 // If we cannot hoist the load either, give up.
2159 return false;
2160
2161 // Let's do the promotion!
2162 if (StoreSafety == StoreSafe) {
2163 LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2164 << '\n');
2165 ++NumLoadStorePromoted;
2166 } else {
2167 LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2168 << '\n');
2169 ++NumLoadPromoted;
2170 }
2171
2172 ORE->emit([&]() {
2173 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2174 LoopUses[0])
2175 << "Moving accesses to memory location out of the loop";
2176 });
2177
2178 // Look at all the loop uses, and try to merge their locations.
2179 std::vector<DebugLoc> LoopUsesLocs;
2180 for (auto U : LoopUses)
2181 LoopUsesLocs.push_back(U->getDebugLoc());
2182 auto DL = DebugLoc::getMergedLocations(LoopUsesLocs);
2183
2184 // We use the SSAUpdater interface to insert phi nodes as required.
2185 SmallVector<PHINode *, 16> NewPHIs;
2186 SSAUpdater SSA(&NewPHIs);
2187 LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2188 MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2189 SawUnorderedAtomic,
2190 StoreIsGuanteedToExecute ? AATags : AAMDNodes(),
2191 *SafetyInfo, StoreSafety == StoreSafe);
2192
2193 // Set up the preheader to have a definition of the value. It is the live-out
2194 // value from the preheader that uses in the loop will use.
2195 LoadInst *PreheaderLoad = nullptr;
2196 if (FoundLoadToPromote || !StoreIsGuanteedToExecute) {
2197 PreheaderLoad =
2198 new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2199 Preheader->getTerminator()->getIterator());
2200 if (SawUnorderedAtomic)
2201 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2202 PreheaderLoad->setAlignment(Alignment);
2203 PreheaderLoad->setDebugLoc(DebugLoc::getDropped());
2204 if (AATags && LoadIsGuaranteedToExecute)
2205 PreheaderLoad->setAAMetadata(AATags);
2206
2207 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2208 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2209 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2210 MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2211 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2212 } else {
2213 SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2214 }
2215
2216 if (VerifyMemorySSA)
2217 MSSAU.getMemorySSA()->verifyMemorySSA();
2218 // Rewrite all the loads in the loop and remember all the definitions from
2219 // stores in the loop.
2220 Promoter.run(LoopUses);
2221
2222 if (VerifyMemorySSA)
2223 MSSAU.getMemorySSA()->verifyMemorySSA();
2224 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2225 if (PreheaderLoad && PreheaderLoad->use_empty())
2226 eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2227
2228 return true;
2229}
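// (Illustrative sketch, not part of the original source: a successful
// load/store promotion of a loop-invariant pointer p rewrites, roughly,
//
//   for (...) { *p += f(); }        // load and store of p inside the loop
//
// into
//
//   tmp = *p;                       // load inserted in the preheader
//   for (...) { tmp += f(); }       // tmp lives in a register
//   *p = tmp;                       // store inserted in each exit block
//
// with the SSAUpdater stitching tmp's values together across blocks.)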
2230
2231static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2232 function_ref<void(Instruction *)> Fn) {
2233 for (const BasicBlock *BB : L->blocks())
2234 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2235 for (const auto &Access : *Accesses)
2236 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2237 Fn(MUD->getMemoryInst());
2238}
2239
2240// The bool indicates whether there might be reads outside the set, in which
2241// case only loads may be promoted.
2242static SmallVector<PointersAndHasReadsOutsideSet, 0>
2243collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2244 BatchAAResults BatchAA(*AA);
2245 AliasSetTracker AST(BatchAA);
2246
2247 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2248 if (const auto *SI = dyn_cast<StoreInst>(I)) {
2249 const Value *PtrOp = SI->getPointerOperand();
2250 return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2251 }
2252 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2253 const Value *PtrOp = LI->getPointerOperand();
2254 return !isa<ConstantData>(PtrOp) && L->isLoopInvariant(PtrOp);
2255 }
2256 return false;
2257 };
2258
2259 // Populate AST with potentially promotable accesses.
2260 SmallPtrSet<Value *, 16> AttemptingPromotion;
2261 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2262 if (IsPotentiallyPromotable(I)) {
2263 AttemptingPromotion.insert(I);
2264 AST.add(I);
2265 }
2266 });
2267
2268 // We're only interested in must-alias sets that contain a mod.
2269 SmallVector<PointerIntPair<const AliasSet *, 1, bool>, 8> Sets;
2270 for (AliasSet &AS : AST)
2271 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2272 Sets.push_back({&AS, false});
2273
2274 if (Sets.empty())
2275 return {}; // Nothing to promote...
2276
2277 // Discard any sets for which there is an aliasing non-promotable access.
2278 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2279 if (AttemptingPromotion.contains(I))
2280 return;
2281
2282 llvm::erase_if(Sets, [&](PointerIntPair<const AliasSet *, 1, bool> &Pair) {
2283 ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
2284 // Cannot promote if there are writes outside the set.
2285 if (isModSet(MR))
2286 return true;
2287 if (isRefSet(MR)) {
2288 // Remember reads outside the set.
2289 Pair.setInt(true);
2290 // If this is a mod-only set and there are reads outside the set,
2291 // we will not be able to promote, so bail out early.
2292 return !Pair.getPointer()->isRef();
2293 }
2294 return false;
2295 });
2296 });
2297
2298 SmallVector<PointersAndHasReadsOutsideSet, 0> Result;
2299 for (auto [Set, HasReadsOutsideSet] : Sets) {
2300 SmallSetVector<Value *, 8> PointerMustAliases;
2301 for (const auto &MemLoc : *Set)
2302 PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr));
2303 Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
2304 }
2305
2306 return Result;
2307}
2308
2309// For a given store instruction or writeonly call instruction, this function
2310 // checks that there are no reads or writes that conflict with the memory
2311 // access in the instruction.
2312static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA,
2313 AAResults *AA, Loop *CurLoop,
2314 SinkAndHoistLICMFlags &Flags) {
2315 assert(isa<CallInst>(*I) || isa<StoreInst>(*I));
2316 // If there are more accesses than the Promotion cap, then give up as we're
2317 // not walking a list that long.
2318 if (Flags.tooManyMemoryAccesses())
2319 return false;
2320
2321 auto *IMD = MSSA->getMemoryAccess(I);
2322 BatchAAResults BAA(*AA);
2323 auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, IMD);
2324 // Make sure there are no clobbers inside the loop.
2325 if (!MSSA->isLiveOnEntryDef(Source) && CurLoop->contains(Source->getBlock()))
2326 return false;
2327
2328 // If there are interfering Uses (i.e. their defining access is in the
2329 // loop), or ordered loads (stored as Defs!), don't move this store.
2330 // Could do better here, but this is conservatively correct.
2331 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
2332 // moving accesses. Can also extend to dominating uses.
2333 for (auto *BB : CurLoop->getBlocks()) {
2334 auto *Accesses = MSSA->getBlockAccesses(BB);
2335 if (!Accesses)
2336 continue;
2337 for (const auto &MA : *Accesses)
2338 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
2339 auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
2340 const_cast<MemoryUse *>(MU));
2341 if (!MSSA->isLiveOnEntryDef(MD) && CurLoop->contains(MD->getBlock()))
2342 return false;
2343 // Disable hoisting past potentially interfering loads. Optimized
2344 // Uses may point to an access outside the loop, as getClobbering
2345 // checks the previous iteration when walking the backedge.
2346 // FIXME: More precise: no Uses that alias I.
2347 if (!Flags.getIsSink() && !MSSA->dominates(IMD, MU))
2348 return false;
2349 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
2350 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
2351 (void)LI; // Silence warning.
2352 assert(!LI->isUnordered() && "Expected unordered load");
2353 return false;
2354 }
2355 // Any call, while it may not be clobbering I, may be a use.
2356 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
2357 // Check if the call may read from the memory location written
2358 // to by I. Check CI's attributes and arguments; the number of
2359 // such checks performed is limited above by NoOfMemAccTooLarge.
2360 if (auto *SI = dyn_cast<StoreInst>(I)) {
2361 ModRefInfo MRI = BAA.getModRefInfo(CI, MemoryLocation::get(SI));
2362 if (isModOrRefSet(MRI))
2363 return false;
2364 } else {
2365 auto *SCI = cast<CallInst>(I);
2366 // If the instruction we want to hoist is itself a call
2367 // instruction, then we need not check mod/ref info with itself.
2368 if (SCI == CI)
2369 continue;
2370 ModRefInfo MRI = BAA.getModRefInfo(CI, SCI);
2371 if (isModOrRefSet(MRI))
2372 return false;
2373 }
2374 }
2375 }
2376 }
2377 return true;
2378}
2379
2380static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
2381 Loop *CurLoop, Instruction &I,
2382 SinkAndHoistLICMFlags &Flags,
2383 bool InvariantGroup) {
2384 // For hoisting, use the walker to determine safety
2385 if (!Flags.getIsSink()) {
2386 // If hoisting an invariant group, we only need to check that there
2387 // is no store to the loaded pointer between the start of the loop,
2388 // and the load (since all values must be the same).
2389
2390 // This holds in either of two cases:
2391 // 1) the clobbering memory access is outside the loop, or
2392 // 2) the earliest access is at the loop header and
2393 // the memory loaded is the phi node.
2394
2395 BatchAAResults BAA(MSSA->getAA());
2396 MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
2397 return !MSSA->isLiveOnEntryDef(Source) &&
2398 CurLoop->contains(Source->getBlock()) &&
2399 !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source));
2400 }
2401
2402 // For sinking, we'd need to check all Defs below this use. The getClobbering
2403 // call will look on the backedge of the loop, but will check aliasing with
2404 // the instructions on the previous iteration.
2405 // For example:
2406 // for (i ... )
2407 // load a[i] ( Use (LoE)
2408 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2409 // i++;
2410 // The load sees no clobbering inside the loop, as the backedge alias check
2411 // does phi translation, and will check aliasing against store a[i-1].
2412 // However sinking the load outside the loop, below the store is incorrect.
2413
2414 // For now, only sink if there are no Defs in the loop, and the existing ones
2415 // precede the use and are in the same block.
2416 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2417 // needs PostDominatorTreeAnalysis.
2418 // FIXME: More precise: no Defs that alias this Use.
2419 if (Flags.tooManyMemoryAccesses())
2420 return true;
2421 for (auto *BB : CurLoop->getBlocks())
2422 if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2423 return true;
2424 // When sinking, the source block may not be part of the loop so check it.
2425 if (!CurLoop->contains(&I))
2426 return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2427
2428 return false;
2429}
2430
2431bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU) {
2432 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2433 for (const auto &MA : *Accesses)
2434 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2435 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2436 return true;
2437 return false;
2438}
2439
2440/// Try to simplify things like (A < INV_1 AND A < INV_2) into (A <
2441/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
2442/// minimum can be computed outside of the loop, and A is not loop-invariant.
2443static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2444 MemorySSAUpdater &MSSAU) {
2445 bool Inverse = false;
2446 using namespace PatternMatch;
2447 Value *Cond1, *Cond2;
2448 if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2449 Inverse = true;
2450 } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
2451 // Do nothing
2452 } else
2453 return false;
2454
2455 auto MatchICmpAgainstInvariant = [&](Value *C, CmpPredicate &P, Value *&LHS,
2456 Value *&RHS) {
2457 if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2458 return false;
2459 if (!LHS->getType()->isIntegerTy())
2460 return false;
2461 if (!ICmpInst::isRelational(P))
2462 return false;
2463 if (L.isLoopInvariant(LHS)) {
2464 std::swap(LHS, RHS);
2465 P = ICmpInst::getSwappedCmpPredicate(P);
2466 }
2467 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2468 return false;
2469 if (Inverse)
2470 P = ICmpInst::getInverseCmpPredicate(P);
2471 return true;
2472 };
2473 CmpPredicate P1, P2;
2474 Value *LHS1, *LHS2, *RHS1, *RHS2;
2475 if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2476 !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2477 return false;
2478 auto MatchingPred = CmpPredicate::getMatching(P1, P2);
2479 if (!MatchingPred || LHS1 != LHS2)
2480 return false;
2481
2482 // Everything is fine, we can do the transform.
2483 bool UseMin = ICmpInst::isLT(*MatchingPred) || ICmpInst::isLE(*MatchingPred);
2484 assert(
2485 (UseMin || ICmpInst::isGT(*MatchingPred) ||
2486 ICmpInst::isGE(*MatchingPred)) &&
2487 "Relational predicate is either less (or equal) or greater (or equal)!");
2488 Intrinsic::ID id = ICmpInst::isSigned(*MatchingPred)
2489 ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2490 : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2491 auto *Preheader = L.getLoopPreheader();
2492 assert(Preheader && "Loop is not in simplify form?");
2493 IRBuilder<> Builder(Preheader->getTerminator());
2494 // We are about to create a new guaranteed use for RHS2 which might not exist
2495 // before (if it was a non-taken input of logical and/or instruction). If it
2496 // was poison, we need to freeze it. Note that no new use for LHS and RHS1 are
2497 // introduced, so they don't need this.
2498 if (isa<SelectInst>(I))
2499 RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
2500 Value *NewRHS = Builder.CreateBinaryIntrinsic(
2501 id, RHS1, RHS2, nullptr,
2502 StringRef("invariant.") +
2503 (ICmpInst::isSigned(*MatchingPred) ? "s" : "u") +
2504 (UseMin ? "min" : "max"));
2505 Builder.SetInsertPoint(&I);
2506 ICmpInst::Predicate P = *MatchingPred;
2507 if (Inverse)
2508 P = ICmpInst::getInversePredicate(P);
2509 Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
2510 NewCond->takeName(&I);
2511 I.replaceAllUsesWith(NewCond);
2512 eraseInstruction(I, SafetyInfo, MSSAU);
2513 Instruction &CondI1 = *cast<Instruction>(Cond1);
2514 Instruction &CondI2 = *cast<Instruction>(Cond2);
2515 salvageDebugInfo(CondI1);
2516 salvageDebugInfo(CondI2);
2517 eraseInstruction(CondI1, SafetyInfo, MSSAU);
2518 eraseInstruction(CondI2, SafetyInfo, MSSAU);
2519 return true;
2520}
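// (Illustrative sketch, not part of the original source: with a loop-variant
// %a and invariants %inv1/%inv2, the transform above rewrites
//
//   %c1 = icmp slt i32 %a, %inv1
//   %c2 = icmp slt i32 %a, %inv2
//   %r = and i1 %c1, %c2
//
// into a preheader computation
//
//   %invariant.smin = call i32 @llvm.smin.i32(i32 %inv1, i32 %inv2)
//
// and a single in-loop compare %r = icmp slt i32 %a, %invariant.smin.)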
2521
2522/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
2523/// this allows hoisting the inner GEP.
2524static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2525 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2526 DominatorTree *DT) {
2527 auto *GEP = dyn_cast<GetElementPtrInst>(&I);
2528 if (!GEP)
2529 return false;
2530
2531 // Do not try to hoist a constant GEP out of the loop via reassociation.
2532 // Constant GEPs can often be folded into addressing modes, and reassociating
2533 // them may inhibit CSE of a common base.
2534 if (GEP->hasAllConstantIndices())
2535 return false;
2536
2537 auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
2538 if (!Src || !Src->hasOneUse() || !L.contains(Src))
2539 return false;
2540
2541 Value *SrcPtr = Src->getPointerOperand();
2542 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
2543 if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
2544 return false;
2545
2546 // This can only happen if !AllowSpeculation, otherwise this would already be
2547 // handled.
2548 // FIXME: Should we respect AllowSpeculation in these reassociation folds?
2549 // The flag exists to prevent metadata dropping, which is not relevant here.
2550 if (all_of(Src->indices(), LoopInvariant))
2551 return false;
2552
2553 // The swapped GEPs are inbounds if both original GEPs are inbounds
2554 // and the sign of the offsets is the same. For simplicity, only
2555 // handle both offsets being non-negative.
2556 const DataLayout &DL = GEP->getDataLayout();
2557 auto NonNegative = [&](Value *V) {
2558 return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
2559 };
2560 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
2561 all_of(Src->indices(), NonNegative) &&
2562 all_of(GEP->indices(), NonNegative);
2563
2564 BasicBlock *Preheader = L.getLoopPreheader();
2565 IRBuilder<> Builder(Preheader->getTerminator());
2566 Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
2567 SmallVector<Value *>(GEP->indices()),
2568 "invariant.gep", IsInBounds);
2569 Builder.SetInsertPoint(GEP);
2570 Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
2571 SmallVector<Value *>(Src->indices()), "gep",
2572 IsInBounds);
2573 GEP->replaceAllUsesWith(NewGEP);
2574 eraseInstruction(*GEP, SafetyInfo, MSSAU);
2575 salvageDebugInfo(*Src);
2576 eraseInstruction(*Src, SafetyInfo, MSSAU);
2577 return true;
2578}
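// (Illustrative sketch, not part of the original source: with %base and %inv
// loop-invariant and %i loop-variant,
//
//   %src = getelementptr i8, ptr %base, i64 %i     ; variant index, in loop
//   %gep = getelementptr i8, ptr %src, i64 %inv    ; invariant index
//
// is reassociated to
//
//   %invariant.gep = getelementptr i8, ptr %base, i64 %inv  ; preheader
//   %gep = getelementptr i8, ptr %invariant.gep, i64 %i     ; stays in loop
//
// so the invariant part is computed only once.)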
2579
2580/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
2581/// C1 and C2 are loop invariants and LV is a loop-variant.
2582static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
2583 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2584 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2585 AssumptionCache *AC, DominatorTree *DT) {
2586 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2587 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2588
2589 bool IsSigned = ICmpInst::isSigned(Pred);
2590
2591 // Try to represent VariantLHS as sum of invariant and variant operands.
2592 using namespace PatternMatch;
2593 Value *VariantOp, *InvariantOp;
2594 if (IsSigned &&
2595 !match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2596 return false;
2597 if (!IsSigned &&
2598 !match(VariantLHS, m_NUWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
2599 return false;
2600
2601 // LHS itself is a loop-variant, try to represent it in the form:
2602 // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
2603 if (L.isLoopInvariant(VariantOp))
2604 std::swap(VariantOp, InvariantOp);
2605 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2606 return false;
2607
2608 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
2609 // freely move values from left side of inequality to right side (just as in
2610 // normal linear arithmetic). Overflows make things much more complicated, so
2611 // we want to avoid this.
2612 auto &DL = L.getHeader()->getDataLayout();
2613 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2614 if (IsSigned && computeOverflowForSignedSub(InvariantRHS, InvariantOp, SQ) !=
2615 llvm::OverflowResult::NeverOverflows)
2616 return false;
2617 if (!IsSigned &&
2618 computeOverflowForUnsignedSub(InvariantRHS, InvariantOp, SQ) !=
2619 llvm::OverflowResult::NeverOverflows)
2620 return false;
2621 auto *Preheader = L.getLoopPreheader();
2622 assert(Preheader && "Loop is not in simplify form?");
2623 IRBuilder<> Builder(Preheader->getTerminator());
2624 Value *NewCmpOp =
2625 Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
2626 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2627 ICmp.setPredicate(Pred);
2628 ICmp.setOperand(0, VariantOp);
2629 ICmp.setOperand(1, NewCmpOp);
2630
2631 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2632 salvageDebugInfo(DeadI);
2633 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2634 return true;
2635}
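// (Illustrative sketch, not part of the original source: with invariants
// %c1/%c2 and loop-variant %lv in the unsigned case,
//
//   %lhs = add nuw i32 %lv, %c1
//   %cmp = icmp ult i32 %lhs, %c2
//
// becomes "%invariant.op = sub nuw i32 %c2, %c1" in the preheader and
// "%cmp = icmp ult i32 %lv, %invariant.op" in the loop; this is only valid
// because the subtraction was proven above never to overflow.)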
2636
2637/// Try to reassociate and hoist the following two patterns:
2638/// LV - C1 < C2 --> LV < C1 + C2,
2639/// C1 - LV < C2 --> LV > C1 - C2.
2640static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
2641 Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
2642 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
2643 AssumptionCache *AC, DominatorTree *DT) {
2644 assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
2645 assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");
2646
2647 bool IsSigned = ICmpInst::isSigned(Pred);
2648
2649 // Try to represent VariantLHS as sum of invariant and variant operands.
2650 using namespace PatternMatch;
2651 Value *VariantOp, *InvariantOp;
2652 if (IsSigned &&
2653 !match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2654 return false;
2655 if (!IsSigned &&
2656 !match(VariantLHS, m_NUWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2657 return false;
2658
2659 bool VariantSubtracted = false;
2660 // LHS itself is a loop-variant, try to represent it in the form:
2661 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2662 // the variant operand goes with minus, we use a slightly different scheme.
2663 if (L.isLoopInvariant(VariantOp)) {
2664 std::swap(VariantOp, InvariantOp);
2665 VariantSubtracted = true;
2666 Pred = ICmpInst::getSwappedPredicate(Pred);
2667 }
2668 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2669 return false;
2670
2671 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2672 // freely move values from left side of inequality to right side (just as in
2673 // normal linear arithmetic). Overflows make things much more complicated, so
2674 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2675 // "C1 - C2" does not overflow.
2676 auto &DL = L.getHeader()->getDataLayout();
2677 SimplifyQuery SQ(DL, DT, AC, &ICmp);
2678 if (VariantSubtracted && IsSigned) {
2679 // C1 - LV < C2 --> LV > C1 - C2
2680 if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
2681 llvm::OverflowResult::NeverOverflows)
2682 return false;
2683 } else if (VariantSubtracted && !IsSigned) {
2684 // C1 - LV < C2 --> LV > C1 - C2
2685 if (computeOverflowForUnsignedSub(InvariantOp, InvariantRHS, SQ) !=
2686 llvm::OverflowResult::NeverOverflows)
2687 return false;
2688 } else if (!VariantSubtracted && IsSigned) {
2689 // LV - C1 < C2 --> LV < C1 + C2
2690 if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
2691 llvm::OverflowResult::NeverOverflows)
2692 return false;
2693 } else { // !VariantSubtracted && !IsSigned
2694 // LV - C1 < C2 --> LV < C1 + C2
2695 if (computeOverflowForUnsignedAdd(InvariantOp, InvariantRHS, SQ) !=
2696 llvm::OverflowResult::NeverOverflows)
2697 return false;
2698 }
2699 auto *Preheader = L.getLoopPreheader();
2700 assert(Preheader && "Loop is not in simplify form?");
2701 IRBuilder<> Builder(Preheader->getTerminator());
2702 Value *NewCmpOp =
2703 VariantSubtracted
2704 ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
2705 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned)
2706 : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
2707 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned);
2708 ICmp.setPredicate(Pred);
2709 ICmp.setOperand(0, VariantOp);
2710 ICmp.setOperand(1, NewCmpOp);
2711
2712 Instruction &DeadI = cast<Instruction>(*VariantLHS);
2713 salvageDebugInfo(DeadI);
2714 eraseInstruction(DeadI, SafetyInfo, MSSAU);
2715 return true;
2716}
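// (Illustrative sketch, not part of the original source: the two patterns,
// with invariants %c1/%c2 and loop-variant %lv in the signed case:
//
//   %s = sub nsw i32 %lv, %c1          ; LV - C1 < C2
//   %cmp = icmp slt i32 %s, %c2        ;   --> %lv < %c1 + %c2
//
//   %s = sub nsw i32 %c1, %lv          ; C1 - LV < C2
//   %cmp = icmp slt i32 %s, %c2        ;   --> %lv > %c1 - %c2
//
// where %c1 + %c2 (or %c1 - %c2) is emitted once in the preheader as
// "invariant.op" after the overflow checks above succeed.)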
2717
2718/// Reassociate and hoist add/sub expressions.
2719static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2720 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2721 DominatorTree *DT) {
2722 using namespace PatternMatch;
2723 CmpPredicate Pred;
2724 Value *LHS, *RHS;
2725 if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
2726 return false;
2727
2728 // Put variant operand to LHS position.
2729 if (L.isLoopInvariant(LHS)) {
2730 std::swap(LHS, RHS);
2731 Pred = ICmpInst::getSwappedPredicate(Pred);
2732 }
2733 // We want to delete the initial operation after reassociation, so only do it
2734 // if it has no other uses.
2735 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
2736 return false;
2737
2738 // TODO: We could go with smarter context, taking common dominator of all I's
2739 // users instead of I itself.
2740 if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2741 return true;
2742
2743 if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2744 return true;
2745
2746 return false;
2747}
2748
2749static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
2750 unsigned FPOpcode) {
2751 if (I->getOpcode() == IntOpcode)
2752 return true;
2753 if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
2754 I->hasNoSignedZeros())
2755 return true;
2756 return false;
2757}
2758
2759/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2760/// A1, A2, ... and C are loop invariants into expressions like
2761/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2762/// invariant expressions. This functions returns true only if any hoisting has
2763/// actually occurred.
2764static bool hoistMulAddAssociation(Instruction &I, Loop &L,
2765 ICFLoopSafetyInfo &SafetyInfo,
2766 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2767 DominatorTree *DT) {
2768 if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
2769 return false;
2770 Value *VariantOp = I.getOperand(0);
2771 Value *InvariantOp = I.getOperand(1);
2772 if (L.isLoopInvariant(VariantOp))
2773 std::swap(VariantOp, InvariantOp);
2774 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2775 return false;
2776 Value *Factor = InvariantOp;
2777
2778 // First, we need to make sure we should do the transformation.
2779 SmallVector<Use *> Changes;
2780 SmallVector<BinaryOperator *> Adds;
2781 SmallVector<BinaryOperator *, 4> Worklist;
2782 if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
2783 Worklist.push_back(VariantBinOp);
2784 while (!Worklist.empty()) {
2785 BinaryOperator *BO = Worklist.pop_back_val();
2786 if (!BO->hasOneUse())
2787 return false;
2788 if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
2789 isa<BinaryOperator>(BO->getOperand(0)) &&
2790 isa<BinaryOperator>(BO->getOperand(1))) {
2791 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
2792 Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
2793 Adds.push_back(BO);
2794 continue;
2795 }
2796 if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
2797 L.isLoopInvariant(BO))
2798 return false;
2799 Use &U0 = BO->getOperandUse(0);
2800 Use &U1 = BO->getOperandUse(1);
2801 if (L.isLoopInvariant(U0))
2802 Changes.push_back(&U0);
2803 else if (L.isLoopInvariant(U1))
2804 Changes.push_back(&U1);
2805 else
2806 return false;
2807 unsigned Limit = I.getType()->isIntOrIntVectorTy()
2808 ? IntAssociationUpperLimit
2809 : FPAssociationUpperLimit;
2810 if (Changes.size() > Limit)
2811 return false;
2812 }
2813 if (Changes.empty())
2814 return false;
2815
2816 // Drop the poison flags for any adds we looked through.
2817 if (I.getType()->isIntOrIntVectorTy()) {
2818 for (auto *Add : Adds)
2819 Add->dropPoisonGeneratingFlags();
2820 }
2821
2822 // We know we should do it so let's do the transformation.
2823 auto *Preheader = L.getLoopPreheader();
2824 assert(Preheader && "Loop is not in simplify form?");
2825 IRBuilder<> Builder(Preheader->getTerminator());
2826 for (auto *U : Changes) {
2827 assert(L.isLoopInvariant(U->get()));
2828 auto *Ins = cast<BinaryOperator>(U->getUser());
2829 Value *Mul;
2830 if (I.getType()->isIntOrIntVectorTy()) {
2831 Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
2832 // Drop the poison flags on the original multiply.
2833 Ins->dropPoisonGeneratingFlags();
2834 } else
2835 Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");
2836
2837 // Rewrite the reassociable instruction.
2838 unsigned OpIdx = U->getOperandNo();
2839 auto *LHS = OpIdx == 0 ? Mul : Ins->getOperand(0);
2840 auto *RHS = OpIdx == 1 ? Mul : Ins->getOperand(1);
2841 auto *NewBO =
2842 BinaryOperator::Create(Ins->getOpcode(), LHS, RHS,
2843 Ins->getName() + ".reass", Ins->getIterator());
2844 NewBO->setDebugLoc(DebugLoc::getDropped());
2845 NewBO->copyIRFlags(Ins);
2846 if (VariantOp == Ins)
2847 VariantOp = NewBO;
2848 Ins->replaceAllUsesWith(NewBO);
2849 eraseInstruction(*Ins, SafetyInfo, MSSAU);
2850 }
2851
2852 I.replaceAllUsesWith(VariantOp);
2853 eraseInstruction(I, SafetyInfo, MSSAU);
2854 return true;
2855}
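// (Illustrative sketch, not part of the original source: with invariants
// %a1/%a2/%c and loop-variants %b1/%b2, the expression
//
//   ((%a1 * %b1) + (%a2 * %b2)) * %c
//
// is rewritten as
//
//   ((%a1 * %c) * %b1) + ((%a2 * %c) * %b2)
//
// where the products %a1 * %c and %a2 * %c ("factor.op.mul") are emitted in
// the preheader and the touched adds/muls have their poison-generating flags
// dropped, as done above.)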
2856
2857/// Reassociate associative binary expressions of the form
2858///
2859/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
2860/// 2. "(C1 op LV) op C2" ==> "LV op (C1 op C2)"
2861/// 3. "C2 op (C1 op LV)" ==> "LV op (C1 op C2)"
2862/// 4. "C2 op (LV op C1)" ==> "LV op (C1 op C2)"
2863///
2864/// where op is an associative BinOp, LV is a loop variant, and C1 and C2 are
2865/// loop invariants that we want to hoist, noting that associativity implies
2866/// commutativity.
2867static bool hoistBOAssociation(Instruction &I, Loop &L,
2868 ICFLoopSafetyInfo &SafetyInfo,
2869 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2870 DominatorTree *DT) {
2871 auto *BO = dyn_cast<BinaryOperator>(&I);
2872 if (!BO || !BO->isAssociative())
2873 return false;
2874
2875 Instruction::BinaryOps Opcode = BO->getOpcode();
2876 bool LVInRHS = L.isLoopInvariant(BO->getOperand(0));
2877 auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(LVInRHS));
2878 if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
2879 BO0->hasNUsesOrMore(BO0->getType()->isIntegerTy() ? 2 : 3))
2880 return false;
2881
2882 Value *LV = BO0->getOperand(0);
2883 Value *C1 = BO0->getOperand(1);
2884 Value *C2 = BO->getOperand(!LVInRHS);
2885
2886 assert(BO->isCommutative() && BO0->isCommutative() &&
2887 "Associativity implies commutativity");
2888 if (L.isLoopInvariant(LV) && !L.isLoopInvariant(C1))
2889 std::swap(LV, C1);
2890 if (L.isLoopInvariant(LV) || !L.isLoopInvariant(C1) || !L.isLoopInvariant(C2))
2891 return false;
2892
2893 auto *Preheader = L.getLoopPreheader();
2894 assert(Preheader && "Loop is not in simplify form?");
2895
2896 IRBuilder<> Builder(Preheader->getTerminator());
2897 auto *Inv = Builder.CreateBinOp(Opcode, C1, C2, "invariant.op");
2898
2899 auto *NewBO = BinaryOperator::Create(
2900 Opcode, LV, Inv, BO->getName() + ".reass", BO->getIterator());
2901 NewBO->setDebugLoc(DebugLoc::getDropped());
2902
2903 if (Opcode == Instruction::FAdd || Opcode == Instruction::FMul) {
2904 // Intersect FMF flags for FADD and FMUL.
2905 FastMathFlags Intersect = BO->getFastMathFlags() & BO0->getFastMathFlags();
2906 if (auto *I = dyn_cast<Instruction>(Inv))
2907 I->setFastMathFlags(Intersect);
2908 NewBO->setFastMathFlags(Intersect);
2909 } else {
2910 OverflowTracking Flags;
2911 Flags.AllKnownNonNegative = false;
2912 Flags.AllKnownNonZero = false;
2913 Flags.mergeFlags(*BO);
2914 Flags.mergeFlags(*BO0);
2915 // If `Inv` was not constant-folded, a new Instruction has been created.
2916 if (auto *I = dyn_cast<Instruction>(Inv))
2917 Flags.applyFlags(*I);
2918 Flags.applyFlags(*NewBO);
2919 }
2920
2921 BO->replaceAllUsesWith(NewBO);
2922 eraseInstruction(*BO, SafetyInfo, MSSAU);
2923
2924 // (LV op C1) might not be erased if it has more uses than the one we just
2925 // replaced.
2926 if (BO0->use_empty()) {
2927 salvageDebugInfo(*BO0);
2928 eraseInstruction(*BO0, SafetyInfo, MSSAU);
2929 }
2930
2931 return true;
2932}
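// (Illustrative sketch, not part of the original source: pattern 1 with an
// integer add, where %lv is loop-variant and %c1/%c2 are invariant:
//
//   %t = add i32 %lv, %c1                  ; in loop, single use
//   %r = add i32 %t, %c2                   ; in loop
//
// becomes
//
//   %invariant.op = add i32 %c1, %c2       ; preheader
//   %r.reass = add i32 %lv, %invariant.op  ; in loop
//
// with overflow flags merged conservatively via OverflowTracking above.)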
2933
2934static bool hoistArithmetics(Instruction &I, Loop &L,
2935 ICFLoopSafetyInfo &SafetyInfo,
2936 MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2937 DominatorTree *DT) {
2938 // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
2939 // into (x < min(INV1, INV2)), and hoisting the invariant part of this
2940 // expression out of the loop.
2941 if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
2942 ++NumHoisted;
2943 ++NumMinMaxHoisted;
2944 return true;
2945 }
2946
2947 // Try to hoist GEPs by reassociation.
2948 if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
2949 ++NumHoisted;
2950 ++NumGEPsHoisted;
2951 return true;
2952 }
2953
2954 // Try to hoist add/sub's by reassociation.
2955 if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
2956 ++NumHoisted;
2957 ++NumAddSubHoisted;
2958 return true;
2959 }
2960
2961 bool IsInt = I.getType()->isIntOrIntVectorTy();
2962 if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2963 ++NumHoisted;
2964 if (IsInt)
2965 ++NumIntAssociationsHoisted;
2966 else
2967 ++NumFPAssociationsHoisted;
2968 return true;
2969 }
2970
2971 if (hoistBOAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
2972 ++NumHoisted;
2973 ++NumBOAssociationsHoisted;
2974 return true;
2975 }
2976
2977 return false;
2978}
2979
2980/// Little predicate that returns true if the specified basic block is in
2981/// a subloop of the current one, not the current one itself.
2982///
2983static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2984 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2985 return LI->getLoopFor(BB) != CurLoop;
2986}
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo)
Definition: LICM.cpp:1488
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE)
When an instruction is found to only be used outside of the loop, this function moves it to the exit ...
Definition: LICM.cpp:1578
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate and hoist add/sub expressions.
Definition: LICM.cpp:2719
static bool hoistMulAddAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where A1, A2,...
Definition: LICM.cpp:2764
static cl::opt< uint32_t > MaxNumUsesTraversed("licm-max-num-uses-traversed", cl::Hidden, cl::init(8), cl::desc("Max num uses visited for identifying load " "invariance in loop using invariant start (default = 8)"))
static cl::opt< unsigned > IntAssociationUpperLimit("licm-max-num-int-reassociations", cl::init(5U), cl::Hidden, cl::desc("Set upper limit for the number of transformations performed " "during a single round of hoisting the reassociated expressions."))
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L, function_ref< void(Instruction *)> Fn)
Definition: LICM.cpp:2231
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, Loop *CurLoop)
Definition: LICM.cpp:1066
static Instruction * sinkThroughTriviallyReplaceablePHI(PHINode *TPN, Instruction *I, LoopInfo *LI, SmallDenseMap< BasicBlock *, Instruction *, 32 > &SunkCopies, const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1473
licm
Definition: LICM.cpp:381
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI)
Little predicate that returns true if the specified basic block is in a subloop of the current one,...
Definition: LICM.cpp:2983
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS, Value *InvariantRHS, ICmpInst &ICmp, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Try to reassociate and hoist the following two patterns: LV - C1 < C2 --> LV < C1 + C2,...
Definition: LICM.cpp:2640
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU)
Definition: LICM.cpp:1451
static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE, const Instruction *CtxI, AssumptionCache *AC, bool AllowSpeculation)
Only sink or hoist an instruction if it is not a trapping instruction, or if the instruction is known...
Definition: LICM.cpp:1734
static bool hoistArithmetics(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Aggregates various functions for hoisting computations out of loop.
Definition: LICM.cpp:2934
static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA, AAResults *AA, Loop *CurLoop, SinkAndHoistLICMFlags &Flags)
Definition: LICM.cpp:2312
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I)
Returns true if a PHINode is a trivially replaceable with an Instruction.
Definition: LICM.cpp:1293
std::pair< SmallSetVector< Value *, 8 >, bool > PointersAndHasReadsOutsideSet
Definition: LICM.cpp:219
static cl::opt< bool > DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass"))
Memory promotion is enabled by default.
static bool hoistBOAssociation(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, AssumptionCache *AC, DominatorTree *DT)
Reassociate associative binary expressions of the form.
Definition: LICM.cpp:2867
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU)
Definition: LICM.cpp:2431
This file defines the interface for the loop nest analysis.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Loop Invariant Code Motion
Memory SSA
Definition: MemorySSA.cpp:72
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
This file contains the declarations for metadata subclasses.
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PassInstrumentationCallbacks PIC
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:39
This file provides a priority worklist.
Remove Loads Into Fake Uses
raw_pwrite_stream & OS
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
This file defines generic set operations that may be used on set's of different types,...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
#define LLVM_DEBUG(...)
Definition: Debug.h:119
This pass exposes codegen information to IR-level passes.
static cl::opt< bool > DisablePromotion("disable-type-promotion", cl::Hidden, cl::init(false), cl::desc("Disable type promotion pass"))
Value * RHS
Value * LHS
A private abstract base class describing the concept of an individual alias analysis implementation.
LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
LLVM_ABI void add(const MemoryLocation &Loc)
These methods are used to add different types of instructions to the alias sets.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
LLVM_ABI void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
Definition: BasicBlock.cpp:646
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:393
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:337
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:467
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:252
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:131
void moveBefore(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it into the function that MovePos lives ...
Definition: BasicBlock.h:386
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
LLVM_ABI bool canSplitPredecessors() const
Definition: BasicBlock.cpp:523
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:770
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
bool isSigned() const
Definition: InstrTypes.h:932
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:829
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:791
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:23
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
bool isNegative() const
Definition: Constants.h:209
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:169
Assignment ID.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:468
A debug info location.
Definition: DebugLoc.h:124
static LLVM_ABI DebugLoc getMergedLocations(ArrayRef< DebugLoc > Locs)
Try to combine the vector of locations passed as input in a single one.
Definition: DebugLoc.cpp:170
static DebugLoc getDropped()
Definition: DebugLoc.h:164
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:177
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition: DenseMap.h:245
iterator end()
Definition: DenseMap.h:87
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:284
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:322
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:334
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:135
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:22
This implementation of LoopSafetyInfo use ImplicitControlFlowTracking to give precise answers on "may...
Definition: MustExecute.h:133
bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop) const
Returns true if we could not execute a memory-modifying instruction before we enter BB under assumpti...
void removeInstruction(const Instruction *Inst)
Inform safety info that we are planning to remove the instruction Inst from its block.
Definition: MustExecute.cpp:97
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const override
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
bool anyBlockMayThrow() const override
Returns true iff any block of the loop for which this info is contains an instruction that may throw ...
Definition: MustExecute.cpp:73
void computeLoopSafetyInfo(const Loop *CurLoop) override
Computes safety information for a loop checks loop body & header for the possibility of may throw exc...
Definition: MustExecute.cpp:77
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Inform the safety info that we are planning to insert a new instruction Inst into the basic block BB.
Definition: MustExecute.cpp:91
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2637
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1923
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:823
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1420
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1403
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1708
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:207
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2439
Value * CreateFMulFMF(Value *L, Value *R, FMFSource FMFSource, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1656
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1437
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2780
LLVM_ABI void mergeDIAssignID(ArrayRef< const Instruction * > SourceInstructions)
Merge the DIAssignID metadata from this instruction and those attached to instructions in SourceInstr...
Definition: DebugInfo.cpp:901
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1804
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:171
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:428
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1718
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1789
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:510
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:86
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:323
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:301
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR, LPMUpdater &U)
Definition: LICM.cpp:333
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: LICM.cpp:363
This class provides an interface for updating the loop pass manager based on mutations to the loop ne...
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
This is an alternative analysis pass to BranchProbabilityInfoWrapperPass.
Helper class for promoting a collection of loads and stores into SSA Form using the SSAUpdater.
Definition: SSAUpdater.h:147
An instruction for reading from memory.
Definition: Instructions.h:180
void setAlignment(Align Align)
Definition: Instructions.h:219
Value * getPointerOperand()
Definition: Instructions.h:259
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:229
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:570
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
void getUniqueExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all unique successor blocks of this loop.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
Definition: LoopIterator.h:180
void verify(const DominatorTreeBase< BlockT, false > &DomTree) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Definition: LoopInfo.h:597
LLVM_ABI bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V, const BasicBlock *ExitBB) const
Definition: LoopInfo.cpp:955
This class represents a loop nest and can be used to query its properties.
Function * getParent() const
Return the function to which the loop-nest belongs.
Loop & getOutermostLoop() const
Return the outermost loop in the loop nest.
Captures loop safety information.
Definition: MustExecute.h:60
LLVM_ABI void copyColors(BasicBlock *New, BasicBlock *Old)
Copy colors of block Old into the block New.
Definition: MustExecute.cpp:34
LLVM_ABI const DenseMap< BasicBlock *, ColorVector > & getBlockColors() const
Returns block colors map that is used to update funclet operand bundles.
Definition: MustExecute.cpp:30
virtual bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop) const =0
Returns true if the instruction in a loop is guaranteed to execute at least once (under the assumptio...
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:40
bool isLoopInvariant(const Value *V, bool HasCoroSuspendInst=false) const
Return true if the specified value is loop invariant.
Definition: LoopInfo.cpp:61
bool hasLoopInvariantOperands(const Instruction *I, bool HasCoroSuspendInst=false) const
Return true if all the operands of the specified instruction are loop invariant.
Definition: LoopInfo.cpp:76
BasicBlock * getBlock() const
Definition: MemorySSA.h:162
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition: ModRef.h:221
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:215
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:218
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
An analysis that produces MemorySSA for a function.
Definition: MemorySSA.h:936
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
LLVM_ABI void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
LLVM_ABI void insertUse(MemoryUse *Use, bool RenameUses=false)
LLVM_ABI MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
LLVM_ABI MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
LLVM_ABI void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
LLVM_ABI void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition: MemorySSA.h:1053
Legacy analysis pass which computes MemorySSA.
Definition: MemorySSA.h:993
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition: MemorySSA.h:702
AliasAnalysis & getAA()
Definition: MemorySSA.h:802
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition: MemorySSA.h:760
LLVM_ABI MemorySSAWalker * getSkipSelfWalker()
Definition: MemorySSA.cpp:1603
LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition: MemorySSA.cpp:2173
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
Definition: MemorySSA.cpp:1905
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition: MemorySSA.h:720
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition: MemorySSA.h:768
LLVM_ABI bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
Definition: MemorySSA.cpp:2142
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition: MemorySSA.h:740
Class that has the common methods + fields of memory uses/defs.
Definition: MemorySSA.h:250
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition: MemorySSA.h:260
Represents read-only accesses to memory.
Definition: MemorySSA.h:310
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:99
PointerIntPair - This class implements a pair of a pointer and small integer.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1885
PredIteratorCache - This class is an extremely trivial cache for predecessor iterator queries.
size_t size(BasicBlock *BB)
ArrayRef< BasicBlock * > get(BasicBlock *BB)
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
bool empty() const
Determine if the PriorityWorklist is empty or not.
bool insert(const T &X)
Insert a new element into the PriorityWorklist.
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:39
The main scalar evolution driver.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:198
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:99
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:109
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:168
Flags controlling how much is checked when sinking or hoisting instructions.
Definition: LoopUtils.h:124
LLVM_ABI SinkAndHoistLICMFlags(unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, Loop &L, MemorySSA &MSSA)
Definition: LICM.cpp:391
unsigned LicmMssaNoAccForPromotionCap
Definition: LoopUtils.h:143
A version of PriorityWorklist that selects small size optimized data structures for the vector and ma...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:470
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:476
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:356
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void reserve(size_type N)
Definition: SmallVector.h:664
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
void setAlignment(Align Align)
Definition: Instructions.h:342
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:353
static unsigned getPointerOperandIndex()
Definition: Instructions.h:388
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
Provides information about what library functions are available for the current target.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool isSingleThreaded() const
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
EltTy front() const
unsigned size() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:240
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
const Use & getOperandUse(unsigned i) const
Definition: User.h:245
void setOperand(unsigned i, Value *Val)
Definition: User.h:237
Value * getOperand(unsigned i) const
Definition: User.h:232
unsigned getNumOperands() const
Definition: User.h:254
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:166
LLVM_ABI std::string getNameOrAsOperand() const
Definition: Value.cpp:457
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:546
bool use_empty() const
Definition: Value.h:346
user_iterator_impl< User > user_iterator
Definition: Value.h:391
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:396
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
self_iterator getIterator()
Definition: ilist_node.h:134
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
@ Exit
Definition: COFF.h:863
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ NeverOverflows
Never overflows.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
LLVM_ABI bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, MemorySSAUpdater &MSSAU, bool TargetExecutesOncePerLoop, SinkAndHoistLICMFlags &LICMFlags, OptimizationRemarkEmitter *ORE=nullptr)
Returns true if is legal to hoist or sink this instruction disregarding the possible introduction of ...
Definition: LICM.cpp:1167
auto pred_end(const MachineBasicBlock *BB)
void set_intersect(S1Ty &S1, const S2Ty &S2)
set_intersect(A, B) - Compute A := A ^ B Identical to set_intersection, except that it works on set<>...
Definition: SetOperations.h:58
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1723
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
Definition: Loads.cpp:232
constexpr from_range_t from_range
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition: LCSSA.cpp:449
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:663
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, AssumptionCache *, TargetLibraryInfo *, Loop *, MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool, bool AllowSpeculation, bool HasCoroSuspendInst=false)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:888
LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
LLVM_ABI Pass * createLICMPass()
Definition: LICM.cpp:384
LLVM_ABI SmallVector< BasicBlock *, 16 > collectChildrenInLoop(DominatorTree *DT, DomTreeNode *N, const Loop *CurLoop)
Does a BFS from a given node to all of its children inside a given loop.
Definition: LoopUtils.cpp:450
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:402
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
Definition: GuardUtils.cpp:18
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI void initializeLegacyLICMPassPass(PassRegistry &)
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:49
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_TEMPLATE_ABI void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist< Loop *, 4 > &)
Utility that implements appending of loops onto a worklist given a range.
bool isModOrRefSet(const ModRefInfo MRI)
Definition: ModRef.h:43
LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
LLVM_ABI void getLoopAnalysisUsage(AnalysisUsage &AU)
Helper to consistently add the set of standard passes to a loop pass's AnalysisUsage.
Definition: LoopUtils.cpp:142
LLVM_ABI BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: ModRef.h:28
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition: MemorySSA.cpp:84
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool hasDisableLICMTransformsHint(const Loop *L)
Look for the loop attribute that disables the LICM transformation heuristics.
Definition: LoopUtils.cpp:349
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
@ Mul
Product of integers.
@ Add
Sum of integers.
LLVM_ABI bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is umabigously identified at the function-level.
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition: Loads.cpp:252
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1886
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
auto pred_begin(const MachineBasicBlock *BB)
LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses()
Returns the minimum set of Analyses that all loop passes must preserve.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1777
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2139
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, Loop *OutermostLoop=nullptr)
Walk the specified region of the CFG (defined by all blocks dominated by the specified block,...
Definition: LICM.cpp:558
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
cl::opt< unsigned > SetLicmMssaNoAccForPromotionCap
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
bool capturesNothing(CaptureComponents CC)
Definition: ModRef.h:315
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI bool promoteLoopAccessesToScalars(const SmallSetVector< Value *, 8 > &, SmallVectorImpl< BasicBlock * > &, SmallVectorImpl< BasicBlock::iterator > &, SmallVectorImpl< MemoryAccess * > &, PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC, const TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *, bool AllowSpeculation, bool HasReadsOutsideSet)
Try to promote memory values to scalars by sinking stores out of the loop and moving loads to before ...
Definition: LICM.cpp:1916
cl::opt< unsigned > SetLicmMssaOptCap
LLVM_ABI bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, TargetTransformInfo *, Loop *, MemorySSAUpdater &, ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *)
Call sinkRegion on loops contained within the specified loop in order from innermost to outermost.
Definition: LICM.cpp:625
LLVM_ABI bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly)
Return true if the Object is writable, in the sense that any location based on this pointer that can ...
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition: Error.cpp:180
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:856
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
LLVM_ABI AAMDNodes merge(const AAMDNodes &Other) const
Given two sets of AAMDNodes applying to potentially different locations, determine the best AAMDNodes...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
unsigned MssaOptCap
Definition: LICM.h:49
unsigned MssaNoAccForPromotionCap
Definition: LICM.h:50
bool AllowSpeculation
Definition: LICM.h:51
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1011
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1039
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:70