LLVM 22.0.0git
TailRecursionElimination.cpp
Go to the documentation of this file.
1//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file transforms calls of the current function (self recursion) followed
10// by a return instruction with a branch to the entry of the function, creating
11// a loop. This pass also implements the following extensions to the basic
12// algorithm:
13//
14// 1. Trivial instructions between the call and return do not prevent the
15// transformation from taking place, though currently the analysis cannot
16// support moving any really useful instructions (only dead ones).
17// 2. This pass transforms functions that are prevented from being tail
18// recursive by an associative and commutative expression to use an
19// accumulator variable, thus compiling the typical naive factorial or
20// 'fib' implementation into efficient code.
21// 3. TRE is performed if the function returns void, if the return
22// returns the result returned by the call, or if the function returns a
23// run-time constant on all exits from the function. It is possible, though
24// unlikely, that the return returns something else (like constant 0), and
25// can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
26// the function return the exact same value.
27// 4. If it can prove that callees do not access their caller stack frame,
28// they are marked as eligible for tail call elimination (by the code
29// generator).
30//
31// There are several improvements that could be made:
32//
33// 1. If the function has any alloca instructions, these instructions will be
34// moved out of the entry block of the function, causing them to be
35// evaluated each time through the tail recursion. Safely keeping allocas
36// in the entry block requires analysis to prove that the tail-called
37// function does not read or write the stack object.
38// 2. Tail recursion is only performed if the call immediately precedes the
39// return instruction. It's possible that there could be a jump between
40// the call and the return.
41// 3. There can be intervening operations between the call and the return that
42// prevent the TRE from occurring. For example, there could be GEP's and
43// stores to memory that will not be read or written by the call. This
44// requires some substantial analysis (such as with DSA) to prove safe to
45// move ahead of the call, but doing so could allow many more TREs to be
46// performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
47// 4. The algorithm we use to detect if callees access their caller stack
48// frames is very primitive.
49//
50//===----------------------------------------------------------------------===//
51
53#include "llvm/ADT/STLExtras.h"
55#include "llvm/ADT/Statistic.h"
60#include "llvm/Analysis/Loads.h"
65#include "llvm/IR/CFG.h"
66#include "llvm/IR/Constants.h"
67#include "llvm/IR/DataLayout.h"
70#include "llvm/IR/Dominators.h"
71#include "llvm/IR/Function.h"
72#include "llvm/IR/IRBuilder.h"
76#include "llvm/IR/Module.h"
78#include "llvm/Pass.h"
80#include "llvm/Support/Debug.h"
84#include <cmath>
85using namespace llvm;
86
87#define DEBUG_TYPE "tailcallelim"
88
89STATISTIC(NumEliminated, "Number of tail calls removed");
90STATISTIC(NumRetDuped, "Number of return duplicated");
91STATISTIC(NumAccumAdded, "Number of accumulators introduced");
92
94 "tre-disable-entrycount-recompute", cl::init(false), cl::Hidden,
95 cl::desc("Force disabling recomputing of function entry count, on "
96 "successful tail recursion elimination."));
97
98/// Scan the specified function for alloca instructions.
99/// If it contains any dynamic allocas, returns false.
100static bool canTRE(Function &F) {
101 // TODO: We don't do TRE if dynamic allocas are used.
102 // Dynamic allocas allocate stack space which should be
103 // deallocated before new iteration started. That is
104 // currently not implemented.
105 return llvm::all_of(instructions(F), [](Instruction &I) {
106 auto *AI = dyn_cast<AllocaInst>(&I);
107 return !AI || AI->isStaticAlloca();
108 });
109}
110
namespace {
/// Local analysis that walks use-def chains rooted at allocas (and byval
/// arguments) to classify how the function's local stack is used: which calls
/// receive alloca-derived pointers, and where such pointers may escape.
struct AllocaDerivedValueTracker {
  // Start at a root value and walk its use-def chain to mark calls that use the
  // value or a derived value in AllocaUsers, and places where it may escape in
  // EscapePoints.
  void walk(Value *Root) {
    SmallVector<Use *, 32> Worklist;

    // Enqueue every not-yet-visited use of V for processing.
    auto AddUsesToWorklist = [&](Value *V) {
      for (auto &U : V->uses()) {
        if (!Visited.insert(&U).second)
          continue;
        Worklist.push_back(&U);
      }
    };

    AddUsesToWorklist(Root);

    while (!Worklist.empty()) {
      Use *U = Worklist.pop_back_val();
      Instruction *I = cast<Instruction>(U->getUser());

      switch (I->getOpcode()) {
      case Instruction::Call:
      case Instruction::Invoke: {
        auto &CB = cast<CallBase>(*I);
        // If the alloca-derived argument is passed byval it is not an escape
        // point, or a use of an alloca. Calling with byval copies the contents
        // of the alloca into argument registers or stack slots, which exist
        // beyond the lifetime of the current frame.
        if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
          continue;
        bool IsNocapture =
            CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
        callUsesLocalStack(CB, IsNocapture);
        if (IsNocapture) {
          // If the alloca-derived argument is passed in as nocapture, then it
          // can't propagate to the call's return. That would be capturing.
          continue;
        }
        // Otherwise the call's result may be alloca-derived; fall through and
        // walk its uses too.
        break;
      }
      case Instruction::Load: {
        // The result of a load is not alloca-derived (unless an alloca has
        // otherwise escaped, but this is a local analysis).
        continue;
      }
      case Instruction::Store: {
        // Storing the pointer itself (operand 0) publishes it to memory.
        if (U->getOperandNo() == 0)
          EscapePoints.insert(I);
        continue; // Stores have no users to analyze.
      }
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::PHI:
      case Instruction::Select:
      case Instruction::AddrSpaceCast:
        // Pure pointer-propagating instructions: the result is still
        // alloca-derived, so keep walking its uses.
        break;
      default:
        // Any other use is conservatively treated as an escape.
        EscapePoints.insert(I);
        break;
      }

      AddUsesToWorklist(I);
    }
  }

  void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
    // Add it to the list of alloca users.
    AllocaUsers.insert(&CB);

    // If it's nocapture then it can't capture this alloca.
    if (IsNocapture)
      return;

    // If it can write to memory, it can leak the alloca value.
    if (!CB.onlyReadsMemory())
      EscapePoints.insert(&CB);
  }

  // NOTE(review): the member declarations (Visited, AllocaUsers, EscapePoints
  // — set-like containers, judging from the insert/count usage above) are on
  // lines not visible in this excerpt.
};
}
196
  // A function that calls a returns-twice routine (per the accessor name,
  // e.g. setjmp-style) cannot have calls safely marked tail.
  if (F.callsFunctionThatReturnsTwice())
    return false;

  // The local stack holds all alloca instructions and all byval arguments.
  AllocaDerivedValueTracker Tracker;
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr())
      Tracker.walk(&Arg);
  }
  for (auto &BB : F) {
    for (auto &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        Tracker.walk(AI);
  }

  bool Modified = false;

  // Track whether a block is reachable after an alloca has escaped. Blocks that
  // contain the escaping instruction will be marked as being visited without an
  // escaped alloca, since that is how the block began.
  enum VisitType {
    UNVISITED,
    UNESCAPED,
    ESCAPED
  };
  // NOTE(review): the per-block visit-state map ("Visited", indexed by
  // BasicBlock* below) is declared on a line not visible in this excerpt.

  // We propagate the fact that an alloca has escaped from block to successor.
  // Visit the blocks that are propagating the escapedness first. To do this, we
  // maintain two worklists.
  SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;

  // We may enter a block and visit it thinking that no alloca has escaped yet,
  // then see an escape point and go back around a loop edge and come back to
  // the same block twice. Because of this, we defer setting tail on calls when
  // we first encounter them in a block. Every entry in this list does not
  // statically use an alloca via use-def chain analysis, but may find an alloca
  // through other means if the block turns out to be reachable after an escape
  // point.
  SmallVector<CallInst *, 32> DeferredTails;

  // Worklist-driven walk over the CFG, starting from the entry block with no
  // escape observed yet.
  BasicBlock *BB = &F.getEntryBlock();
  VisitType Escaped = UNESCAPED;
  do {
    for (auto &I : *BB) {
      // After an escape point executes, everything later in this block (and
      // everything reachable from it) is treated as escaped.
      if (Tracker.EscapePoints.count(&I))
        Escaped = ESCAPED;

      CallInst *CI = dyn_cast<CallInst>(&I);
      // A PseudoProbeInst has the IntrInaccessibleMemOnly tag hence it is
      // considered accessing memory and will be marked as a tail call if we
      // don't bail out here.
      if (!CI || CI->isTailCall() || isa<PseudoProbeInst>(&I))
        continue;

      // Bail out for intrinsic stackrestore call because it can modify
      // unescaped allocas.
      if (auto *II = dyn_cast<IntrinsicInst>(CI))
        if (II->getIntrinsicID() == Intrinsic::stackrestore)
          continue;

      // Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and
      // "kcfi".
      // NOTE(review): the remainder of this condition (the operand-bundle
      // checks referenced above) is on lines not visible in this excerpt.
      bool IsNoTail = CI->isNoTailCall() ||

      if (!IsNoTail && CI->doesNotAccessMemory()) {
        // A call to a readnone function whose arguments are all things computed
        // outside this function can be marked tail. Even if you stored the
        // alloca address into a global, a readnone function can't load the
        // global anyhow.
        //
        // Note that this runs whether we know an alloca has escaped or not. If
        // it has, then we can't trust Tracker.AllocaUsers to be accurate.
        bool SafeToTail = true;
        for (auto &Arg : CI->args()) {
          if (isa<Constant>(Arg.getUser()))
            continue;
          if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
            if (!A->hasByValAttr())
              continue;
          // Anything else (locally-computed value, or a byval argument whose
          // storage lives in this frame) blocks the readnone shortcut.
          SafeToTail = false;
          break;
        }
        if (SafeToTail) {
          using namespace ore;
          ORE->emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
                   << "marked as tail call candidate (readnone)";
          });
          CI->setTailCall();
          Modified = true;
          continue;
        }
      }

      // Defer the decision for calls that don't statically use an alloca; the
      // block's escape state may still change if it is revisited.
      if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI))
        DeferredTails.push_back(CI);
    }

    // Propagate the (possibly upgraded) escape state to all successors.
    for (auto *SuccBB : successors(BB)) {
      auto &State = Visited[SuccBB];
      if (State < Escaped) {
        State = Escaped;
        if (State == ESCAPED)
          WorklistEscaped.push_back(SuccBB);
        else
          WorklistUnescaped.push_back(SuccBB);
      }
    }

    // Drain escaped blocks first; skip unescaped-worklist entries that were
    // upgraded to ESCAPED in the meantime.
    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.pop_back_val();
      Escaped = ESCAPED;
    } else {
      BB = nullptr;
      while (!WorklistUnescaped.empty()) {
        auto *NextBB = WorklistUnescaped.pop_back_val();
        if (Visited[NextBB] == UNESCAPED) {
          BB = NextBB;
          Escaped = UNESCAPED;
          break;
        }
      }
    }
  } while (BB);

  // Deferred calls are safe to mark tail iff their block was never reached in
  // the ESCAPED state.
  for (CallInst *CI : DeferredTails) {
    if (Visited[CI->getParent()] != ESCAPED) {
      // If the escape point was part way through the block, calls after the
      // escape point wouldn't have been put into DeferredTails.
      LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
      CI->setTailCall();
      Modified = true;
    }
  }

  return Modified;
}
339
/// Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
/// NOTE(review): the function header (parameters I, CI, AA, judging from the
/// body) is on a line not visible in this excerpt.
  // lifetime.end markers can always be hoisted: ending the lifetime earlier
  // is conservatively correct.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_end)
      return true;

  // FIXME: We can move load/store/call/free instructions above the call if the
  // call does not mod/ref the memory location being processed.
  if (I->mayHaveSideEffects()) // This also handles volatile loads.
    return false;

  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    // Loads may always be moved above calls without side effects.
    if (CI->mayHaveSideEffects()) {
      // Non-volatile loads may be moved above a call with side effects if it
      // does not write to memory and the load provably won't trap.
      // Writes to memory only matter if they may alias the pointer
      // being loaded from.
      const DataLayout &DL = L->getDataLayout();
      if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
          !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
                                       L->getAlign(), DL, L))
        return false;
    }
  }

  // Otherwise, if this is a side-effect free instruction, check to make sure
  // that it does not use the return value of the call. If it doesn't use the
  // return value of the call, it must only use things that are defined before
  // the call, or movable instructions between the call and the instruction
  // itself.
  return !is_contained(I->operands(), CI);
}
376
  // Only associative-and-commutative operations can be pulled through the
  // recursion into an accumulator, since the pending operations may then be
  // folded in any order.
  if (!I->isAssociative() || !I->isCommutative())
    return false;

  assert(I->getNumOperands() >= 2 &&
         "Associative/commutative operations should have at least 2 args!");

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Accumulators must have an identity.
    if (!ConstantExpr::getIntrinsicIdentity(II->getIntrinsicID(), I->getType()))
      return false;
  }

  // Exactly one operand should be the result of the call instruction.
  if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
      (I->getOperand(0) != CI && I->getOperand(1) != CI))
    return false;

  // The only user of this instruction we allow is a single return instruction.
  if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
    return false;

  return true;
}
401
namespace {
/// Performs the actual tail-recursion-to-loop rewrite for one function.
/// Shared state (loop header, argument/return/accumulator PHIs) is created
/// lazily by the first successful elimination and reused by later ones.
class TailRecursionEliminator {
  Function &F;
  AliasAnalysis *AA;
  DomTreeUpdater &DTU;
  BlockFrequencyInfo *const BFI;
  // Entry-block frequency and function entry count captured at construction,
  // before any rewriting; used to recompute the entry count after an
  // elimination.
  const uint64_t OrigEntryBBFreq;
  const uint64_t OrigEntryCount;
  // NOTE(review): additional members (e.g. the TTI and ORE pointers
  // initialized by the constructor below) are declared on lines not visible
  // in this excerpt.

  // The below are shared state we want to have available when eliminating any
  // calls in the function. These values should be populated by
  // createTailRecurseLoopHeader the first time we find a call we can eliminate.
  BasicBlock *HeaderBB = nullptr;
  SmallVector<PHINode *, 8> ArgumentPHIs;

  // PHI node to store our return value.
  PHINode *RetPN = nullptr;

  // i1 PHI node to track if we have a valid return value stored in RetPN.
  PHINode *RetKnownPN = nullptr;

  // Vector of select instructions we inserted. These selects use RetKnownPN
  // to either propagate RetPN or select a new return value.
  // NOTE(review): the RetSelects member declaration is on a line not visible
  // in this excerpt.

  // The below are shared state needed when performing accumulator recursion.
  // These values should be populated by insertAccumulator the first time we
  // find an elimination that requires an accumulator.

  // PHI node to store our current accumulated value.
  PHINode *AccPN = nullptr;

  // The instruction doing the accumulating.
  Instruction *AccumulatorRecursionInstr = nullptr;

  // NOTE(review): two parameter lines of this constructor (the AA/ORE/DTU/BFI
  // parameters referenced in the init list) are not visible in this excerpt.
  TailRecursionEliminator(Function &F, const TargetTransformInfo *TTI,
      : F(F), TTI(TTI), AA(AA), ORE(ORE), DTU(DTU), BFI(BFI),
        OrigEntryBBFreq(
            BFI ? BFI->getBlockFreq(&F.getEntryBlock()).getFrequency() : 0U),
        OrigEntryCount(F.getEntryCount() ? F.getEntryCount()->getCount() : 0) {
    if (BFI) {
      // The assert is meant as API documentation for the caller.
      assert((OrigEntryCount != 0 && OrigEntryBBFreq != 0) &&
             "If a BFI was provided, the function should have both an entry "
             "count that is non-zero and an entry basic block with a non-zero "
             "frequency.");
    }
  }

  // Find a self-recursive tail-call candidate immediately preceding BB's
  // terminator, or return null.
  CallInst *findTRECandidate(BasicBlock *BB);

  // Split off a new entry block, turning the old entry into the loop header
  // and creating the argument / return-tracking PHIs.
  void createTailRecurseLoopHeader(CallInst *CI);

  // Create AccPN, the PHI carrying the pending accumulation across
  // iterations.
  void insertAccumulator(Instruction *AccRecInstr);

  // Replace the call/return pair ending CI's block with a branch back to the
  // loop header; returns true on success.
  bool eliminateCall(CallInst *CI);

  // Simplify trivial argument PHIs and materialize final return values and
  // accumulator results at every return.
  void cleanupAndFinalize();

  // Attempt TRE on a single block; returns true if a call was eliminated.
  bool processBlock(BasicBlock &BB);

  // Copy a byval operand of CI into a fresh entry-block temporary.
  void copyByValueOperandIntoLocalTemp(CallInst *CI, int OpndIdx);

  // Copy that temporary back into the corresponding argument's storage.
  void copyLocalTempOfByValueOperandIntoArguments(CallInst *CI, int OpndIdx);

public:
  // NOTE(review): the trailing parameter lines of this declaration are not
  // visible in this excerpt.
  static bool eliminate(Function &F, const TargetTransformInfo *TTI,
};
} // namespace
477
CallInst *TailRecursionEliminator::findTRECandidate(BasicBlock *BB) {
  Instruction *TI = BB->getTerminator();

  if (&BB->front() == TI) // Make sure there is something before the terminator.
    return nullptr;

  // Scan backwards from the return, checking to see if there is a tail call in
  // this block. If so, set CI to it.
  CallInst *CI = nullptr;
  BasicBlock::iterator BBI(TI);
  while (true) {
    CI = dyn_cast<CallInst>(BBI);
    if (CI && CI->getCalledFunction() == &F)
      break;

    if (BBI == BB->begin())
      return nullptr; // Didn't find a potential tail call.
    --BBI;
  }

  // Only calls already marked tail (by markTails or the frontend) are
  // eligible.
  assert((!CI->isTailCall() || !CI->isNoTailCall()) &&
         "Incompatible call site attributes(Tail,NoTail)");
  if (!CI->isTailCall())
    return nullptr;

  // As a special case, detect code like this:
  //   double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
  // and disable this xform in this case, because the code generator will
  // lower the call to fabs into inline code.
  // NOTE(review): the final clause of this condition is on a line not visible
  // in this excerpt.
  if (BB == &F.getEntryBlock() && &BB->front() == CI &&
      &*std::next(BB->begin()) == TI && CI->getCalledFunction() &&
    // A single-block function with just a call and a return. Check that
    // the arguments match.
    auto I = CI->arg_begin(), E = CI->arg_end();
    Function::arg_iterator FI = F.arg_begin(), FE = F.arg_end();
    for (; I != E && FI != FE; ++I, ++FI)
      if (*I != &*FI) break;
    if (I == E && FI == FE)
      return nullptr;
  }

  return CI;
}
522
void TailRecursionEliminator::createTailRecurseLoopHeader(CallInst *CI) {
  // The old entry block becomes the loop header; a fresh block taking over
  // the old entry's name becomes the new entry and falls through into it.
  HeaderBB = &F.getEntryBlock();
  BasicBlock *NewEntry = BasicBlock::Create(F.getContext(), "", &F, HeaderBB);
  NewEntry->takeName(HeaderBB);
  HeaderBB->setName("tailrecurse");
  auto *BI = BranchInst::Create(HeaderBB, NewEntry);
  BI->setDebugLoc(DebugLoc::getCompilerGenerated());
  // If the new branch preserves the debug location of CI, it could result in
  // misleading stepping, if CI is located in a conditional branch.
  // So, here we don't give any debug location to the new branch.

  // Move all fixed sized allocas from HeaderBB to NewEntry.
  for (BasicBlock::iterator OEBI = HeaderBB->begin(), E = HeaderBB->end(),
                            NEBI = NewEntry->begin();
       OEBI != E;)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
      if (isa<ConstantInt>(AI->getArraySize()))
        AI->moveBefore(NEBI);

  // Now that we have created a new block, which jumps to the entry
  // block, insert a PHI node for each argument of the function.
  // For now, we initialize each PHI to only have the real arguments
  // which are passed in.
  BasicBlock::iterator InsertPos = HeaderBB->begin();
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    PHINode *PN = PHINode::Create(I->getType(), 2, I->getName() + ".tr");
    PN->insertBefore(InsertPos);
    I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
    PN->addIncoming(&*I, NewEntry);
    ArgumentPHIs.push_back(PN);
  }

  // If the function doesn't return void, create the RetPN and RetKnownPN PHI
  // nodes to track our return value. We initialize RetPN with poison and
  // RetKnownPN with false since we can't know our return value at function
  // entry.
  Type *RetType = F.getReturnType();
  if (!RetType->isVoidTy()) {
    Type *BoolType = Type::getInt1Ty(F.getContext());
    RetPN = PHINode::Create(RetType, 2, "ret.tr");
    RetPN->insertBefore(InsertPos);
    RetKnownPN = PHINode::Create(BoolType, 2, "ret.known.tr");
    RetKnownPN->insertBefore(InsertPos);

    RetPN->addIncoming(PoisonValue::get(RetType), NewEntry);
    RetKnownPN->addIncoming(ConstantInt::getFalse(BoolType), NewEntry);
  }

  // The entry block was changed from HeaderBB to NewEntry.
  // The forward DominatorTree needs to be recalculated when the EntryBB is
  // changed. In this corner-case we recalculate the entire tree.
  DTU.recalculate(*NewEntry->getParent());
}
576
577void TailRecursionEliminator::insertAccumulator(Instruction *AccRecInstr) {
578 assert(!AccPN && "Trying to insert multiple accumulators");
579
580 AccumulatorRecursionInstr = AccRecInstr;
581
582 // Start by inserting a new PHI node for the accumulator.
583 pred_iterator PB = pred_begin(HeaderBB), PE = pred_end(HeaderBB);
584 AccPN = PHINode::Create(F.getReturnType(), std::distance(PB, PE) + 1,
585 "accumulator.tr");
586 AccPN->insertBefore(HeaderBB->begin());
587
588 // Loop over all of the predecessors of the tail recursion block. For the
589 // real entry into the function we seed the PHI with the identity constant for
590 // the accumulation operation. For any other existing branches to this block
591 // (due to other tail recursions eliminated) the accumulator is not modified.
592 // Because we haven't added the branch in the current block to HeaderBB yet,
593 // it will not show up as a predecessor.
594 for (pred_iterator PI = PB; PI != PE; ++PI) {
595 BasicBlock *P = *PI;
596 if (P == &F.getEntryBlock()) {
597 Constant *Identity =
598 ConstantExpr::getIdentity(AccRecInstr, AccRecInstr->getType());
599 AccPN->addIncoming(Identity, P);
600 } else {
601 AccPN->addIncoming(AccPN, P);
602 }
603 }
604
605 ++NumAccumAdded;
606}
607
608// Creates a copy of contents of ByValue operand of the specified
609// call instruction into the newly created temporarily variable.
610void TailRecursionEliminator::copyByValueOperandIntoLocalTemp(CallInst *CI,
611 int OpndIdx) {
612 Type *AggTy = CI->getParamByValType(OpndIdx);
613 assert(AggTy);
614 const DataLayout &DL = F.getDataLayout();
615
616 // Get alignment of byVal operand.
617 Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());
618
619 // Create alloca for temporarily byval operands.
620 // Put alloca into the entry block.
621 Value *NewAlloca = new AllocaInst(
622 AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
623 CI->getArgOperand(OpndIdx)->getName(), F.getEntryBlock().begin());
624
625 IRBuilder<> Builder(CI);
626 Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));
627
628 // Copy data from byvalue operand into the temporarily variable.
629 Builder.CreateMemCpy(NewAlloca, /*DstAlign*/ Alignment,
630 CI->getArgOperand(OpndIdx),
631 /*SrcAlign*/ Alignment, Size);
632 CI->setArgOperand(OpndIdx, NewAlloca);
633}
634
635// Creates a copy from temporarily variable(keeping value of ByVal argument)
636// into the corresponding function argument location.
637void TailRecursionEliminator::copyLocalTempOfByValueOperandIntoArguments(
638 CallInst *CI, int OpndIdx) {
639 Type *AggTy = CI->getParamByValType(OpndIdx);
640 assert(AggTy);
641 const DataLayout &DL = F.getDataLayout();
642
643 // Get alignment of byVal operand.
644 Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());
645
646 IRBuilder<> Builder(CI);
647 Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));
648
649 // Copy data from the temporarily variable into corresponding
650 // function argument location.
651 Builder.CreateMemCpy(F.getArg(OpndIdx), /*DstAlign*/ Alignment,
652 CI->getArgOperand(OpndIdx),
653 /*SrcAlign*/ Alignment, Size);
654}
655
bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
  // A TRE candidate's block always ends in a return (processBlock folds the
  // branch-to-return case into this shape before calling here).
  ReturnInst *Ret = cast<ReturnInst>(CI->getParent()->getTerminator());

  // Ok, we found a potential tail call. We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  Instruction *AccRecInstr = nullptr;
  BasicBlock::iterator BBI(CI);
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (canMoveAboveCall(&*BBI, CI, AA))
      continue;

    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination. Check to see if this is the
    // case, and if so, remember which instruction accumulates for later.
    if (AccPN || !canTransformAccumulatorRecursion(&*BBI, CI))
      return false; // We cannot eliminate the tail recursion!

    // Yes, this is accumulator recursion. Remember which instruction
    // accumulates.
    AccRecInstr = &*BBI;
  }

  BasicBlock *BB = Ret->getParent();

  using namespace ore;
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
           << "transforming tail recursion into loop";
  });

  // OK! We can transform this tail call. If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (!HeaderBB)
    createTailRecurseLoopHeader(CI);

  // Copy values of ByVal operands into local temporary variables.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I))
      copyByValueOperandIntoLocalTemp(CI, I);
  }

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I)) {
      copyLocalTempOfByValueOperandIntoArguments(CI, I);
      // When eliminating a tail call, we modify the values of the arguments.
      // Therefore, if the byval parameter has a readonly attribute, we have to
      // remove it. It is safe because, from the perspective of a caller, the
      // byval parameter is always treated as "readonly," even if the readonly
      // attribute is removed.
      F.removeParamAttr(I, Attribute::ReadOnly);
      ArgumentPHIs[I]->addIncoming(F.getArg(I), BB);
    } else
      ArgumentPHIs[I]->addIncoming(CI->getArgOperand(I), BB);
  }

  if (AccRecInstr) {
    insertAccumulator(AccRecInstr);

    // Rewrite the accumulator recursion instruction so that it does not use
    // the result of the call anymore, instead, use the PHI node we just
    // inserted.
    AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
  }

  // Update our return value tracking
  if (RetPN) {
    if (Ret->getReturnValue() == CI || AccRecInstr) {
      // Defer selecting a return value
      RetPN->addIncoming(RetPN, BB);
      RetKnownPN->addIncoming(RetKnownPN, BB);
    } else {
      // We found a return value we want to use, insert a select instruction to
      // select it if we don't already know what our return value will be and
      // store the result in our return value PHI node.
      SelectInst *SI =
          SelectInst::Create(RetKnownPN, RetPN, Ret->getReturnValue(),
                             "current.ret.tr", Ret->getIterator());
      SI->setDebugLoc(Ret->getDebugLoc());
      RetSelects.push_back(SI);

      RetPN->addIncoming(SI, BB);
      RetKnownPN->addIncoming(ConstantInt::getTrue(RetKnownPN->getType()), BB);
    }

    if (AccPN)
      AccPN->addIncoming(AccRecInstr ? AccRecInstr : AccPN, BB);
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(HeaderBB, Ret->getIterator());
  NewBI->setDebugLoc(CI->getDebugLoc());

  Ret->eraseFromParent(); // Remove return.
  CI->eraseFromParent();  // Remove call.
  DTU.applyUpdates({{DominatorTree::Insert, BB, HeaderBB}});
  ++NumEliminated;

  // If profile data is being maintained (BFI provided), deduct from the
  // function entry count the portion attributable to this eliminated call,
  // weighted by the block's relative frequency.
  if (OrigEntryBBFreq) {
    assert(F.getEntryCount().has_value());
    // This pass is not expected to remove BBs, only add an entry BB. For that
    // reason, and because the BB here isn't the new entry BB, the BFI lookup is
    // expected to succeed.
    assert(&F.getEntryBlock() != BB);
    auto RelativeBBFreq =
        static_cast<double>(BFI->getBlockFreq(BB).getFrequency()) /
        static_cast<double>(OrigEntryBBFreq);
    auto ToSubtract =
        static_cast<uint64_t>(std::round(RelativeBBFreq * OrigEntryCount));
    auto OldEntryCount = F.getEntryCount()->getCount();
    if (OldEntryCount <= ToSubtract) {
      // NOTE(review): this diagnostic is wrapped in an LLVM_DEBUG( opener on a
      // line not visible in this excerpt (hence the trailing parenthesis).
      errs() << "[TRE] The entrycount attributable to the recursive call, "
             << ToSubtract
             << ", should be strictly lower than the function entry count, "
             << OldEntryCount << "\n");
    } else {
      F.setEntryCount(OldEntryCount - ToSubtract, F.getEntryCount()->getType());
    }
  }
  return true;
}
783
void TailRecursionEliminator::cleanupAndFinalize() {
  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves. Check to see if we did and clean up our mess if so. This
  // occurs when a function passes an argument straight through to its tail
  // call.
  for (PHINode *PN : ArgumentPHIs) {
    // If the PHI Node is a dynamic constant, replace it with the value it is.
    if (Value *PNV = simplifyInstruction(PN, F.getDataLayout())) {
      PN->replaceAllUsesWith(PNV);
      PN->eraseFromParent();
    }
  }

  if (RetPN) {
    if (RetSelects.empty()) {
      // If we didn't insert any select instructions, then we know we didn't
      // store a return value and we can remove the PHI nodes we inserted.
      RetPN->dropAllReferences();
      RetPN->eraseFromParent();

      RetKnownPN->dropAllReferences();
      RetKnownPN->eraseFromParent();

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // return in the function, and return its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (BasicBlock &BB : F) {
          ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
          if (!RI)
            continue;

          // Clone the accumulating op, feed it this return's original value
          // on the non-AccPN operand, and return the combined result.
          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     RI->getOperand(0));
          AccRecInstrNew->insertBefore(RI->getIterator());
          AccRecInstrNew->dropLocation();
          RI->setOperand(0, AccRecInstrNew);
        }
      }
    } else {
      // We need to insert a select instruction before any return left in the
      // function to select our stored return value if we have one.
      for (BasicBlock &BB : F) {
        ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
        if (!RI)
          continue;

        SelectInst *SI =
            SelectInst::Create(RetKnownPN, RetPN, RI->getOperand(0),
                               "current.ret.tr", RI->getIterator());
        SI->setDebugLoc(DebugLoc::getCompilerGenerated());
        RetSelects.push_back(SI);
        RI->setOperand(0, SI);
      }

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // of the selects we inserted, and select its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (SelectInst *SI : RetSelects) {
          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     SI->getFalseValue());
          AccRecInstrNew->insertBefore(SI->getIterator());
          AccRecInstrNew->dropLocation();
          SI->setFalseValue(AccRecInstrNew);
        }
      }
    }
  }
}
859
860bool TailRecursionEliminator::processBlock(BasicBlock &BB) {
861 Instruction *TI = BB.getTerminator();
862
863 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
864 if (BI->isConditional())
865 return false;
866
867 BasicBlock *Succ = BI->getSuccessor(0);
868 ReturnInst *Ret = dyn_cast<ReturnInst>(Succ->getFirstNonPHIOrDbg(true));
869
870 if (!Ret)
871 return false;
872
873 CallInst *CI = findTRECandidate(&BB);
874
875 if (!CI)
876 return false;
877
878 LLVM_DEBUG(dbgs() << "FOLDING: " << *Succ
879 << "INTO UNCOND BRANCH PRED: " << BB);
880 FoldReturnIntoUncondBranch(Ret, Succ, &BB, &DTU);
881 ++NumRetDuped;
882
883 // If all predecessors of Succ have been eliminated by
884 // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
885 // because the ret instruction in there is still using a value which
886 // eliminateCall will attempt to remove. This block can only contain
887 // instructions that can't have uses, therefore it is safe to remove.
888 if (pred_empty(Succ))
889 DTU.deleteBB(Succ);
890
891 eliminateCall(CI);
892 return true;
893 } else if (isa<ReturnInst>(TI)) {
894 CallInst *CI = findTRECandidate(&BB);
895
896 if (CI)
897 return eliminateCall(CI);
898 }
899
900 return false;
901}
902
// Entry point: mark eligible calls as tail calls, then turn self-recursive
// tail calls into loops. Returns true if the function was modified.
// NOTE(review): two parameter lines of this definition (the TTI and ORE
// pointers used below) are not visible in this excerpt.
bool TailRecursionEliminator::eliminate(Function &F,
                                        AliasAnalysis *AA,
                                        DomTreeUpdater &DTU,
                                        BlockFrequencyInfo *BFI) {
  // Honor an explicit frontend request to keep calls as calls.
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  bool MadeChange = false;
  MadeChange |= markTails(F, ORE);

  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg())
    return MadeChange;

  // Dynamic allocas rule out the loop transformation (see canTRE).
  if (!canTRE(F))
    return MadeChange;

  // Change any tail recursive calls to loops.
  TailRecursionEliminator TRE(F, TTI, AA, ORE, DTU, BFI);

  for (BasicBlock &BB : F)
    MadeChange |= TRE.processBlock(BB);

  TRE.cleanupAndFinalize();

  return MadeChange;
}
933
namespace {
/// Legacy-pass-manager wrapper around TailRecursionEliminator::eliminate.
struct TailCallElim : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  TailCallElim() : FunctionPass(ID) {
    // NOTE(review): the pass-registry initialization call is on a line not
    // visible in this excerpt.
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): the AU.addRequired/addPreserved lines are not visible in
    // this excerpt.
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    // Both trees are optional here; a null tree is tolerated by DTU.
    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    // There is no noticeable performance difference here between Lazy and
    // Eager UpdateStrategy based on some test results. It is feasible to
    // switch the UpdateStrategy to Lazy if we find it profitable later.
    DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);

    // The legacy pass never recomputes entry counts, hence BFI is null.
    return TailRecursionEliminator::eliminate(
        F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
        &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU,
        /*BFI=*/nullptr);
  }
};
}
971
char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
                      false, false)
// NOTE(review): the matching INITIALIZE_PASS_DEPENDENCY / INITIALIZE_PASS_END
// lines are not visible in this excerpt.

// Public interface to the TailCallElimination pass
// NOTE(review): the factory function's header line is not visible in this
// excerpt.
  return new TailCallElim();
}
984
// NOTE(review): the enclosing new-pass-manager run() signature and the lines
// fetching the analyses (BFI result, DT, PDT, TTI, AA, ORE) and building PA
// are not visible in this excerpt; the code below is kept as-is.

  // This must come first. It needs the 2 analyses, meaning, if it came after
  // the lines asking for the cached result, should they be nullptr (which, in
  // the case of the PDT, is likely), updates to the trees would be missed.
  auto *BFI = (!ForceDisableBFI && UpdateFunctionEntryCount &&
               F.getEntryCount().has_value() && F.getEntryCount()->getCount())
                  : nullptr;
  // There is no noticeable performance difference here between Lazy and Eager
  // UpdateStrategy based on some test results. It is feasible to switch the
  // UpdateStrategy to Lazy if we find it profitable later.
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
  bool Changed =
      TailRecursionEliminator::eliminate(F, &TTI, &AA, &ORE, DTU, BFI);

  // Nothing changed: every analysis remains valid.
  if (!Changed)
    return PreservedAnalyses::all();
  return PA;
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
uint64_t Size
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
uint64_t IntrinsicInst * II
#define P(N)
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:39
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static cl::opt< bool > ForceDisableBFI("tre-disable-entrycount-recompute", cl::init(false), cl::Hidden, cl::desc("Force disabling recomputing of function entry count, on " "successful tail recursion elimination."))
static bool canTRE(Function &F)
Scan the specified function for alloca instructions.
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA)
Return true if it is safe to move the specified instruction from after the call to before the call,...
Tail Call Elimination
#define DEBUG_TYPE
static bool canTransformAccumulatorRecursion(Instruction *I, CallInst *CI)
static bool markTails(Function &F, OptimizationRemarkEmitter *ORE)
This pass exposes codegen information to IR-level passes.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
A private abstract base class describing the concept of an individual alias analysis implementation.
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Check whether or not an instruction may read or write the optionally specified memory location.
an instruction to allocate memory on the stack
Definition: Instructions.h:64
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:431
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:412
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
iterator end()
Definition: BasicBlock.h:472
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:459
const Instruction & front() const
Definition: BasicBlock.h:482
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:354
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1348
bool doesNotAccessMemory(unsigned OpNo) const
Definition: InstrTypes.h:1745
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1267
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1709
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: InstrTypes.h:1778
bool onlyReadsMemory(unsigned OpNo) const
Definition: InstrTypes.h:1751
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Definition: InstrTypes.h:1796
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2158
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1292
void setArgOperand(unsigned i, Value *v)
Definition: InstrTypes.h:1297
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1273
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1283
unsigned arg_size() const
Definition: InstrTypes.h:1290
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
bool isTailCall() const
void setTailCall(bool IsTc=true)
static LLVM_ABI Constant * getIdentity(Instruction *I, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary or intrinsic Instruction.
Definition: Constants.cpp:2756
static LLVM_ABI Constant * getIntrinsicIdentity(Intrinsic::ID, Type *Ty)
Definition: Constants.cpp:2739
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:868
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:875
This is an important base class in LLVM.
Definition: Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
static DebugLoc getCompilerGenerated()
Definition: DebugLoc.h:163
LLVM_ABI void deleteBB(BasicBlock *DelBB)
Delete DelBB.
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:284
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:322
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:314
virtual bool runOnFunction(Function &F)=0
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:188
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
void recalculate(FuncT &F)
Notify DTU that the entry block was replaced.
Legacy wrapper pass to provide the GlobalsAAResult object.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2780
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI void dropLocation()
Drop the instruction's debug location.
Definition: DebugInfo.cpp:932
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:510
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:49
An instruction for reading from memory.
Definition: Instructions.h:180
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
virtual void getAnalysisUsage(AnalysisUsage &) const
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
Definition: Pass.cpp:112
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1885
Analysis pass which computes a PostDominatorTree.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition: Analysis.h:132
Return a value (possibly void), from a function.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
bool empty() const
Definition: SmallVector.h:82
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Analysis pass providing the TargetTransformInfo.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
void dropAllReferences()
Drop all references to operands.
Definition: User.h:349
void setOperand(unsigned i, Value *Val)
Definition: User.h:237
Value * getOperand(unsigned i) const
Definition: User.h:232
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:546
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:396
const ParentTy * getParent() const
Definition: ilist_node.h:34
self_iterator getIterator()
Definition: ilist_node.h:134
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
LLVM_ABI FunctionPass * createTailCallEliminationPass()
auto pred_end(const MachineBasicBlock *BB)
auto successors(const MachineBasicBlock *BB)
LLVM_ABI ReturnInst * FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, BasicBlock *Pred, DomTreeUpdater *DTU=nullptr)
This method duplicates the specified return instruction into a predecessor which ends in an unconditi...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:49
LLVM_ABI bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, Instruction *ScanFrom, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if we know that executing a load from this value cannot trap.
Definition: Loads.cpp:431
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
auto pred_begin(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:119
LLVM_ABI void initializeTailCallElimPass(PassRegistry &)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141