LLVM 22.0.0git
CoroSplit.cpp
Go to the documentation of this file.
1//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This pass builds the coroutine frame and outlines resume and destroy parts
9// of the coroutine into separate functions.
10//
11// We present a coroutine to an LLVM as an ordinary function with suspension
12// points marked up with intrinsics. We let the optimizer party on the coroutine
13// as a single function for as long as possible. Shortly before the coroutine is
14// eligible to be inlined into its callers, we split up the coroutine into parts
15// corresponding to an initial, resume and destroy invocations of the coroutine,
16// add them to the current SCC and restart the IPO pipeline to optimize the
17// coroutine subfunctions we extracted before proceeding to the caller of the
18// coroutine.
19//===----------------------------------------------------------------------===//
20
22#include "CoroCloner.h"
23#include "CoroInternal.h"
24#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/STLExtras.h"
30#include "llvm/ADT/StringRef.h"
31#include "llvm/ADT/Twine.h"
32#include "llvm/Analysis/CFG.h"
39#include "llvm/IR/Argument.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/BasicBlock.h"
42#include "llvm/IR/CFG.h"
43#include "llvm/IR/CallingConv.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DIBuilder.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/DebugInfo.h"
49#include "llvm/IR/Dominators.h"
50#include "llvm/IR/GlobalValue.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
57#include "llvm/IR/LLVMContext.h"
58#include "llvm/IR/Module.h"
59#include "llvm/IR/Type.h"
60#include "llvm/IR/Value.h"
61#include "llvm/IR/Verifier.h"
63#include "llvm/Support/Debug.h"
72#include <cassert>
73#include <cstddef>
74#include <cstdint>
75#include <initializer_list>
76#include <iterator>
77
78using namespace llvm;
79
80#define DEBUG_TYPE "coro-split"
81
// FIXME:
// Lower the intrinsic in the CoroEarly phase if the coroutine frame doesn't
// escape and it is known that other transformations, for example, sanitizers,
// won't lead to incorrect code.
                              coro::Shape &Shape) {
  auto Wrapper = CB->getWrapperFunction();
  auto Awaiter = CB->getAwaiter();
  auto FramePtr = CB->getFrame();

  Builder.SetInsertPoint(CB);

  CallBase *NewCall = nullptr;
  // await_suspend has only 2 parameters, awaiter and handle.
  // Copy parameter attributes from the intrinsic call, but remove the last,
  // because the last parameter now becomes the function that is being called.
  AttributeList NewAttributes =
  // NOTE(review): the initializer expression is elided in this excerpt.

  // Keep the original call's kind: an invoke keeps its normal/unwind edges,
  // a plain call becomes a plain call of the wrapper.
  if (auto Invoke = dyn_cast<InvokeInst>(CB)) {
    auto WrapperInvoke =
        Builder.CreateInvoke(Wrapper, Invoke->getNormalDest(),
                             Invoke->getUnwindDest(), {Awaiter, FramePtr});

    WrapperInvoke->setCallingConv(Invoke->getCallingConv());
    // Carry over the original invoke's operand-bundle info.
    std::copy(Invoke->bundle_op_info_begin(), Invoke->bundle_op_info_end(),
              WrapperInvoke->bundle_op_info_begin());
    WrapperInvoke->setAttributes(NewAttributes);
    WrapperInvoke->setDebugLoc(Invoke->getDebugLoc());
    NewCall = WrapperInvoke;
  } else if (auto Call = dyn_cast<CallInst>(CB)) {
    auto WrapperCall = Builder.CreateCall(Wrapper, {Awaiter, FramePtr});

    WrapperCall->setAttributes(NewAttributes);
    WrapperCall->setDebugLoc(Call->getDebugLoc());
    NewCall = WrapperCall;
  } else {
    llvm_unreachable("Unexpected coro_await_suspend invocation method");
  }

  if (CB->getCalledFunction()->getIntrinsicID() ==
      Intrinsic::coro_await_suspend_handle) {
    // Follow the lowered await_suspend call above with a lowered resume call
    // to the returned coroutine.
    if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
      // If the await_suspend call is an invoke, we continue in the next block.
      Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstInsertionPt());
    }

    coro::LowererBase LB(*Wrapper->getParent());
    auto *ResumeAddr = LB.makeSubFnCall(NewCall, CoroSubFnInst::ResumeIndex,
                                        &*Builder.GetInsertPoint());

    // The resume entry point has type void(ptr); call it on the handle.
    LLVMContext &Ctx = Builder.getContext();
    FunctionType *ResumeTy = FunctionType::get(
        Type::getVoidTy(Ctx), PointerType::getUnqual(Ctx), false);
    auto *ResumeCall = Builder.CreateCall(ResumeTy, ResumeAddr, {NewCall});

    // We can't insert the 'ret' instruction and adjust the cc until the
    // function has been split, so remember this for later.
    Shape.SymmetricTransfers.push_back(ResumeCall);

    NewCall = ResumeCall;
  }

  // The intrinsic is fully lowered; forward its uses to the replacement.
  CB->replaceAllUsesWith(NewCall);
  CB->eraseFromParent();
}
151
  // Lower every recorded llvm.coro.await.suspend.* call in this function.
  IRBuilder<> Builder(F.getContext());
  for (auto *AWS : Shape.CoroAwaitSuspends)
    lowerAwaitSuspend(Builder, AWS, Shape);
}
157
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  // Retcon storage only exists in the returned-continuation ABIs.
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
    // NOTE(review): the guarding if-condition is elided in this excerpt.
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}
167
/// Replace an llvm.coro.end.async.
/// Will inline the must tail call function call if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
  IRBuilder<> Builder(End);

  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  // The must-tail call is expected to sit immediately before the terminator.
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->splice(End->getIterator(), MustTailCallFuncBlock,
                       MustTailCall->getIterator());

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  // Inline the must-tail callee; inlining is asserted to succeed.
  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}
212
213/// Replace a non-unwind call to llvm.coro.end.
215 const coro::Shape &Shape, Value *FramePtr,
216 bool InResume, CallGraph *CG) {
217 // Start inserting right before the coro.end.
218 IRBuilder<> Builder(End);
219
220 // Create the return instruction.
221 switch (Shape.ABI) {
222 // The cloned functions in switch-lowering always return void.
223 case coro::ABI::Switch:
224 assert(!cast<CoroEndInst>(End)->hasResults() &&
225 "switch coroutine should not return any values");
226 // coro.end doesn't immediately end the coroutine in the main function
227 // in this lowering, because we need to deallocate the coroutine.
228 if (!InResume)
229 return;
230 Builder.CreateRetVoid();
231 break;
232
233 // In async lowering this returns.
234 case coro::ABI::Async: {
235 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
236 if (!CoroEndBlockNeedsCleanup)
237 return;
238 break;
239 }
240
241 // In unique continuation lowering, the continuations always return void.
242 // But we may have implicitly allocated storage.
243 case coro::ABI::RetconOnce: {
244 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
245 auto *CoroEnd = cast<CoroEndInst>(End);
246 auto *RetTy = Shape.getResumeFunctionType()->getReturnType();
247
248 if (!CoroEnd->hasResults()) {
249 assert(RetTy->isVoidTy());
250 Builder.CreateRetVoid();
251 break;
252 }
253
254 auto *CoroResults = CoroEnd->getResults();
255 unsigned NumReturns = CoroResults->numReturns();
256
257 if (auto *RetStructTy = dyn_cast<StructType>(RetTy)) {
258 assert(RetStructTy->getNumElements() == NumReturns &&
259 "numbers of returns should match resume function singature");
260 Value *ReturnValue = PoisonValue::get(RetStructTy);
261 unsigned Idx = 0;
262 for (Value *RetValEl : CoroResults->return_values())
263 ReturnValue = Builder.CreateInsertValue(ReturnValue, RetValEl, Idx++);
264 Builder.CreateRet(ReturnValue);
265 } else if (NumReturns == 0) {
266 assert(RetTy->isVoidTy());
267 Builder.CreateRetVoid();
268 } else {
269 assert(NumReturns == 1);
270 Builder.CreateRet(*CoroResults->retval_begin());
271 }
272 CoroResults->replaceAllUsesWith(
273 ConstantTokenNone::get(CoroResults->getContext()));
274 CoroResults->eraseFromParent();
275 break;
276 }
277
278 // In non-unique continuation lowering, we signal completion by returning
279 // a null continuation.
280 case coro::ABI::Retcon: {
281 assert(!cast<CoroEndInst>(End)->hasResults() &&
282 "retcon coroutine should not return any values");
283 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
284 auto RetTy = Shape.getResumeFunctionType()->getReturnType();
285 auto RetStructTy = dyn_cast<StructType>(RetTy);
286 PointerType *ContinuationTy =
287 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
288
289 Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
290 if (RetStructTy) {
291 ReturnValue = Builder.CreateInsertValue(PoisonValue::get(RetStructTy),
292 ReturnValue, 0);
293 }
294 Builder.CreateRet(ReturnValue);
295 break;
296 }
297 }
298
299 // Remove the rest of the block, by splitting it into an unreachable block.
300 auto *BB = End->getParent();
301 BB->splitBasicBlock(End);
302 BB->getTerminator()->eraseFromParent();
303}
304
// Mark a coroutine as done, which implies that the coroutine is finished and
// never gets resumed.
//
// In resume-switched ABI, the done state is represented by storing zero in
// ResumeFnAddr.
//
// NOTE: We couldn't omit the argument `FramePtr`. It is necessary because the
// pointer to the frame in the split function is not stored in `Shape`.
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
                                Value *FramePtr) {
  assert(
      Shape.ABI == coro::ABI::Switch &&
      "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
  // Store a null resume-function pointer into the frame; a null ResumeFnAddr
  // is the "done" marker in the switch ABI.
  // NOTE(review): some GEP/cast operands are elided in this excerpt.
  auto *GepIndex = Builder.CreateStructGEP(
      "ResumeFn.addr");
  auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
  Builder.CreateStore(NullPtr, GepIndex);

  // If the coroutine doesn't have an unwind coro.end, we can omit the store
  // to the final suspend index, since being suspended at the final suspend
  // point can be inferred from the nullness of ResumeFnAddr alone.
  // However, we can't skip it if the coroutine has an unwind coro.end: a
  // coroutine that reaches an unwind coro.end is considered suspended at the
  // final suspend point (its ResumeFnAddr is null) even though it didn't
  // actually complete. We need the IndexVal for the final suspend point to
  // keep the states distinguishable.
    assert(cast<CoroSuspendInst>(Shape.CoroSuspends.back())->isFinal() &&
           "The final suspend should only live in the last position of "
           "CoroSuspends.");
    ConstantInt *IndexVal = Shape.getIndex(Shape.CoroSuspends.size() - 1);
    auto *FinalIndex = Builder.CreateStructGEP(
        Shape.FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");

    Builder.CreateStore(IndexVal, FinalIndex);
  }
}
345
/// Replace an unwind call to llvm.coro.end.
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch: {
    // In C++'s specification, the coroutine should be marked as done
    // if promise.unhandled_exception() throws. The frontend will
    // call coro.end(true) along this path.
    //
    // FIXME: We should refactor this once there is other language
    // which uses Switch-Resumed style other than C++.
    markCoroutineAsDone(Builder, Shape, FramePtr);
    if (!InResume)
      return;
    break;
  }
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    // Split off and discard everything after the coro.end in this block.
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}
384
static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  // Dispatch on whether this coro.end lies on the unwind path.
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  // Fold the coro.end itself to a constant: true inside resume parts.
  // NOTE(review): the non-resume operand of the ternary is elided here.
  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
  End->eraseFromParent();
}
397
// In the resume function, we remove the last case (when coro::Shape is built,
// the final suspend point (if present) is always the last element of
// CoroSuspends array) since it is an undefined behavior to resume a coroutine
// suspended at the final suspend point.
// In the destroy function, if it isn't possible that the ResumeFnAddr is NULL
// and the coroutine doesn't suspend at the final suspend point actually (this
// is possible since the coroutine is considered suspended at the final suspend
// point if promise.unhandled_exception() exits via an exception), we can
// remove the last case.

    return;

  // Drop the final-suspend case from the cloned resume switch, remembering
  // its successor block so we can still branch to it explicitly below.
  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  BasicBlock *OldSwitchBB = Switch->getParent();
  auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
  Builder.SetInsertPoint(OldSwitchBB->getTerminator());

    // When the coroutine can only be destroyed when complete, we don't need
    // to generate code for other cases.
    Builder.CreateBr(ResumeBB);
  } else {
    // Otherwise, test ResumeFnAddr for null (the "done" marker) and choose
    // between the final-suspend successor and the remaining switch.
    // NOTE(review): the GEP/load operands are elided in this excerpt.
    auto *GepIndex = Builder.CreateStructGEP(
        "ResumeFn.addr");
    auto *Load =
    auto *Cond = Builder.CreateIsNull(Load);
    Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
  }
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}
439
/// Compute the continuation function type for an async suspend point:
/// void(<fields of the suspend's result struct>).
static FunctionType *
  auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
  auto *StructTy = cast<StructType>(AsyncSuspend->getType());
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *VoidTy = Type::getVoidTy(Context);
  // The struct's element types become the continuation's parameter list.
  return FunctionType::get(VoidTy, StructTy->elements(), false);
}
448
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore,
                                        AnyCoroSuspendInst *ActiveSuspend) {
  Module *M = OrigF.getParent();
  // Async clones take their type from the active suspend point; every other
  // ABI shares the resume-function type computed on the shape.
  auto *FnTy = (Shape.ABI != coro::ABI::Async)
                   ? Shape.getResumeFunctionType()
                   : getFunctionTypeFromAsyncSuspend(ActiveSuspend);

  // NOTE(review): the Function::Create(...) call is elided in this excerpt;
  // the new function's name is the original name plus the given suffix.
  Function *NewF =
      OrigF.getName() + Suffix);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}
466
/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty())
    return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  // NOTE(review): the declaration of Args is elided in this excerpt.
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
    auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty())
    return;

  // Otherwise, we need to create an aggregate.
  Value *Aggr = PoisonValue::get(NewS->getType());
  for (auto [Idx, Arg] : llvm::enumerate(Args))
    Aggr = Builder.CreateInsertValue(Aggr, Arg, Idx);

  NewS->replaceAllUsesWith(Aggr);
}
518
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point, replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
    return;
  }

    // The active suspend was handled earlier.
    if (CS == ActiveSuspend)
      continue;

    // Fold every other suspend in the clone to the constant chosen above
    // and delete the mapped intrinsic call.
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}
555
  // Lower every coro.end recorded on the shape within the cloned function.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}
564
                                 ValueToValueMapTy *VMap) {
  // Nothing to do for async coroutines without suspend points.
  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
    return;
  // Lazily found-or-created swifterror slot, shared by all lowered ops.
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot)
      return CachedSlot;

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(&F.getEntryBlock(),
                        F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    // When rewriting a clone, operate on the mapped copy of each op.
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->arg_empty()) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      // Otherwise it is a 'set': store the value and yield the slot.
      assert(Op->arg_size() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}
620
/// Returns all debug records in F.
  SmallVector<DbgVariableRecord *> DbgVariableRecords;
  // Walk every instruction and collect its attached variable records.
  for (auto &I : instructions(F)) {
    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
      DbgVariableRecords.push_back(&DVR);
  }
  return DbgVariableRecords;
}
631
  // Delegate to the file-local helper, remapping ops through the clone's VMap.
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}
635
  auto DbgVariableRecords = collectDbgVariableRecords(*NewF);

  // Only 64-bit ABIs have a register we can refer to with the entry value.
  bool UseEntryValue = OrigF.getParent()->getTargetTriple().isArch64Bit();
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, *DVR, UseEntryValue);

  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  DominatorTree DomTree(*NewF);
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
                                   &DomTree);
  };
  auto RemoveOne = [&](DbgVariableRecord *DVI) {
    if (IsUnreachableBlock(DVI->getParent()))
      DVI->eraseFromParent();
    else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
      // Count all non-debuginfo uses in reachable blocks.
      unsigned Uses = 0;
      for (auto *User : DVI->getVariableLocationOp(0)->users())
        if (auto *I = dyn_cast<Instruction>(User))
          if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      // An alloca with no remaining real uses makes its record stale.
      if (!Uses)
        DVI->eraseFromParent();
    }
  };
  for_each(DbgVariableRecords, RemoveOne);
}
668
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    SwitchBB->moveAfter(Entry);
    break;
  }
  case coro::ABI::Async:
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    // NOTE(review): parts of the ABI-consistency assert are elided here.
          isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any static alloca that's still being used but not reachable from the new
  // entry needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
    // NOTE(review): the instruction-loop header is elided in this excerpt.
    auto *Alloca = dyn_cast<AllocaInst>(&I);
    if (!Alloca || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()) ||
        !isa<ConstantInt>(Alloca->getArraySize()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}
736
/// Derive the value of the new frame pointer.
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is located as a tail to the async
  // context header.
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    // The low byte of the storage-argument index encodes the context argno.
    auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
    auto *CalleeContext = NewF->getArg(ContextIdx);
    auto *ProjectionFunc =
        ActiveAsyncSuspend->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
                                             ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return FramePtrAddr;
  }
  // In continuation-lowering, the argument is the opaque storage.
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = PointerType::getUnqual(Shape.FrameTy->getContext());

    // If the storage is inline, just bitcast to the storage to the frame type.
      return NewStorage;

    // Otherwise, load the real frame from the opaque storage.
    return Builder.CreateLoad(FramePtrTy, NewStorage);
  }
  }
  llvm_unreachable("bad ABI");
}
791
/// Adjust the scope line of the funclet to the first line number after the
/// suspend point. This avoids a jump in the line table from the function
/// declaration (where prologue instructions are attributed to) to the suspend
/// point.
/// Only adjust the scope line when the files are the same.
/// If no candidate line number is found, fallback to the line of ActiveSuspend.
static void updateScopeLine(Instruction *ActiveSuspend,
                            DISubprogram &SPToUpdate) {
  if (!ActiveSuspend)
    return;

  // No subsequent instruction -> fallback to the location of ActiveSuspend.
  if (!ActiveSuspend->getNextNode()) {
    if (auto DL = ActiveSuspend->getDebugLoc())
      if (SPToUpdate.getFile() == DL->getFile())
        SPToUpdate.setScopeLine(DL->getLine());
    return;
  }

  // NOTE(review): the declaration of `Successor` is elided in this excerpt.
  // Corosplit splits the BB around ActiveSuspend, so the meaningful
  // instructions are not in the same BB.
  if (auto *Branch = dyn_cast_or_null<BranchInst>(Successor);
      Branch && Branch->isUnconditional())
    Successor = Branch->getSuccessor(0)->getFirstNonPHIOrDbg();

  // Find the first successor of ActiveSuspend with a non-zero line location.
  // If that matches the file of ActiveSuspend, use it.
  BasicBlock *PBB = Successor->getParent();
  for (; Successor != PBB->end(); Successor = std::next(Successor)) {
    auto DL = Successor->getDebugLoc();
    if (!DL || DL.getLine() == 0)
      continue;

    if (SPToUpdate.getFile() == DL->getFile()) {
      SPToUpdate.setScopeLine(DL.getLine());
      return;
    }

    // First non-zero location is in a different file: stop searching.
    break;
  }

  // If the search above failed, fallback to the location of ActiveSuspend.
  if (auto DL = ActiveSuspend->getDebugLoc())
    if (SPToUpdate.getFile() == DL->getFile())
      SPToUpdate.setScopeLine(DL->getLine());
}
840
841static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
842 unsigned ParamIndex, uint64_t Size,
843 Align Alignment, bool NoAlias) {
844 AttrBuilder ParamAttrs(Context);
845 ParamAttrs.addAttribute(Attribute::NonNull);
846 ParamAttrs.addAttribute(Attribute::NoUndef);
847
848 if (NoAlias)
849 ParamAttrs.addAttribute(Attribute::NoAlias);
850
851 ParamAttrs.addAlignmentAttr(Alignment);
852 ParamAttrs.addDereferenceableAttr(Size);
853 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
854}
855
856static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
857 unsigned ParamIndex) {
858 AttrBuilder ParamAttrs(Context);
859 ParamAttrs.addAttribute(Attribute::SwiftAsync);
860 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
861}
862
863static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
864 unsigned ParamIndex) {
865 AttrBuilder ParamAttrs(Context);
866 ParamAttrs.addAttribute(Attribute::SwiftSelf);
867 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
868}
869
870/// Clone the body of the original function into a resume function of
871/// some sort.
873 assert(NewF);
874
875 // Replace all args with dummy instructions. If an argument is the old frame
876 // pointer, the dummy will be replaced by the new frame pointer once it is
877 // computed below. Uses of all other arguments should have already been
878 // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
879 // frame.
881 for (Argument &A : OrigF.args()) {
882 DummyArgs.push_back(new FreezeInst(PoisonValue::get(A.getType())));
883 VMap[&A] = DummyArgs.back();
884 }
885
887
888 // Ignore attempts to change certain attributes of the function.
889 // TODO: maybe there should be a way to suppress this during cloning?
890 auto savedVisibility = NewF->getVisibility();
891 auto savedUnnamedAddr = NewF->getUnnamedAddr();
892 auto savedDLLStorageClass = NewF->getDLLStorageClass();
893
894 // NewF's linkage (which CloneFunctionInto does *not* change) might not
895 // be compatible with the visibility of OrigF (which it *does* change),
896 // so protect against that.
897 auto savedLinkage = NewF->getLinkage();
898 NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
899
900 CloneFunctionInto(NewF, &OrigF, VMap,
902
903 auto &Context = NewF->getContext();
904
905 if (DISubprogram *SP = NewF->getSubprogram()) {
906 assert(SP != OrigF.getSubprogram() && SP->isDistinct());
907 updateScopeLine(ActiveSuspend, *SP);
908
909 // Update the linkage name and the function name to reflect the modified
910 // name.
911 MDString *NewLinkageName = MDString::get(Context, NewF->getName());
912 SP->replaceLinkageName(NewLinkageName);
913 if (DISubprogram *Decl = SP->getDeclaration()) {
914 TempDISubprogram NewDecl = Decl->clone();
915 NewDecl->replaceLinkageName(NewLinkageName);
916 SP->replaceDeclaration(MDNode::replaceWithUniqued(std::move(NewDecl)));
917 }
918 }
919
920 NewF->setLinkage(savedLinkage);
921 NewF->setVisibility(savedVisibility);
922 NewF->setUnnamedAddr(savedUnnamedAddr);
923 NewF->setDLLStorageClass(savedDLLStorageClass);
924 // The function sanitizer metadata needs to match the signature of the
925 // function it is being attached to. However this does not hold for split
926 // functions here. Thus remove the metadata for split functions.
927 if (Shape.ABI == coro::ABI::Switch &&
928 NewF->hasMetadata(LLVMContext::MD_func_sanitize))
929 NewF->eraseMetadata(LLVMContext::MD_func_sanitize);
930
931 // Replace the attributes of the new function:
932 auto OrigAttrs = NewF->getAttributes();
933 auto NewAttrs = AttributeList();
934
935 switch (Shape.ABI) {
937 // Bootstrap attributes by copying function attributes from the
938 // original function. This should include optimization settings and so on.
939 NewAttrs = NewAttrs.addFnAttributes(
940 Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
941
943 Shape.FrameAlign, /*NoAlias=*/false);
944 break;
945 case coro::ABI::Async: {
946 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
947 if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
948 Attribute::SwiftAsync)) {
949 uint32_t ArgAttributeIndices =
950 ActiveAsyncSuspend->getStorageArgumentIndex();
951 auto ContextArgIndex = ArgAttributeIndices & 0xff;
952 addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
953
954 // `swiftasync` must precede `swiftself` so 0 is not a valid index for
955 // `swiftself`.
956 auto SwiftSelfIndex = ArgAttributeIndices >> 8;
957 if (SwiftSelfIndex)
958 addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
959 }
960
961 // Transfer the original function's attributes.
962 auto FnAttrs = OrigF.getAttributes().getFnAttrs();
963 NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
964 break;
965 }
968 // If we have a continuation prototype, just use its attributes,
969 // full-stop.
971
972 /// FIXME: Is it really good to add the NoAlias attribute?
973 addFramePointerAttrs(NewAttrs, Context, 0,
976 /*NoAlias=*/true);
977
978 break;
979 }
980
981 switch (Shape.ABI) {
982 // In these ABIs, the cloned functions always return 'void', and the
983 // existing return sites are meaningless. Note that for unique
984 // continuations, this includes the returns associated with suspends;
985 // this is fine because we can't suspend twice.
988 // Remove old returns.
989 for (ReturnInst *Return : Returns)
990 changeToUnreachable(Return);
991 break;
992
993 // With multi-suspend continuations, we'll already have eliminated the
994 // original returns and inserted returns before all the suspend points,
995 // so we want to leave any returns in place.
997 break;
998 // Async lowering will insert musttail call functions at all suspend points
999 // followed by a return.
1000 // Don't change returns to unreachable because that will trip up the verifier.
1001 // These returns should be unreachable from the clone.
1002 case coro::ABI::Async:
1003 break;
1004 }
1005
1006 NewF->setAttributes(NewAttrs);
1007 NewF->setCallingConv(Shape.getResumeFunctionCC());
1008
1009 // Set up the new entry block.
1010 replaceEntryBlock();
1011
1012 // Turn symmetric transfers into musttail calls.
1013 for (CallInst *ResumeCall : Shape.SymmetricTransfers) {
1014 ResumeCall = cast<CallInst>(VMap[ResumeCall]);
1015 if (TTI.supportsTailCallFor(ResumeCall)) {
1016 // FIXME: Could we support symmetric transfer effectively without
1017 // musttail?
1018 ResumeCall->setTailCallKind(CallInst::TCK_MustTail);
1019 }
1020
1021 // Put a 'ret void' after the call, and split any remaining instructions to
1022 // an unreachable block.
1023 BasicBlock *BB = ResumeCall->getParent();
1024 BB->splitBasicBlock(ResumeCall->getNextNode());
1025 Builder.SetInsertPoint(BB->getTerminator());
1026 Builder.CreateRetVoid();
1028 }
1029
1030 Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1031 NewFramePtr = deriveNewFramePointer();
1032
1033 // Remap frame pointer.
1034 Value *OldFramePtr = VMap[Shape.FramePtr];
1035 NewFramePtr->takeName(OldFramePtr);
1036 OldFramePtr->replaceAllUsesWith(NewFramePtr);
1037
1038 // Remap vFrame pointer.
1039 auto *NewVFrame = Builder.CreateBitCast(
1040 NewFramePtr, PointerType::getUnqual(Builder.getContext()), "vFrame");
1041 Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
1042 if (OldVFrame != NewVFrame)
1043 OldVFrame->replaceAllUsesWith(NewVFrame);
1044
1045 // All uses of the arguments should have been resolved by this point,
1046 // so we can safely remove the dummy values.
1047 for (Instruction *DummyArg : DummyArgs) {
1048 DummyArg->replaceAllUsesWith(PoisonValue::get(DummyArg->getType()));
1049 DummyArg->deleteValue();
1050 }
1051
1052 switch (Shape.ABI) {
1053 case coro::ABI::Switch:
1054 // Rewrite final suspend handling as it is not done via switch (allows to
1055 // remove final case from the switch, since it is undefined behavior to
1056 // resume the coroutine suspended at the final suspend point.
1058 handleFinalSuspend();
1059 break;
1060 case coro::ABI::Async:
1061 case coro::ABI::Retcon:
1063 // Replace uses of the active suspend with the corresponding
1064 // continuation-function arguments.
1065 assert(ActiveSuspend != nullptr &&
1066 "no active suspend when lowering a continuation-style coroutine");
1067 replaceRetconOrAsyncSuspendUses();
1068 break;
1069 }
1070
1071 // Handle suspends.
1072 replaceCoroSuspends();
1073
1074 // Handle swifterror.
1076
1077 // Remove coro.end intrinsics.
1078 replaceCoroEnds();
1079
1080 // Salvage debug info that points into the coroutine frame.
1082}
1083
1085 // Create a new function matching the original type
1086 NewF = createCloneDeclaration(OrigF, Shape, Suffix, OrigF.getParent()->end(),
1087 ActiveSuspend);
1088
1089 // Clone the function
1091
1092 // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1093 // to suppress deallocation code.
1094 coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1095 /*Elide=*/FKind == coro::CloneKind::SwitchCleanup);
1096}
1097
1099 assert(Shape.ABI == coro::ABI::Async);
1100
1101 auto *FuncPtrStruct = cast<ConstantStruct>(
1103 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1104 auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1105 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1107 auto *NewFuncPtrStruct = ConstantStruct::get(
1108 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1109
1110 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1111}
1112
1114 // In the same function all coro.sizes should have the same result type.
1115 auto *SizeIntrin = Shape.CoroSizes.back();
1116 Module *M = SizeIntrin->getModule();
1117 const DataLayout &DL = M->getDataLayout();
1118 return DL.getTypeAllocSize(Shape.FrameTy);
1119}
1120
1122 if (Shape.ABI == coro::ABI::Async)
1124
1125 for (CoroAlignInst *CA : Shape.CoroAligns) {
1127 ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
1128 CA->eraseFromParent();
1129 }
1130
1131 if (Shape.CoroSizes.empty())
1132 return;
1133
1134 // In the same function all coro.sizes should have the same result type.
1135 auto *SizeIntrin = Shape.CoroSizes.back();
1136 auto *SizeConstant =
1137 ConstantInt::get(SizeIntrin->getType(), getFrameSizeForShape(Shape));
1138
1139 for (CoroSizeInst *CS : Shape.CoroSizes) {
1140 CS->replaceAllUsesWith(SizeConstant);
1141 CS->eraseFromParent();
1142 }
1143}
1144
1147
1148#ifndef NDEBUG
1149 // For now, we do a mandatory verification step because we don't
1150 // entirely trust this pass. Note that we don't want to add a verifier
1151 // pass to FPM below because it will also verify all the global data.
1152 if (verifyFunction(F, &errs()))
1153 report_fatal_error("Broken function");
1154#endif
1155}
1156
1157// Coroutine has no suspend points. Remove heap allocation for the coroutine
1158// frame if possible.
1160 auto *CoroBegin = Shape.CoroBegin;
1161 switch (Shape.ABI) {
1162 case coro::ABI::Switch: {
1163 auto SwitchId = Shape.getSwitchCoroId();
1164 auto *AllocInst = SwitchId->getCoroAlloc();
1165 coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1166 if (AllocInst) {
1167 IRBuilder<> Builder(AllocInst);
1168 auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1169 Frame->setAlignment(Shape.FrameAlign);
1170 AllocInst->replaceAllUsesWith(Builder.getFalse());
1171 AllocInst->eraseFromParent();
1172 CoroBegin->replaceAllUsesWith(Frame);
1173 } else {
1174 CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1175 }
1176
1177 break;
1178 }
1179 case coro::ABI::Async:
1180 case coro::ABI::Retcon:
1182 CoroBegin->replaceAllUsesWith(PoisonValue::get(CoroBegin->getType()));
1183 break;
1184 }
1185
1186 CoroBegin->eraseFromParent();
1187 Shape.CoroBegin = nullptr;
1188}
1189
1190 // SimplifySuspendPoint needs to check that there are no calls between
1191 // coro_save and coro_suspend, since any of the calls may potentially resume
1192 // the coroutine and if that is the case we cannot eliminate the suspend point.
// Returns true if the given instruction range R contains any real call.
// NOTE(review): the signature line is not visible in this rendering;
// presumably it takes an iterator range `R` over instructions — confirm
// against upstream.
1194 for (Instruction &I : R) {
1195 // Assume that no intrinsic can resume the coroutine.
1196 if (isa<IntrinsicInst>(I))
1197 continue;
1198
// Any other call (or invoke) is conservatively treated as able to resume.
1199 if (isa<CallBase>(I))
1200 return true;
1201 }
1202 return false;
1203}
1204
// Conservatively determine whether any block strictly between SaveBB (the
// coro.save block) and ResDesBB (the resume/destroy call block) contains a
// non-intrinsic call. Used to decide if a suspend point may be eliminated.
1205 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
// NOTE(review): the declarations of `Set` (visited-block set) and
// `Worklist` (backward-walk worklist) are not visible in this rendering.
1208
1209 Set.insert(SaveBB);
1210 Worklist.push_back(ResDesBB);
1211
1212 // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1213 // returns a token consumed by suspend instruction, all blocks in between
1214 // will have to eventually hit SaveBB when going backwards from ResDesBB.
1215 while (!Worklist.empty()) {
1216 auto *BB = Worklist.pop_back_val();
1217 Set.insert(BB);
1218 for (auto *Pred : predecessors(BB))
1219 if (!Set.contains(Pred))
1220 Worklist.push_back(Pred);
1221 }
1222
1223 // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1224 Set.erase(SaveBB);
1225 Set.erase(ResDesBB);
1226
// Scan every intermediate block in full (from first non-PHI to end).
1227 for (auto *BB : Set)
1228 if (hasCallsInBlockBetween({BB->getFirstNonPHIIt(), BB->end()}))
1229 return true;
1230
1231 return false;
1232}
1233
// Returns true if any non-intrinsic call may execute between the coro.save
// instruction and the resume/destroy call, either within their own blocks
// or in any block on a path between them.
1234 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1235 auto *SaveBB = Save->getParent();
1236 auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1237 BasicBlock::iterator SaveIt = Save->getIterator();
1238 BasicBlock::iterator ResumeOrDestroyIt = ResumeOrDestroy->getIterator();
1239
// Same block: only the instructions strictly between the two matter.
1240 if (SaveBB == ResumeOrDestroyBB)
1241 return hasCallsInBlockBetween({std::next(SaveIt), ResumeOrDestroyIt});
1242
1243 // Any calls from Save to the end of the block?
1244 if (hasCallsInBlockBetween({std::next(SaveIt), SaveBB->end()}))
1245 return true;
1246
1247 // Any calls from beginning of the block up to ResumeOrDestroy?
// NOTE(review): the opening `if (hasCallsInBlockBetween(` of this check is
// not visible in this rendering.
1249 {ResumeOrDestroyBB->getFirstNonPHIIt(), ResumeOrDestroyIt}))
1250 return true;
1251
1252 // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1253 if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1254 return true;
1255
1256 return false;
1257}
1258
1259 // If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
1260 // suspend point and replace it with normal control flow.
// Returns true if the suspend point was eliminated.
// NOTE(review): the first line of the signature (taking the CoroSuspendInst
// being simplified) is not visible in this rendering.
1262 CoroBeginInst *CoroBegin) {
1263 Instruction *Prev = Suspend->getPrevNode();
1264 if (!Prev) {
// Nothing precedes the suspend in its own block; fall back to the
// terminator of the unique predecessor (bail out if there isn't one).
1265 auto *Pred = Suspend->getParent()->getSinglePredecessor();
1266 if (!Pred)
1267 return false;
1268 Prev = Pred->getTerminator();
1269 }
1270
1271 CallBase *CB = dyn_cast<CallBase>(Prev);
1272 if (!CB)
1273 return false;
1274
1275 auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1276
1277 // See if the callsite is for resumption or destruction of the coroutine.
1278 auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1279 if (!SubFn)
1280 return false;
1281
1282 // Does not refer to the current coroutine, we cannot do anything with it.
1283 if (SubFn->getFrame() != CoroBegin)
1284 return false;
1285
1286 // See if the transformation is safe. Specifically, see if there are any
1287 // calls in between Save and CallInstr. They can potentially resume the
1288 // coroutine rendering this optimization unsafe.
1289 auto *Save = Suspend->getCoroSave();
1290 if (hasCallsBetween(Save, CB))
1291 return false;
1292
1293 // Replace llvm.coro.suspend with the value that results in resumption over
1294 // the resume or cleanup path.
1295 Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1296 Suspend->eraseFromParent();
1297 Save->eraseFromParent();
1298
1299 // No longer need a call to coro.resume or coro.destroy.
// For an invoke, keep control flow valid by branching straight to the
// normal destination before the invoke itself is erased.
1300 if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1301 BranchInst::Create(Invoke->getNormalDest(), Invoke->getIterator());
1302 }
1303
1304 // Grab the CalledValue from CB before erasing the CallInstr.
1305 auto *CalledValue = CB->getCalledOperand();
1306 CB->eraseFromParent();
1307
1308 // If no more users remove it. Usually it is a bitcast of SubFn.
1309 if (CalledValue != SubFn && CalledValue->user_empty())
1310 if (auto *I = dyn_cast<Instruction>(CalledValue))
1311 I->eraseFromParent();
1312
1313 // Now we are good to remove SubFn.
1314 if (SubFn->user_empty())
1315 SubFn->eraseFromParent();
1316
1317 return true;
1318}
1319
1320 // Remove suspend points that are simplified.
// NOTE(review): the signature line is not visible in this rendering;
// presumably `static void simplifySuspendPoints(coro::Shape &Shape)`.
1322 // Currently, the only simplification we do is switch-lowering-specific.
1323 if (Shape.ABI != coro::ABI::Switch)
1324 return;
1325
1326 auto &S = Shape.CoroSuspends;
1327 size_t I = 0, N = S.size();
1328 if (N == 0)
1329 return;
1330
// Compact the vector in place: simplified suspends are swapped to the tail
// and dropped by the final resize. Track where a final suspend lands if it
// gets swapped forward, so the "final suspend is last" invariant can be
// restored afterwards.
1331 size_t ChangedFinalIndex = std::numeric_limits<size_t>::max();
1332 while (true) {
1333 auto SI = cast<CoroSuspendInst>(S[I]);
1334 // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1335 // to resume a coroutine suspended at the final suspend point.
1336 if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1337 if (--N == I)
1338 break;
1339
1340 std::swap(S[I], S[N]);
1341
1342 if (cast<CoroSuspendInst>(S[I])->isFinal()) {
// NOTE(review): one statement is not visible in this rendering here.
1344 ChangedFinalIndex = I;
1345 }
1346
// Re-examine slot I, which now holds a different suspend.
1347 continue;
1348 }
1349 if (++I == N)
1350 break;
1351 }
1352 S.resize(N);
1353
1354 // Maintain final.suspend in case final suspend was swapped.
1355 // We require the final suspend to be the last element of CoroSuspends.
1356 if (ChangedFinalIndex < N) {
1357 assert(cast<CoroSuspendInst>(S[ChangedFinalIndex])->isFinal());
1358 std::swap(S[ChangedFinalIndex], S.back());
1359 }
1360}
1361
1362namespace {
1363
1364struct SwitchCoroutineSplitter {
1365 static void split(Function &F, coro::Shape &Shape,
1368 assert(Shape.ABI == coro::ABI::Switch);
1369
1370 // Create a resume clone by cloning the body of the original function,
1371 // setting new entry block and replacing coro.suspend an appropriate value
1372 // to force resume or cleanup pass for every suspend point.
1373 createResumeEntryBlock(F, Shape);
1374 auto *ResumeClone = coro::SwitchCloner::createClone(
1375 F, ".resume", Shape, coro::CloneKind::SwitchResume, TTI);
1376 auto *DestroyClone = coro::SwitchCloner::createClone(
1377 F, ".destroy", Shape, coro::CloneKind::SwitchUnwind, TTI);
1378 auto *CleanupClone = coro::SwitchCloner::createClone(
1379 F, ".cleanup", Shape, coro::CloneKind::SwitchCleanup, TTI);
1380
1381 postSplitCleanup(*ResumeClone);
1382 postSplitCleanup(*DestroyClone);
1383 postSplitCleanup(*CleanupClone);
1384
1385 // Store addresses resume/destroy/cleanup functions in the coroutine frame.
1386 updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1387
1388 assert(Clones.empty());
1389 Clones.push_back(ResumeClone);
1390 Clones.push_back(DestroyClone);
1391 Clones.push_back(CleanupClone);
1392
1393 // Create a constant array referring to resume/destroy/clone functions
1394 // pointed by the last argument of @llvm.coro.info, so that CoroElide pass
1395 // can determined correct function to call.
1396 setCoroInfo(F, Shape, Clones);
1397 }
1398
1399 // Create a variant of ramp function that does not perform heap allocation
1400 // for a switch ABI coroutine.
1401 //
1402 // The newly split `.noalloc` ramp function has the following differences:
1403 // - Has one additional frame pointer parameter in lieu of dynamic
1404 // allocation.
1405 // - Suppressed allocations by replacing coro.alloc and coro.free.
1406 static Function *createNoAllocVariant(Function &F, coro::Shape &Shape,
1408 assert(Shape.ABI == coro::ABI::Switch);
1409 auto *OrigFnTy = F.getFunctionType();
1410 auto OldParams = OrigFnTy->params();
1411
1412 SmallVector<Type *> NewParams;
1413 NewParams.reserve(OldParams.size() + 1);
1414 NewParams.append(OldParams.begin(), OldParams.end());
1415 NewParams.push_back(PointerType::getUnqual(Shape.FrameTy->getContext()));
1416
1417 auto *NewFnTy = FunctionType::get(OrigFnTy->getReturnType(), NewParams,
1418 OrigFnTy->isVarArg());
1419 Function *NoAllocF =
1420 Function::Create(NewFnTy, F.getLinkage(), F.getName() + ".noalloc");
1421
1422 ValueToValueMapTy VMap;
1423 unsigned int Idx = 0;
1424 for (const auto &I : F.args()) {
1425 VMap[&I] = NoAllocF->getArg(Idx++);
1426 }
1427 // We just appended the frame pointer as the last argument of the new
1428 // function.
1429 auto FrameIdx = NoAllocF->arg_size() - 1;
1431 CloneFunctionInto(NoAllocF, &F, VMap,
1432 CloneFunctionChangeType::LocalChangesOnly, Returns);
1433
1434 if (Shape.CoroBegin) {
1435 auto *NewCoroBegin =
1436 cast_if_present<CoroBeginInst>(VMap[Shape.CoroBegin]);
1437 auto *NewCoroId = cast<CoroIdInst>(NewCoroBegin->getId());
1438 coro::replaceCoroFree(NewCoroId, /*Elide=*/true);
1439 coro::suppressCoroAllocs(NewCoroId);
1440 NewCoroBegin->replaceAllUsesWith(NoAllocF->getArg(FrameIdx));
1441 NewCoroBegin->eraseFromParent();
1442 }
1443
1444 Module *M = F.getParent();
1445 M->getFunctionList().insert(M->end(), NoAllocF);
1446
1447 removeUnreachableBlocks(*NoAllocF);
1448 auto NewAttrs = NoAllocF->getAttributes();
1449 // When we elide allocation, we read these attributes to determine the
1450 // frame size and alignment.
1451 addFramePointerAttrs(NewAttrs, NoAllocF->getContext(), FrameIdx,
1452 Shape.FrameSize, Shape.FrameAlign,
1453 /*NoAlias=*/false);
1454
1455 NoAllocF->setAttributes(NewAttrs);
1456
1457 Clones.push_back(NoAllocF);
1458 // Reset the original function's coro info, make the new noalloc variant
1459 // connected to the original ramp function.
1460 setCoroInfo(F, Shape, Clones);
1461 // After copying, set the linkage to internal linkage. Original function
1462 // may have different linkage, but optimization dependent on this function
1463 // generally relies on LTO.
1465 return NoAllocF;
1466 }
1467
1468private:
1469 // Create an entry block for a resume function with a switch that will jump to
1470 // suspend points.
// Build the shared "resume.entry" dispatch block: load the suspend index
// from the coroutine frame and switch to the block for that suspend point.
// Also rewrites each coro.save (store of the index / done-marking) and
// splits each coro.suspend into resume/landing blocks.
1471 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
1472 LLVMContext &C = F.getContext();
1473
1474 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
1475 DISubprogram *DIS = F.getSubprogram();
1476 // If there is no DISubprogram for F, it implies the function is compiled
1477 // without debug info. So we also don't generate debug info for the
1478 // suspension points.
1479 bool AddDebugLabels = DIS && DIS->getUnit() &&
1480 (DIS->getUnit()->getEmissionKind() ==
1481 DICompileUnit::DebugEmissionKind::FullDebug);
1482
1483 // resume.entry:
1484 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32
1485 // 0, i32 2 % index = load i32, i32* %index.addr switch i32 %index, label
1486 // %unreachable [
1487 // i32 0, label %resume.0
1488 // i32 1, label %resume.1
1489 // ...
1490 // ]
1491
1492 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
1493 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
1494
1495 IRBuilder<> Builder(NewEntry);
1496 auto *FramePtr = Shape.FramePtr;
1497 auto *FrameTy = Shape.FrameTy;
1498 auto *GepIndex = Builder.CreateStructGEP(
1499 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
1500 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
1501 auto *Switch =
1502 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
// NOTE(review): one statement is not visible in this rendering here.
1504
1505 // Split all coro.suspend calls
1506 size_t SuspendIndex = 0;
1507 for (auto *AnyS : Shape.CoroSuspends) {
1508 auto *S = cast<CoroSuspendInst>(AnyS);
1509 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
1510
1511 // Replace CoroSave with a store to Index:
1512 // %index.addr = getelementptr %f.frame... (index field number)
1513 // store i32 %IndexVal, i32* %index.addr1
1514 auto *Save = S->getCoroSave();
1515 Builder.SetInsertPoint(Save);
1516 if (S->isFinal()) {
1517 // The coroutine should be marked done if it reaches the final suspend
1518 // point.
1519 markCoroutineAsDone(Builder, Shape, FramePtr);
1520 } else {
1521 auto *GepIndex = Builder.CreateStructGEP(
1522 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
1523 Builder.CreateStore(IndexVal, GepIndex);
1524 }
1525
1526 Save->replaceAllUsesWith(ConstantTokenNone::get(C));
1527 Save->eraseFromParent();
1528
1529 // Split block before and after coro.suspend and add a jump from an entry
1530 // switch:
1531 //
1532 // whateverBB:
1533 // whatever
1534 // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
1535 // switch i8 %0, label %suspend[i8 0, label %resume
1536 // i8 1, label %cleanup]
1537 // becomes:
1538 //
1539 // whateverBB:
1540 // whatever
1541 // br label %resume.0.landing
1542 //
1543 // resume.0: ; <--- jump from the switch in the resume.entry
1544 // #dbg_label(...) ; <--- artificial label for debuggers
1545 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
1546 // br label %resume.0.landing
1547 //
1548 // resume.0.landing:
1549 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
1550 // switch i8 % 1, label %suspend [i8 0, label %resume
1551 // i8 1, label %cleanup]
1552
1553 auto *SuspendBB = S->getParent();
1554 auto *ResumeBB =
1555 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
1556 auto *LandingBB = ResumeBB->splitBasicBlock(
1557 S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
1558 Switch->addCase(IndexVal, ResumeBB);
1559
// The landing PHI merges -1 (fall-through from the original block) with
// the actual suspend result arriving via the resume block.
1560 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
1561 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "");
1562 PN->insertBefore(LandingBB->begin());
1563 S->replaceAllUsesWith(PN);
1564 PN->addIncoming(Builder.getInt8(-1), SuspendBB);
1565 PN->addIncoming(S, ResumeBB);
1566
// Emit an artificial debug label at each resume point so debuggers can
// identify which suspend index is being resumed.
1567 if (AddDebugLabels) {
1568 if (DebugLoc SuspendLoc = S->getDebugLoc()) {
1569 std::string LabelName =
1570 ("__coro_resume_" + Twine(SuspendIndex)).str();
1571 DILocation &DILoc = *SuspendLoc;
1572 DILabel *ResumeLabel =
1573 DBuilder.createLabel(DIS, LabelName, DILoc.getFile(),
1574 SuspendLoc.getLine(), SuspendLoc.getCol(),
1575 /*IsArtificial=*/true,
1576 /*CoroSuspendIdx=*/SuspendIndex,
1577 /*AlwaysPreserve=*/false);
1578 DBuilder.insertLabel(ResumeLabel, &DILoc, ResumeBB->begin());
1579 }
1580 }
1581
1582 ++SuspendIndex;
1583 }
1584
1585 Builder.SetInsertPoint(UnreachBB);
1586 Builder.CreateUnreachable();
1587 DBuilder.finalize();
1588
1589 Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
1590 }
1591
1592 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
// Store the resume function pointer and the destroy-or-cleanup function
// pointer into their slots in the coroutine frame, right after the frame
// pointer is established.
1593 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1594 Function *DestroyFn, Function *CleanupFn) {
1595 IRBuilder<> Builder(&*Shape.getInsertPtAfterFramePtr());
1596
1597 auto *ResumeAddr = Builder.CreateStructGEP(
// NOTE(review): the GEP operand line (frame type / resume field index) is
// not visible in this rendering.
1599 "resume.addr");
1600 Builder.CreateStore(ResumeFn, ResumeAddr);
1601
1602 Value *DestroyOrCleanupFn = DestroyFn;
1603
1604 CoroIdInst *CoroId = Shape.getSwitchCoroId();
1605 if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1606 // If there is a CoroAlloc and it returns false (meaning we elide the
1607 // allocation), use CleanupFn instead of DestroyFn.
1608 DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1609 }
1610
1611 auto *DestroyAddr = Builder.CreateStructGEP(
// NOTE(review): the GEP operand line (frame type / destroy field index) is
// not visible in this rendering.
1613 "destroy.addr");
1614 Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1615 }
1616
1617 // Create a global constant array containing pointers to functions provided
1618 // and set Info parameter of CoroBegin to point at this constant. Example:
1619 //
1620 // @f.resumers = internal constant [2 x void(%f.frame*)*]
1621 // [void(%f.frame*)* @f.resume, void(%f.frame*)*
1622 // @f.destroy]
1623 // define void @f() {
1624 // ...
1625 // call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1626 // i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to
1627 // i8*))
1628 //
1629 // Assumes that all the functions have the same signature.
1630 static void setCoroInfo(Function &F, coro::Shape &Shape,
1632 // This only works under the switch-lowering ABI because coro elision
1633 // only works on the switch-lowering ABI.
1635 assert(!Args.empty());
1636 Function *Part = *Fns.begin();
1637 Module *M = Part->getParent();
1638 auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1639
1640 auto *ConstVal = ConstantArray::get(ArrTy, Args);
1641 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1642 GlobalVariable::PrivateLinkage, ConstVal,
1643 F.getName() + Twine(".resumers"));
1644
1645 // Update coro.begin instruction to refer to this constant.
1646 LLVMContext &C = F.getContext();
1647 auto *BC = ConstantExpr::getPointerCast(GV, PointerType::getUnqual(C));
1648 Shape.getSwitchCoroId()->setInfo(BC);
1649 }
1650};
1651
1652} // namespace
1653
1656 auto *ResumeIntrinsic = Suspend->getResumeFunction();
1657 auto &Context = Suspend->getParent()->getParent()->getContext();
1658 auto *Int8PtrTy = PointerType::getUnqual(Context);
1659
1660 IRBuilder<> Builder(ResumeIntrinsic);
1661 auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1662 ResumeIntrinsic->replaceAllUsesWith(Val);
1663 ResumeIntrinsic->eraseFromParent();
1665 PoisonValue::get(Int8PtrTy));
1666}
1667
1668/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
1669static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1670 ArrayRef<Value *> FnArgs,
1671 SmallVectorImpl<Value *> &CallArgs) {
1672 size_t ArgIdx = 0;
1673 for (auto *paramTy : FnTy->params()) {
1674 assert(ArgIdx < FnArgs.size());
1675 if (paramTy != FnArgs[ArgIdx]->getType())
1676 CallArgs.push_back(
1677 Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1678 else
1679 CallArgs.push_back(FnArgs[ArgIdx]);
1680 ++ArgIdx;
1681 }
1682}
1683
1687 IRBuilder<> &Builder) {
1688 auto *FnTy = MustTailCallFn->getFunctionType();
1689 // Coerce the arguments, llvm optimizations seem to ignore the types in
1690 // vaarg functions and throws away casts in optimized mode.
1691 SmallVector<Value *, 8> CallArgs;
1692 coerceArguments(Builder, FnTy, Arguments, CallArgs);
1693
1694 auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1695 // Skip targets which don't support tail call.
1696 if (TTI.supportsTailCallFor(TailCall)) {
1697 TailCall->setTailCallKind(CallInst::TCK_MustTail);
1698 }
1699 TailCall->setDebugLoc(Loc);
1700 TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1701 return TailCall;
1702}
1703
1708 assert(Clones.empty());
1709 // Reset various things that the optimizer might have decided it
1710 // "knows" about the coroutine function due to not seeing a return.
1711 F.removeFnAttr(Attribute::NoReturn);
1712 F.removeRetAttr(Attribute::NoAlias);
1713 F.removeRetAttr(Attribute::NonNull);
1714
1715 auto &Context = F.getContext();
1716 auto *Int8PtrTy = PointerType::getUnqual(Context);
1717
1718 auto *Id = Shape.getAsyncCoroId();
1719 IRBuilder<> Builder(Id);
1720
1721 auto *FramePtr = Id->getStorage();
1722 FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1725 "async.ctx.frameptr");
1726
1727 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1728 {
1729 // Make sure we don't invalidate Shape.FramePtr.
1732 Shape.FramePtr = Handle.getValPtr();
1733 }
1734
1735 // Create all the functions in order after the main function.
1736 auto NextF = std::next(F.getIterator());
1737
1738 // Create a continuation function for each of the suspend points.
1739 Clones.reserve(Shape.CoroSuspends.size());
1740 for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
1741 auto *Suspend = cast<CoroSuspendAsyncInst>(CS);
1742
1743 // Create the clone declaration.
1744 auto ResumeNameSuffix = ".resume.";
1745 auto ProjectionFunctionName =
1746 Suspend->getAsyncContextProjectionFunction()->getName();
1747 bool UseSwiftMangling = false;
1748 if (ProjectionFunctionName == "__swift_async_resume_project_context") {
1749 ResumeNameSuffix = "TQ";
1750 UseSwiftMangling = true;
1751 } else if (ProjectionFunctionName == "__swift_async_resume_get_context") {
1752 ResumeNameSuffix = "TY";
1753 UseSwiftMangling = true;
1754 }
1756 F, Shape,
1757 UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1758 : ResumeNameSuffix + Twine(Idx),
1759 NextF, Suspend);
1760 Clones.push_back(Continuation);
1761
1762 // Insert a branch to a new return block immediately before the suspend
1763 // point.
1764 auto *SuspendBB = Suspend->getParent();
1765 auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1766 auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1767
1768 // Place it before the first suspend.
1769 auto *ReturnBB =
1770 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1771 Branch->setSuccessor(0, ReturnBB);
1772
1773 IRBuilder<> Builder(ReturnBB);
1774
1775 // Insert the call to the tail call function and inline it.
1776 auto *Fn = Suspend->getMustTailCallFunction();
1777 SmallVector<Value *, 8> Args(Suspend->args());
1778 auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1780 auto *TailCall = coro::createMustTailCall(Suspend->getDebugLoc(), Fn, TTI,
1781 FnArgs, Builder);
1782 Builder.CreateRetVoid();
1783 InlineFunctionInfo FnInfo;
1784 (void)InlineFunction(*TailCall, FnInfo);
1785
1786 // Replace the lvm.coro.async.resume intrisic call.
1788 }
1789
1790 assert(Clones.size() == Shape.CoroSuspends.size());
1791
1792 for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
1793 auto *Suspend = CS;
1794 auto *Clone = Clones[Idx];
1795
1796 coro::BaseCloner::createClone(F, "resume." + Twine(Idx), Shape, Clone,
1797 Suspend, TTI);
1798 }
1799}
1800
1805 assert(Clones.empty());
1806
1807 // Reset various things that the optimizer might have decided it
1808 // "knows" about the coroutine function due to not seeing a return.
1809 F.removeFnAttr(Attribute::NoReturn);
1810 F.removeRetAttr(Attribute::NoAlias);
1811 F.removeRetAttr(Attribute::NonNull);
1812
1813 // Allocate the frame.
1814 auto *Id = Shape.getRetconCoroId();
1815 Value *RawFramePtr;
1817 RawFramePtr = Id->getStorage();
1818 } else {
1819 IRBuilder<> Builder(Id);
1820
1821 // Determine the size of the frame.
1822 const DataLayout &DL = F.getDataLayout();
1823 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1824
1825 // Allocate. We don't need to update the call graph node because we're
1826 // going to recompute it from scratch after splitting.
1827 // FIXME: pass the required alignment
1828 RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1829 RawFramePtr =
1830 Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1831
1832 // Stash the allocated frame pointer in the continuation storage.
1833 Builder.CreateStore(RawFramePtr, Id->getStorage());
1834 }
1835
1836 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1837 {
1838 // Make sure we don't invalidate Shape.FramePtr.
1840 Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1841 Shape.FramePtr = Handle.getValPtr();
1842 }
1843
1844 // Create a unique return block.
1845 BasicBlock *ReturnBB = nullptr;
1846 PHINode *ContinuationPhi = nullptr;
1847 SmallVector<PHINode *, 4> ReturnPHIs;
1848
1849 // Create all the functions in order after the main function.
1850 auto NextF = std::next(F.getIterator());
1851
1852 // Create a continuation function for each of the suspend points.
1853 Clones.reserve(Shape.CoroSuspends.size());
1854 for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
1855 auto Suspend = cast<CoroSuspendRetconInst>(CS);
1856
1857 // Create the clone declaration.
1859 F, Shape, ".resume." + Twine(Idx), NextF, nullptr);
1860 Clones.push_back(Continuation);
1861
1862 // Insert a branch to the unified return block immediately before
1863 // the suspend point.
1864 auto SuspendBB = Suspend->getParent();
1865 auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1866 auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1867
1868 // Create the unified return block.
1869 if (!ReturnBB) {
1870 // Place it before the first suspend.
1871 ReturnBB =
1872 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1873 Shape.RetconLowering.ReturnBlock = ReturnBB;
1874
1875 IRBuilder<> Builder(ReturnBB);
1876
1877 // First, the continuation.
1878 ContinuationPhi =
1879 Builder.CreatePHI(Continuation->getType(), Shape.CoroSuspends.size());
1880
1881 // Create PHIs for all other return values.
1882 assert(ReturnPHIs.empty());
1883
1884 // Next, all the directly-yielded values.
1885 for (auto *ResultTy : Shape.getRetconResultTypes())
1886 ReturnPHIs.push_back(
1887 Builder.CreatePHI(ResultTy, Shape.CoroSuspends.size()));
1888
1889 // Build the return value.
1890 auto RetTy = F.getReturnType();
1891
1892 // Cast the continuation value if necessary.
1893 // We can't rely on the types matching up because that type would
1894 // have to be infinite.
1895 auto CastedContinuationTy =
1896 (ReturnPHIs.empty() ? RetTy : RetTy->getStructElementType(0));
1897 auto *CastedContinuation =
1898 Builder.CreateBitCast(ContinuationPhi, CastedContinuationTy);
1899
1900 Value *RetV = CastedContinuation;
1901 if (!ReturnPHIs.empty()) {
1902 auto ValueIdx = 0;
1903 RetV = PoisonValue::get(RetTy);
1904 RetV = Builder.CreateInsertValue(RetV, CastedContinuation, ValueIdx++);
1905
1906 for (auto Phi : ReturnPHIs)
1907 RetV = Builder.CreateInsertValue(RetV, Phi, ValueIdx++);
1908 }
1909
1910 Builder.CreateRet(RetV);
1911 }
1912
1913 // Branch to the return block.
1914 Branch->setSuccessor(0, ReturnBB);
1915 assert(ContinuationPhi);
1916 ContinuationPhi->addIncoming(Continuation, SuspendBB);
1917 for (auto [Phi, VUse] :
1918 llvm::zip_equal(ReturnPHIs, Suspend->value_operands()))
1919 Phi->addIncoming(VUse, SuspendBB);
1920 }
1921
1922 assert(Clones.size() == Shape.CoroSuspends.size());
1923
1924 for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
1925 auto Suspend = CS;
1926 auto Clone = Clones[Idx];
1927
1928 coro::BaseCloner::createClone(F, "resume." + Twine(Idx), Shape, Clone,
1929 Suspend, TTI);
1930 }
1931}
1932
1933namespace {
1934class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1935 Function &F;
1936
1937public:
1938 PrettyStackTraceFunction(Function &F) : F(F) {}
1939 void print(raw_ostream &OS) const override {
1940 OS << "While splitting coroutine ";
1941 F.printAsOperand(OS, /*print type*/ false, F.getParent());
1942 OS << "\n";
1943 }
1944};
1945} // namespace
1946
1947/// Remove calls to llvm.coro.end in the original function.
1949 if (Shape.ABI != coro::ABI::Switch) {
1950 for (auto *End : Shape.CoroEnds) {
1951 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, nullptr);
1952 }
1953 } else {
1954 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1955 auto &Context = End->getContext();
1956 End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1957 End->eraseFromParent();
1958 }
1959 }
1960}
1961
1963 for (auto *U : F.users()) {
1964 if (auto *CB = dyn_cast<CallBase>(U)) {
1965 auto *Caller = CB->getFunction();
1966 if (Caller && Caller->isPresplitCoroutine() &&
1967 CB->hasFnAttr(llvm::Attribute::CoroElideSafe))
1968 return true;
1969 }
1970 }
1971 return false;
1972}
1973
// NOTE(review): the enclosing definition's signature (original lines
// 1974-1976) was dropped by the doxygen extraction, so only this tail is
// visible. It forwards to the switch-lowering splitter; presumably this is
// the body of coro::SwitchABI::splitCoroutine — confirm against the
// upstream CoroSplit.cpp before relying on this listing.
1977  SwitchCoroutineSplitter::split(F, Shape, Clones, TTI);
1978}
1979
1982 bool OptimizeFrame) {
1983 PrettyStackTraceFunction prettyStackTrace(F);
1984
1985 auto &Shape = ABI.Shape;
1986 assert(Shape.CoroBegin);
1987
1988 lowerAwaitSuspends(F, Shape);
1989
1990 simplifySuspendPoints(Shape);
1991
1992 normalizeCoroutine(F, Shape, TTI);
1993 ABI.buildCoroutineFrame(OptimizeFrame);
1995
1996 bool isNoSuspendCoroutine = Shape.CoroSuspends.empty();
1997
1998 bool shouldCreateNoAllocVariant =
1999 !isNoSuspendCoroutine && Shape.ABI == coro::ABI::Switch &&
2000 hasSafeElideCaller(F) && !F.hasFnAttribute(llvm::Attribute::NoInline);
2001
2002 // If there are no suspend points, no split required, just remove
2003 // the allocation and deallocation blocks, they are not needed.
2004 if (isNoSuspendCoroutine) {
2006 } else {
2007 ABI.splitCoroutine(F, Shape, Clones, TTI);
2008 }
2009
2010 // Replace all the swifterror operations in the original function.
2011 // This invalidates SwiftErrorOps in the Shape.
2012 replaceSwiftErrorOps(F, Shape, nullptr);
2013
2014 // Salvage debug intrinsics that point into the coroutine frame in the
2015 // original function. The Cloner has already salvaged debug info in the new
2016 // coroutine funclets.
2018 auto DbgVariableRecords = collectDbgVariableRecords(F);
2019 for (DbgVariableRecord *DVR : DbgVariableRecords)
2020 coro::salvageDebugInfo(ArgToAllocaMap, *DVR, false /*UseEntryValue*/);
2021
2023
2024 if (shouldCreateNoAllocVariant)
2025 SwitchCoroutineSplitter::createNoAllocVariant(F, Shape, Clones);
2026}
2027
2029 LazyCallGraph::Node &N, const coro::Shape &Shape,
2033
2034 auto *CurrentSCC = &C;
2035 if (!Clones.empty()) {
2036 switch (Shape.ABI) {
2037 case coro::ABI::Switch:
2038 // Each clone in the Switch lowering is independent of the other clones.
2039 // Let the LazyCallGraph know about each one separately.
2040 for (Function *Clone : Clones)
2041 CG.addSplitFunction(N.getFunction(), *Clone);
2042 break;
2043 case coro::ABI::Async:
2044 case coro::ABI::Retcon:
2046 // Each clone in the Async/Retcon lowering references of the other clones.
2047 // Let the LazyCallGraph know about all of them at once.
2048 if (!Clones.empty())
2049 CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
2050 break;
2051 }
2052
2053 // Let the CGSCC infra handle the changes to the original function.
2054 CurrentSCC = &updateCGAndAnalysisManagerForCGSCCPass(CG, *CurrentSCC, N, AM,
2055 UR, FAM);
2056 }
2057
2058 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
2059 // to the split functions.
2060 postSplitCleanup(N.getFunction());
2061 CurrentSCC = &updateCGAndAnalysisManagerForFunctionPass(CG, *CurrentSCC, N,
2062 AM, UR, FAM);
2063 return *CurrentSCC;
2064}
2065
2066/// Replace a call to llvm.coro.prepare.retcon.
2067static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
2069 auto CastFn = Prepare->getArgOperand(0); // as an i8*
2070 auto Fn = CastFn->stripPointerCasts(); // as its original type
2071
2072 // Attempt to peephole this pattern:
2073 // %0 = bitcast [[TYPE]] @some_function to i8*
2074 // %1 = call @llvm.coro.prepare.retcon(i8* %0)
2075 // %2 = bitcast %1 to [[TYPE]]
2076 // ==>
2077 // %2 = @some_function
2078 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2079 // Look for bitcasts back to the original function type.
2080 auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2081 if (!Cast || Cast->getType() != Fn->getType())
2082 continue;
2083
2084 // Replace and remove the cast.
2085 Cast->replaceAllUsesWith(Fn);
2086 Cast->eraseFromParent();
2087 }
2088
2089 // Replace any remaining uses with the function as an i8*.
2090 // This can never directly be a callee, so we don't need to update CG.
2091 Prepare->replaceAllUsesWith(CastFn);
2092 Prepare->eraseFromParent();
2093
2094 // Kill dead bitcasts.
2095 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2096 if (!Cast->use_empty())
2097 break;
2098 CastFn = Cast->getOperand(0);
2099 Cast->eraseFromParent();
2100 }
2101}
2102
2103static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2105 bool Changed = false;
2106 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2107 // Intrinsics can only be used in calls.
2108 auto *Prepare = cast<CallInst>(P.getUser());
2109 replacePrepare(Prepare, CG, C);
2110 Changed = true;
2111 }
2112
2113 return Changed;
2114}
2115
2116static void addPrepareFunction(const Module &M,
2118 StringRef Name) {
2119 auto *PrepareFn = M.getFunction(Name);
2120 if (PrepareFn && !PrepareFn->use_empty())
2121 Fns.push_back(PrepareFn);
2122}
2123
2124static std::unique_ptr<coro::BaseABI>
2126 std::function<bool(Instruction &)> IsMatCallback,
2127 const SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs) {
2128 if (S.CoroBegin->hasCustomABI()) {
2129 unsigned CustomABI = S.CoroBegin->getCustomABI();
2130 if (CustomABI >= GenCustomABIs.size())
2131 llvm_unreachable("Custom ABI not found amoung those specified");
2132 return GenCustomABIs[CustomABI](F, S);
2133 }
2134
2135 switch (S.ABI) {
2136 case coro::ABI::Switch:
2137 return std::make_unique<coro::SwitchABI>(F, S, IsMatCallback);
2138 case coro::ABI::Async:
2139 return std::make_unique<coro::AsyncABI>(F, S, IsMatCallback);
2140 case coro::ABI::Retcon:
2141 return std::make_unique<coro::AnyRetconABI>(F, S, IsMatCallback);
2143 return std::make_unique<coro::AnyRetconABI>(F, S, IsMatCallback);
2144 }
2145 llvm_unreachable("Unknown ABI");
2146}
2147
// Default CoroSplitPass constructor. The doxygen extraction dropped its
// opening line (original 2148) and the CreateNewABI call inside the lambda
// (original 2151), so the listing below is incomplete.
// NOTE(review): the dropped line presumably forwards a default
// materializable-instruction callback and an empty custom-ABI list to
// CreateNewABI — confirm against the upstream CoroSplit.cpp.
2149    : CreateAndInitABI([](Function &F, coro::Shape &S) {
2150        std::unique_ptr<coro::BaseABI> ABI =
2152        ABI->init();
2153        return ABI;
2154      }),
2155      OptimizeFrame(OptimizeFrame) {}
2156
// CoroSplitPass constructor taking custom ABI generators. The doxygen
// extraction dropped its opening line (original 2157) and the CreateNewABI
// call inside the lambda (original 2161), so the listing is incomplete.
// NOTE(review): the dropped call presumably passes a default materializable
// callback plus GenCustomABIs to CreateNewABI — confirm against upstream.
2158    SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
2159    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
2160        std::unique_ptr<coro::BaseABI> ABI =
2162        ABI->init();
2163        return ABI;
2164      }),
2165      OptimizeFrame(OptimizeFrame) {}
2166
2167// For back compatibility, constructor takes a materializable callback and
2168// creates a generator for an ABI with a modified materializable callback.
2169CoroSplitPass::CoroSplitPass(std::function<bool(Instruction &)> IsMatCallback,
2170 bool OptimizeFrame)
2171 : CreateAndInitABI([=](Function &F, coro::Shape &S) {
2172 std::unique_ptr<coro::BaseABI> ABI =
2173 CreateNewABI(F, S, IsMatCallback, {});
2174 ABI->init();
2175 return ABI;
2176 }),
2177 OptimizeFrame(OptimizeFrame) {}
2178
2179// For back compatibility, constructor takes a materializable callback and
2180// creates a generator for an ABI with a modified materializable callback.
2182 std::function<bool(Instruction &)> IsMatCallback,
2183 SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
2184 : CreateAndInitABI([=](Function &F, coro::Shape &S) {
2185 std::unique_ptr<coro::BaseABI> ABI =
2186 CreateNewABI(F, S, IsMatCallback, GenCustomABIs);
2187 ABI->init();
2188 return ABI;
2189 }),
2190 OptimizeFrame(OptimizeFrame) {}
2191
2195 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2196 // non-zero number of nodes, so we assume that here and grab the first
2197 // node's function's module.
2198 Module &M = *C.begin()->getFunction().getParent();
2199 auto &FAM =
2200 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2201
2202 // Check for uses of llvm.coro.prepare.retcon/async.
2203 SmallVector<Function *, 2> PrepareFns;
2204 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2205 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2206
2207 // Find coroutines for processing.
2209 for (LazyCallGraph::Node &N : C)
2210 if (N.getFunction().isPresplitCoroutine())
2211 Coroutines.push_back(&N);
2212
2213 if (Coroutines.empty() && PrepareFns.empty())
2214 return PreservedAnalyses::all();
2215
2216 auto *CurrentSCC = &C;
2217 // Split all the coroutines.
2218 for (LazyCallGraph::Node *N : Coroutines) {
2219 Function &F = N->getFunction();
2220 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2221 << "\n");
2222
2223 // The suspend-crossing algorithm in buildCoroutineFrame gets tripped up
2224 // by unreachable blocks, so remove them as a first pass. Remove the
2225 // unreachable blocks before collecting intrinsics into Shape.
2227
2228 coro::Shape Shape(F);
2229 if (!Shape.CoroBegin)
2230 continue;
2231
2232 F.setSplittedCoroutine();
2233
2234 std::unique_ptr<coro::BaseABI> ABI = CreateAndInitABI(F, Shape);
2235
2238 doSplitCoroutine(F, Clones, *ABI, TTI, OptimizeFrame);
2240 *N, Shape, Clones, *CurrentSCC, CG, AM, UR, FAM);
2241
2243 ORE.emit([&]() {
2244 return OptimizationRemark(DEBUG_TYPE, "CoroSplit", &F)
2245 << "Split '" << ore::NV("function", F.getName())
2246 << "' (frame_size=" << ore::NV("frame_size", Shape.FrameSize)
2247 << ", align=" << ore::NV("align", Shape.FrameAlign.value()) << ")";
2248 });
2249
2250 if (!Shape.CoroSuspends.empty()) {
2251 // Run the CGSCC pipeline on the original and newly split functions.
2252 UR.CWorklist.insert(CurrentSCC);
2253 for (Function *Clone : Clones)
2254 UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2255 } else if (Shape.ABI == coro::ABI::Async) {
2256 // Reprocess the function to inline the tail called return function of
2257 // coro.async.end.
2258 UR.CWorklist.insert(&C);
2259 }
2260 }
2261
2262 for (auto *PrepareFn : PrepareFns) {
2263 replaceAllPrepares(PrepareFn, CG, *CurrentSCC);
2264 }
2265
2266 return PreservedAnalyses::none();
2267}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file provides interfaces used to manipulate a call graph, regardless if it is a "old style" Call...
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context, unsigned ParamIndex)
Definition: CoroSplit.cpp:863
static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy)
Definition: CoroSplit.cpp:1234
static LazyCallGraph::SCC & updateCallGraphAfterCoroutineSplit(LazyCallGraph::Node &N, const coro::Shape &Shape, const SmallVectorImpl< Function * > &Clones, LazyCallGraph::SCC &C, LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
Definition: CoroSplit.cpp:2028
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape, ValueToValueMapTy *VMap)
Definition: CoroSplit.cpp:565
static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context, unsigned ParamIndex)
Definition: CoroSplit.cpp:856
static void maybeFreeRetconStorage(IRBuilder<> &Builder, const coro::Shape &Shape, Value *FramePtr, CallGraph *CG)
Definition: CoroSplit.cpp:158
static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB)
Definition: CoroSplit.cpp:1205
static Function * createCloneDeclaration(Function &OrigF, coro::Shape &Shape, const Twine &Suffix, Module::iterator InsertBefore, AnyCoroSuspendInst *ActiveSuspend)
Definition: CoroSplit.cpp:449
Remove calls to llvm coro end in the original static function void removeCoroEndsFromRampFunction(const coro::Shape &Shape)
Definition: CoroSplit.cpp:1948
static FunctionType * getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend)
Definition: CoroSplit.cpp:441
static void updateScopeLine(Instruction *ActiveSuspend, DISubprogram &SPToUpdate)
Adjust the scope line of the funclet to the first line number after the suspend point.
Definition: CoroSplit.cpp:798
static void addPrepareFunction(const Module &M, SmallVectorImpl< Function * > &Fns, StringRef Name)
Definition: CoroSplit.cpp:2116
static SmallVector< DbgVariableRecord * > collectDbgVariableRecords(Function &F)
Returns all debug records in F.
Definition: CoroSplit.cpp:623
static void simplifySuspendPoints(coro::Shape &Shape)
Definition: CoroSplit.cpp:1321
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context, unsigned ParamIndex, uint64_t Size, Align Alignment, bool NoAlias)
Definition: CoroSplit.cpp:841
static bool hasSafeElideCaller(Function &F)
Definition: CoroSplit.cpp:1962
static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG, LazyCallGraph::SCC &C)
Definition: CoroSplit.cpp:2103
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Replace a non-unwind call to llvm.coro.end.
Definition: CoroSplit.cpp:214
static void replaceFrameSizeAndAlignment(coro::Shape &Shape)
Definition: CoroSplit.cpp:1121
static std::unique_ptr< coro::BaseABI > CreateNewABI(Function &F, coro::Shape &S, std::function< bool(Instruction &)> IsMatCallback, const SmallVector< CoroSplitPass::BaseABITy > GenCustomABIs)
Definition: CoroSplit.cpp:2125
static bool replaceCoroEndAsync(AnyCoroEndInst *End)
Replace an llvm.coro.end.async.
Definition: CoroSplit.cpp:171
static void doSplitCoroutine(Function &F, SmallVectorImpl< Function * > &Clones, coro::BaseABI &ABI, TargetTransformInfo &TTI, bool OptimizeFrame)
Definition: CoroSplit.cpp:1980
static bool hasCallsInBlockBetween(iterator_range< BasicBlock::iterator > R)
Definition: CoroSplit.cpp:1193
Replace a call to llvm coro prepare static retcon void replacePrepare(CallInst *Prepare, LazyCallGraph &CG, LazyCallGraph::SCC &C)
Definition: CoroSplit.cpp:2067
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Replace an unwind call to llvm.coro.end.
Definition: CoroSplit.cpp:347
static bool simplifySuspendPoint(CoroSuspendInst *Suspend, CoroBeginInst *CoroBegin)
Definition: CoroSplit.cpp:1261
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape, Value *FramePtr)
Definition: CoroSplit.cpp:313
static void updateAsyncFuncPointerContextSize(coro::Shape &Shape)
Definition: CoroSplit.cpp:1098
static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Definition: CoroSplit.cpp:385
static void lowerAwaitSuspend(IRBuilder<> &Builder, CoroAwaitSuspendInst *CB, coro::Shape &Shape)
Definition: CoroSplit.cpp:86
static void lowerAwaitSuspends(Function &F, coro::Shape &Shape)
Definition: CoroSplit.cpp:152
static void handleNoSuspendCoroutine(coro::Shape &Shape)
Definition: CoroSplit.cpp:1159
static void postSplitCleanup(Function &F)
Definition: CoroSplit.cpp:1145
static TypeSize getFrameSizeForShape(coro::Shape &Shape)
Definition: CoroSplit.cpp:1113
Coerce the arguments in p FnArgs according to p FnTy in p static CallArgs void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy, ArrayRef< Value * > FnArgs, SmallVectorImpl< Value * > &CallArgs)
Definition: CoroSplit.cpp:1669
static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend, Value *Continuation)
Definition: CoroSplit.cpp:1654
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
std::string Name
uint32_t Index
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
@ InlineInfo
#define DEBUG_TYPE
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
Implements a lazy call graph analysis and related passes for the new pass manager.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define P(N)
FunctionAnalysisManager FAM
This file provides a priority worklist.
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static const unsigned FramePtr
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:155
void setAlignment(Align Align)
Definition: Instructions.h:132
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:412
CoroAllocInst * getCoroAlloc()
Definition: CoroInstr.h:118
Align getStorageAlignment() const
Definition: CoroInstr.h:247
uint64_t getStorageSize() const
Definition: CoroInstr.h:243
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:200
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
iterator begin() const
Definition: ArrayRef.h:135
LLVM_ABI AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
LLVM_ABI AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
LLVM_ABI AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
AttributeList removeParamAttributes(LLVMContext &C, unsigned ArgNo, const AttributeMask &AttrsToRemove) const
Remove the specified attribute at the specified arg index from this attribute list.
Definition: Attributes.h:760
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
iterator end()
Definition: BasicBlock.h:472
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:206
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:555
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1410
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1348
Value * getCalledOperand() const
Definition: InstrTypes.h:1340
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1427
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1292
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1424
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:72
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1314
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2246
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:868
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:875
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1833
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1380
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1526
This represents the llvm.coro.align instruction.
Definition: CoroInstr.h:641
This represents the llvm.coro.alloc instruction.
Definition: CoroInstr.h:71
This represents the llvm.coro.await.suspend.{void,bool,handle} instructions.
Definition: CoroInstr.h:86
Value * getFrame() const
Definition: CoroInstr.h:92
Value * getAwaiter() const
Definition: CoroInstr.h:90
Function * getWrapperFunction() const
Definition: CoroInstr.h:94
This class represents the llvm.coro.begin or llvm.coro.begin.custom.abi instructions.
Definition: CoroInstr.h:449
AnyCoroIdInst * getId() const
Definition: CoroInstr.h:453
bool hasCustomABI() const
Definition: CoroInstr.h:457
int getCustomABI() const
Definition: CoroInstr.h:461
This represents the llvm.coro.id instruction.
Definition: CoroInstr.h:148
void setInfo(Constant *C)
Definition: CoroInstr.h:215
This represents the llvm.coro.size instruction.
Definition: CoroInstr.h:629
This represents the llvm.coro.suspend.async instruction.
Definition: CoroInstr.h:563
CoroAsyncResumeInst * getResumeFunction() const
Definition: CoroInstr.h:584
This represents the llvm.coro.suspend instruction.
Definition: CoroInstr.h:531
CoroSaveInst * getCoroSave() const
Definition: CoroInstr.h:535
Debug location.
DIFile * getFile() const
Subprogram description. Uses SubclassData1.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Record of a variable value-assignment, aka a non instruction representation of the dbg....
A debug info location.
Definition: DebugLoc.h:124
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:334
This class represents a freeze function that returns random concrete value if an operand is either a ...
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
Definition: DerivedTypes.h:105
Type * getReturnType() const
Definition: DerivedTypes.h:126
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:166
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:244
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:270
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:352
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition: Function.h:355
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:359
bool isCoroOnlyDestroyWhenComplete() const
Definition: Function.h:545
size_t arg_size() const
Definition: Function.h:899
Argument * getArg(unsigned i) const
Definition: Function.h:884
void setLinkage(LinkageTypes LT)
Definition: GlobalValue.h:539
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:663
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:296
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:53
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void setInitializer(Constant *InitVal)
setInitializer - Sets the initializer for this global variable, removing any existing initializer if ...
Definition: Globals.cpp:511
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1830
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2625
InvokeInst * CreateInvoke(FunctionType *Ty, Value *Callee, BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > OpBundles, const Twine &Name="")
Create an invoke instruction.
Definition: IRBuilder.h:1235
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:202
Value * CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx, const Twine &Name="")
Definition: IRBuilder.h:2029
Value * CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1946
CleanupReturnInst * CreateCleanupRet(CleanupPadInst *CleanupPad, BasicBlock *UnwindBB=nullptr)
Definition: IRBuilder.h:1312
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:1172
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition: IRBuilder.h:527
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2286
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2494
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2204
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1197
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1847
LLVMContext & getContext() const
Definition: IRBuilder.h:203
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
Definition: IRBuilder.h:1167
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1860
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition: IRBuilder.h:507
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2508
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1191
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
Definition: IRBuilder.h:2646
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:207
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2780
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition: Cloning.h:251
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
LLVM_ABI void addSplitFunction(Function &OriginalFunction, Function &NewFunction)
Add a new function split/outlined from an existing function.
LLVM_ABI void addSplitRefRecursiveFunctions(Function &OriginalFunction, ArrayRef< Function * > NewFunctions)
Add new ref-recursive functions split/outlined from an existing function.
Node & get(Function &F)
Get a graph node for a given function, scanning it to populate the graph data as necessary.
SCC * lookupSCC(Node &N) const
Lookup a function's SCC in the graph.
static std::enable_if_t< std::is_base_of< MDNode, T >::value, T * > replaceWithUniqued(std::unique_ptr< T, TempMDNodeDeleter > N)
Replace a temporary node with a uniqued one.
Definition: Metadata.h:1316
A single uniqued string.
Definition: Metadata.h:720
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:607
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
FunctionListType::iterator iterator
The Function iterators.
Definition: Module.h:92
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Definition: DerivedTypes.h:720
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1885
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:118
PrettyStackTraceEntry - This class is used to represent a frame of the "pretty" stack trace that is d...
Return a value (possibly void), from a function.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void reserve(size_type N)
Definition: SmallVector.h:664
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:684
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:719
Analysis pass providing the TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const
If target supports tail call on CB.
Value handle that tracks a Value across RAUW.
Definition: ValueHandle.h:332
ValueTy * getValPtr() const
Definition: ValueHandle.h:336
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
void setOperand(unsigned i, Value *Val)
Definition: User.h:237
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:546
iterator_range< user_iterator > users()
Definition: Value.h:426
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1098
iterator_range< use_iterator > uses()
Definition: Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:396
void splitCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones, TargetTransformInfo &TTI) override
Definition: CoroSplit.cpp:1801
void splitCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones, TargetTransformInfo &TTI) override
Definition: CoroSplit.cpp:1704
Value * deriveNewFramePointer()
Derive the value of the new frame pointer.
Definition: CoroSplit.cpp:738
static Function * createClone(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, Function *NewF, AnyCoroSuspendInst *ActiveSuspend, TargetTransformInfo &TTI)
Create a clone for a continuation lowering.
Definition: CoroCloner.h:84
ValueToValueMapTy VMap
Definition: CoroCloner.h:52
bool isSwitchDestroyFunction()
Definition: CoroCloner.h:105
void replaceRetconOrAsyncSuspendUses()
Replace uses of the active llvm.coro.suspend.retcon/async call with the arguments to the continuation...
Definition: CoroSplit.cpp:471
virtual void create()
Clone the body of the original function into a resume function of some sort.
Definition: CoroSplit.cpp:872
void splitCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones, TargetTransformInfo &TTI) override
Definition: CoroSplit.cpp:1974
static Function * createClone(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, CloneKind FKind, TargetTransformInfo &TTI)
Create a clone for a switch lowering.
Definition: CoroCloner.h:139
void create() override
Clone the body of the original function into a resume function of some sort.
Definition: CoroSplit.cpp:1084
const ParentTy * getParent() const
Definition: ilist_node.h:34
self_iterator getIterator()
Definition: ilist_node.h:134
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
void suppressCoroAllocs(CoroIdInst *CoroId)
Replaces all @llvm.coro.alloc intrinsics calls associated with a given call @llvm....
Definition: Coroutines.cpp:142
void normalizeCoroutine(Function &F, coro::Shape &Shape, TargetTransformInfo &TTI)
Definition: CoroFrame.cpp:1960
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
Definition: CoroSplit.cpp:1684
void replaceCoroFree(CoroIdInst *CoroId, bool Elide)
Definition: Coroutines.cpp:122
LLVM_ABI bool isTriviallyMaterializable(Instruction &I)
@ SwitchCleanup
The shared cleanup function for a switch lowering.
@ Continuation
An individual continuation function.
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableRecord &DVR, bool UseEntryValue)
Attempts to rewrite the location operand of debug records in terms of the coroutine frame pointer,...
Definition: CoroFrame.cpp:1916
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1737
LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This function inlines the called function into the basic block of the caller.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition: STLExtras.h:870
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2491
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7502
LLVM_ABI LazyCallGraph::SCC & updateCGAndAnalysisManagerForFunctionPass(LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
Helper to update the call graph after running a function pass.
LLVM_ABI LazyCallGraph::SCC & updateCGAndAnalysisManagerForCGSCCPass(LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
Helper to update the call graph after running a CGSCC pass.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:663
LLVM_ABI BasicBlock::iterator skipDebugIntrinsics(BasicBlock::iterator It)
Advance It while it points to a debug instruction and return the result.
Definition: BasicBlock.cpp:682
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2513
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
DWARFExpression::Operation Op
LLVM_ABI void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
auto predecessors(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition: Local.cpp:2883
LLVM_ABI bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition: CFG.cpp:282
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Support structure for SCC passes to communicate updates the call graph back to the CGSCC pass manager...
SmallPriorityWorklist< LazyCallGraph::SCC *, 1 > & CWorklist
Worklist of the SCCs queued for processing.
LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
Definition: CoroSplit.cpp:2192
LLVM_ABI CoroSplitPass(bool OptimizeFrame=false)
Definition: CoroSplit.cpp:2148
BaseABITy CreateAndInitABI
Definition: CoroSplit.h:56
CallInst * makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt)
Definition: Coroutines.cpp:52
SmallVector< CallInst *, 2 > SymmetricTransfers
Definition: CoroShape.h:60
SmallVector< CoroAwaitSuspendInst *, 4 > CoroAwaitSuspends
Definition: CoroShape.h:59
AsyncLoweringStorage AsyncLowering
Definition: CoroShape.h:155
FunctionType * getResumeFunctionType() const
Definition: CoroShape.h:193
IntegerType * getIndexType() const
Definition: CoroShape.h:178
StructType * FrameTy
Definition: CoroShape.h:114
AnyCoroIdRetconInst * getRetconCoroId() const
Definition: CoroShape.h:163
PointerType * getSwitchResumePointerType() const
Definition: CoroShape.h:187
CoroIdInst * getSwitchCoroId() const
Definition: CoroShape.h:158
SmallVector< CoroSizeInst *, 2 > CoroSizes
Definition: CoroShape.h:56
CallingConv::ID getResumeFunctionCC() const
Definition: CoroShape.h:230
coro::ABI ABI
Definition: CoroShape.h:112
Value * FramePtr
Definition: CoroShape.h:117
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition: CoroShape.h:58
uint64_t FrameSize
Definition: CoroShape.h:116
LLVM_ABI Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:505
ConstantInt * getIndex(uint64_t Value) const
Definition: CoroShape.h:183
SwitchLoweringStorage SwitchLowering
Definition: CoroShape.h:153
CoroBeginInst * CoroBegin
Definition: CoroShape.h:54
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition: CoroShape.h:250
ArrayRef< Type * > getRetconResultTypes() const
Definition: CoroShape.h:210
LLVM_ABI void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:528
RetconLoweringStorage RetconLowering
Definition: CoroShape.h:154
SmallVector< CoroAlignInst *, 2 > CoroAligns
Definition: CoroShape.h:57
CoroIdAsyncInst * getAsyncCoroId() const
Definition: CoroShape.h:168
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition: CoroShape.h:55
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition: CoroShape.h:63
BasicBlock * AllocaSpillBlock
Definition: CoroShape.h:118
unsigned getSwitchIndexField() const
Definition: CoroShape.h:173