LLVM 22.0.0git
SpillUtils.cpp
Go to the documentation of this file.
1//===- SpillUtils.cpp - Utilities for checking for spills ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
#include "SpillUtils.h"
#include "CoroInternal.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
18
19namespace llvm {
20
21namespace coro {
22
23namespace {
24
25typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;
26
27static bool isNonSpilledIntrinsic(Instruction &I) {
28 // Structural coroutine intrinsics that should not be spilled into the
29 // coroutine frame.
30 return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I);
31}
32
33/// Does control flow starting at the given block ever reach a suspend
34/// instruction before reaching a block in VisitedOrFreeBBs?
35static bool isSuspendReachableFrom(BasicBlock *From,
36 VisitedBlocksSet &VisitedOrFreeBBs) {
37 // Eagerly try to add this block to the visited set. If it's already
38 // there, stop recursing; this path doesn't reach a suspend before
39 // either looping or reaching a freeing block.
40 if (!VisitedOrFreeBBs.insert(From).second)
41 return false;
42
43 // We assume that we'll already have split suspends into their own blocks.
45 return true;
46
47 // Recurse on the successors.
48 for (auto *Succ : successors(From)) {
49 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
50 return true;
51 }
52
53 return false;
54}
55
56/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
57/// suspend point?
58static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
59 // Seed the visited set with all the basic blocks containing a free
60 // so that we won't pass them up.
61 VisitedBlocksSet VisitedOrFreeBBs;
62 for (auto *User : AI->users()) {
63 if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
64 VisitedOrFreeBBs.insert(FI->getParent());
65 }
66
67 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
68}
69
70/// Turn the given coro.alloca.alloc call into a dynamic allocation.
71/// This happens during the all-instructions iteration, so it must not
72/// delete the call.
73static Instruction *
74lowerNonLocalAlloca(CoroAllocaAllocInst *AI, const coro::Shape &Shape,
75 SmallVectorImpl<Instruction *> &DeadInsts) {
76 IRBuilder<> Builder(AI);
77 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
78
79 for (User *U : AI->users()) {
80 if (isa<CoroAllocaGetInst>(U)) {
81 U->replaceAllUsesWith(Alloc);
82 } else {
83 auto FI = cast<CoroAllocaFreeInst>(U);
84 Builder.SetInsertPoint(FI);
85 Shape.emitDealloc(Builder, Alloc, nullptr);
86 }
87 DeadInsts.push_back(cast<Instruction>(U));
88 }
89
90 // Push this on last so that it gets deleted after all the others.
91 DeadInsts.push_back(AI);
92
93 // Return the new allocation value so that we can check for needed spills.
94 return cast<Instruction>(Alloc);
95}
96
97// We need to make room to insert a spill after initial PHIs, but before
98// catchswitch instruction. Placing it before violates the requirement that
99// catchswitch, like all other EHPads must be the first nonPHI in a block.
100//
101// Split away catchswitch into a separate block and insert in its place:
102//
103// cleanuppad <InsertPt> cleanupret.
104//
105// cleanupret instruction will act as an insert point for the spill.
106static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
107 BasicBlock *CurrentBlock = CatchSwitch->getParent();
108 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
109 CurrentBlock->getTerminator()->eraseFromParent();
110
111 auto *CleanupPad =
112 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
113 auto *CleanupRet =
114 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
115 return CleanupRet;
116}
117
118// We use a pointer use visitor to track how an alloca is being used.
119// The goal is to be able to answer the following three questions:
120// 1. Should this alloca be allocated on the frame instead.
121// 2. Could the content of the alloca be modified prior to CoroBegin, which
122// would require copying the data from the alloca to the frame after
123// CoroBegin.
124// 3. Are there any aliases created for this alloca prior to CoroBegin, but
125// used after CoroBegin. In that case, we will need to recreate the alias
126// after CoroBegin based off the frame.
127//
128// To answer question 1, we track two things:
129// A. List of all BasicBlocks that use this alloca or any of the aliases of
130// the alloca. In the end, we check if there exists any two basic blocks that
131// cross suspension points. If so, this alloca must be put on the frame.
132// B. Whether the alloca or any alias of the alloca is escaped at some point,
133// either by storing the address somewhere, or the address is used in a
134// function call that might capture. If it's ever escaped, this alloca must be
135// put on the frame conservatively.
136//
// To answer question 2, we track through the variable MayWriteBeforeCoroBegin.
138// Whenever a potential write happens, either through a store instruction, a
139// function call or any of the memory intrinsics, we check whether this
140// instruction is prior to CoroBegin.
141//
142// To answer question 3, we track the offsets of all aliases created for the
143// alloca prior to CoroBegin but used after CoroBegin. std::optional is used to
144// be able to represent the case when the offset is unknown (e.g. when you have
145// a PHINode that takes in different offset values). We cannot handle unknown
146// offsets and will assert. This is the potential issue left out. An ideal
147// solution would likely require a significant redesign.
148
149namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  // DL drives offset computation in the base visitor; DT orders instructions
  // relative to CoroBegin; Checker answers suspend-crossing queries.
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const coro::Shape &CoroShape,
                   const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
    // Cache the blocks containing suspends for the reachability query in
    // computeShouldLiveOnFrame().
    for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
      CoroSuspendBBs.insert(SuspendInst->getParent());
  }

  // Record every visited instruction as a user of the alloca, then dispatch
  // to the specific visit* handler via the base visitor.
  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it would
    // be written into before CoroBegin as well.
    if (PI.isEscaped() &&
        !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  // PHIs propagate the pointer value, so the result is an alias of the
  // alloca; keep walking its users too.
  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  // Same reasoning as for PHIs: a select of the pointer is an alias.
  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless whether the alias of the alloca is the value operand or the
    // pointer operand, we need to assume the alloca has been written.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //     %ptr = alloca ..
    //     %addr = alloca ..
    //     store %ptr, %addr
    //     %x = load %addr
    //     ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not considering %ptr being escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory locations, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overriding the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          // Lifetime markers neither read nor leak the stored pointer.
          if (isa<LifetimeIntrinsic>(U))
            continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          // Any other use is too complicated to reason about: treat the
          // store as escaping.
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  // Casts preserve the pointee, so the result is an alias of the alloca.
  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  // Track lifetime markers; everything else falls through to the base
  // visitor's generic intrinsic handling.
  void visitIntrinsicInst(IntrinsicInst &II) {
    switch (II.getIntrinsicID()) {
    default:
      return Base::visitIntrinsicInst(II);
    case Intrinsic::lifetime_start:
      LifetimeStarts.insert(&II);
      LifetimeStartBBs.push_back(II.getParent());
      break;
    case Intrinsic::lifetime_end:
      LifetimeEndBBs.insert(II.getParent());
      break;
    }
  }

  // A call that may capture the pointer argument escapes the alloca; any
  // call is conservatively treated as a potential write.
  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  // Lazily computed and cached answer to question 1 (see file comment above):
  // must this alloca be placed on the coroutine frame?
  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return *ShouldLiveOnFrame;
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  // Returns the alias->offset map; aborts if any alias has an unknown offset,
  // since such aliases cannot be recreated off the frame.
  DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffetMap;
  }

private:
  const DominatorTree &DT;
  const coro::Shape &CoroShape;
  const SuspendCrossingInfo &Checker;
  // All alias to the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and the offset in the
  // original Alloca. They need to be recreated after CoroBegin off the frame.
  // NOTE(review): "AliasOffetMap" is presumably a typo for "AliasOffsetMap".
  DenseMap<Instruction *, std::optional<APInt>> AliasOffetMap{};
  // Every instruction that uses the alloca, directly or through an alias.
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  SmallVector<BasicBlock *> LifetimeStartBBs{};
  SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
  SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  // Cache for getShouldLiveOnFrame(); mutable because that getter is const.
  mutable std::optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      // If there is no explicit lifetime.end, then assume the address can
      // cross suspension points.
      if (LifetimeEndBBs.empty())
        return true;

      // If there is a path from a lifetime.start to a suspend without a
      // corresponding lifetime.end, then the alloca's lifetime persists
      // beyond that suspension point and the alloca must go on the frame.
      llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
      if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
                                             &LifetimeEndBBs, &DT))
        return true;

      // Addresses are guaranteed to be identical after every lifetime.start so
      // we cannot use the local stack if the address escaped and there is a
      // suspend point between lifetime markers. This should also cover the
      // case of a single lifetime.start intrinsic in a loop with suspend point.
      if (PI.isEscaped()) {
        for (auto *A : LifetimeStarts) {
          for (auto *B : LifetimeStarts) {
            if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
                                                          B->getParent()))
              return true;
          }
        }
      }
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (esp the return object). To fix that, we need
    // to:
    // 1) Potentially treat sret as nocapture in calls
    // 2) Special handle the return object and put it on the stack
    // 3) Utilize lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    // Without lifetime info: the alloca lives on the frame if any two of its
    // uses are separated by a suspend point.
    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  // A write is "before CoroBegin" unless CoroBegin dominates it.
  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(CoroShape.CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(CoroShape.CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // need to live on the frame.
    if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      // Unknown offset: record the alias with an empty optional.
      AliasOffetMap[&I].reset();
    } else {
      auto [Itr, Inserted] = AliasOffetMap.try_emplace(&I, Offset);
      if (!Inserted && Itr->second && *Itr->second != Offset) {
        // If we have seen two different possible values for this alias, we set
        // it to empty.
        Itr->second.reset();
      }
    }
  }
};
409} // namespace
410
411static void collectFrameAlloca(AllocaInst *AI, const coro::Shape &Shape,
412 const SuspendCrossingInfo &Checker,
413 SmallVectorImpl<AllocaInfo> &Allocas,
414 const DominatorTree &DT) {
415 if (Shape.CoroSuspends.empty())
416 return;
417
418 // The PromiseAlloca will be specially handled since it needs to be in a
419 // fixed position in the frame.
420 if (AI == Shape.SwitchLowering.PromiseAlloca)
421 return;
422
423 // The __coro_gro alloca should outlive the promise, make sure we
424 // keep it outside the frame.
425 if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
426 return;
427
428 // The code that uses lifetime.start intrinsic does not work for functions
429 // with loops without exit. Disable it on ABIs we know to generate such
430 // code.
431 bool ShouldUseLifetimeStartInfo =
432 (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
433 Shape.ABI != coro::ABI::RetconOnce);
434 AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker,
435 ShouldUseLifetimeStartInfo};
436 Visitor.visitPtr(*AI);
437 if (!Visitor.getShouldLiveOnFrame())
438 return;
439 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
440 Visitor.getMayWriteBeforeCoroBegin());
441}
442
443} // namespace
444
446 const SuspendCrossingInfo &Checker) {
447 // Collect the spills for arguments and other not-materializable values.
448 for (Argument &A : F.args())
449 for (User *U : A.users())
450 if (Checker.isDefinitionAcrossSuspend(A, U))
451 Spills[&A].push_back(cast<Instruction>(U));
452}
453
455 SpillInfo &Spills, SmallVector<AllocaInfo, 8> &Allocas,
456 SmallVector<Instruction *, 4> &DeadInstructions,
458 const SuspendCrossingInfo &Checker, const DominatorTree &DT,
459 const coro::Shape &Shape) {
460
461 for (Instruction &I : instructions(F)) {
462 // Values returned from coroutine structure intrinsics should not be part
463 // of the Coroutine Frame.
464 if (isNonSpilledIntrinsic(I) || &I == Shape.CoroBegin)
465 continue;
466
467 // Handle alloca.alloc specially here.
468 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
469 // Check whether the alloca's lifetime is bounded by suspend points.
470 if (isLocalAlloca(AI)) {
471 LocalAllocas.push_back(AI);
472 continue;
473 }
474
475 // If not, do a quick rewrite of the alloca and then add spills of
476 // the rewritten value. The rewrite doesn't invalidate anything in
477 // Spills because the other alloca intrinsics have no other operands
478 // besides AI, and it doesn't invalidate the iteration because we delay
479 // erasing AI.
480 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
481
482 for (User *U : Alloc->users()) {
483 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
484 Spills[Alloc].push_back(cast<Instruction>(U));
485 }
486 continue;
487 }
488
489 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
490 if (isa<CoroAllocaGetInst>(I))
491 continue;
492
493 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
494 collectFrameAlloca(AI, Shape, Checker, Allocas, DT);
495 continue;
496 }
497
498 for (User *U : I.users())
499 if (Checker.isDefinitionAcrossSuspend(I, U)) {
500 // We cannot spill a token.
501 if (I.getType()->isTokenTy())
503 "token definition is separated from the use by a suspend point");
504 Spills[&I].push_back(cast<Instruction>(U));
505 }
506 }
507}
508
510 const SuspendCrossingInfo &Checker) {
511 // We don't want the layout of coroutine frame to be affected
512 // by debug information. So we only choose to salvage dbg.values for
513 // whose value is already in the frame.
514 // We would handle the dbg.values for allocas specially
515 for (auto &Iter : Spills) {
516 auto *V = Iter.first;
518 findDbgValues(V, DVRs);
519 // Add the instructions which carry debug info that is in the frame.
520 for (DbgVariableRecord *DVR : DVRs)
521 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
522 Spills[V].push_back(DVR->Marker->MarkedInstr);
523 }
524}
525
526/// Async and Retcon{Once} conventions assume that all spill uses can be sunk
527/// after the coro.begin intrinsic.
529 CoroBeginInst *CoroBegin,
530 coro::SpillInfo &Spills,
534
535 // Collect all users that precede coro.begin.
536 auto collectUsers = [&](Value *Def) {
537 for (User *U : Def->users()) {
538 auto Inst = cast<Instruction>(U);
539 if (Inst->getParent() != CoroBegin->getParent() ||
540 Dom.dominates(CoroBegin, Inst))
541 continue;
542 if (ToMove.insert(Inst))
543 Worklist.push_back(Inst);
544 }
545 };
546 for (auto &I : Spills)
547 collectUsers(I.first);
548 for (auto &I : Allocas)
549 collectUsers(I.Alloca);
550
551 // Recursively collect users before coro.begin.
552 while (!Worklist.empty()) {
553 auto *Def = Worklist.pop_back_val();
554 for (User *U : Def->users()) {
555 auto Inst = cast<Instruction>(U);
556 if (Dom.dominates(CoroBegin, Inst))
557 continue;
558 if (ToMove.insert(Inst))
559 Worklist.push_back(Inst);
560 }
561 }
562
563 // Sort by dominance.
564 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
565 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
566 // If a dominates b it should precede (<) b.
567 return Dom.dominates(A, B);
568 });
569
570 Instruction *InsertPt = CoroBegin->getNextNode();
571 for (Instruction *Inst : InsertionList)
572 Inst->moveBefore(InsertPt->getIterator());
573}
574
576 const DominatorTree &DT) {
577 BasicBlock::iterator InsertPt;
578 if (auto *Arg = dyn_cast<Argument>(Def)) {
579 // For arguments, we will place the store instruction right after
580 // the coroutine frame pointer instruction, i.e. coro.begin.
581 InsertPt = Shape.getInsertPtAfterFramePtr();
582
583 // If we're spilling an Argument, make sure we clear 'captures'
584 // from the coroutine function.
585 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);
586 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
587 // Don't spill immediately after a suspend; splitting assumes
588 // that the suspend will be followed by a branch.
589 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
590 } else {
591 auto *I = cast<Instruction>(Def);
592 if (!DT.dominates(Shape.CoroBegin, I)) {
593 // If it is not dominated by CoroBegin, then spill should be
594 // inserted immediately after CoroFrame is computed.
595 InsertPt = Shape.getInsertPtAfterFramePtr();
596 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
597 // If we are spilling the result of the invoke instruction, split
598 // the normal edge and insert the spill in the new block.
599 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
600 InsertPt = NewBB->getTerminator()->getIterator();
601 } else if (isa<PHINode>(I)) {
602 // Skip the PHINodes and EH pads instructions.
603 BasicBlock *DefBlock = I->getParent();
604 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
605 InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
606 else
607 InsertPt = DefBlock->getFirstInsertionPt();
608 } else {
609 assert(!I->isTerminator() && "unexpected terminator");
610 // For all other values, the spill is placed immediately after
611 // the definition.
612 InsertPt = I->getNextNode()->getIterator();
613 }
614 }
615
616 return InsertPt;
617}
618
619} // End namespace coro.
620
621} // End namespace llvm.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
uint64_t Offset
Definition: ELF_riscv.cpp:478
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
uint64_t IntrinsicInst * II
#define P(N)
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:393
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the llvm.coro.begin or llvm.coro.begin.custom.abi instructions.
Definition: CoroInstr.h:449
Record of a variable value-assignment, aka a non instruction representation of the dbg....
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:135
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:119
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:109
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:168
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:356
bool empty() const
Definition: SmallVector.h:82
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
LLVM Value Representation.
Definition: Value.h:75
const ParentTy * getParent() const
Definition: ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:359
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:81
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
BasicBlock::iterator getSpillInsertionPt(const coro::Shape &, Value *Def, const DominatorTree &DT)
Definition: SpillUtils.cpp:575
bool isSuspendBlock(BasicBlock *BB)
Definition: Coroutines.cpp:98
void sinkSpillUsesAfterCoroBegin(const DominatorTree &DT, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills, SmallVectorImpl< coro::AllocaInfo > &Allocas)
Async and Retcon{Once} conventions assume that all spill uses can be sunk after the coro....
Definition: SpillUtils.cpp:528
void collectSpillsFromArgs(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
Definition: SpillUtils.cpp:445
void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
Definition: SpillUtils.cpp:509
void collectSpillsAndAllocasFromInsts(SpillInfo &Spills, SmallVector< AllocaInfo, 8 > &Allocas, SmallVector< Instruction *, 4 > &DeadInstructions, SmallVector< CoroAllocaAllocInst *, 4 > &LocalAllocas, Function &F, const SuspendCrossingInfo &Checker, const DominatorTree &DT, const coro::Shape &Shape)
Definition: SpillUtils.cpp:454
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
Definition: DebugInfo.cpp:124
auto successors(const MachineBasicBlock *BB)
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1669
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI bool isManyPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const SmallPtrSetImpl< const BasicBlock * > &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is a potentially a path from at least one block in 'Worklist' to at least one...
Definition: CFG.cpp:249
LLVM_ABI BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:249
CoroBeginInst * CoroBegin
Definition: CoroShape.h:54
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition: CoroShape.h:250