LLVM 21.0.0git
CoroFrame.cpp
Go to the documentation of this file.
1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This file contains classes used to discover if for a particular value
9// its definition precedes and its uses follow a suspend block. This is
10// referred to as a suspend crossing value.
11//
12// Using the information discovered we form a Coroutine Frame structure to
13// contain those values. All uses of those values are replaced with appropriate
14// GEP + load from the coroutine frame. At the point of the definition we spill
15// the value into the coroutine frame.
16//===----------------------------------------------------------------------===//
17
#include "CoroInternal.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include <algorithm>
#include <optional>
41
42using namespace llvm;
43
45
46#define DEBUG_TYPE "coro-frame"
47
48namespace {
49class FrameTypeBuilder;
50// Mapping from the to-be-spilled value to all the users that need reload.
51struct FrameDataInfo {
52 // All the values (that are not allocas) that needs to be spilled to the
53 // frame.
54 coro::SpillInfo &Spills;
55 // Allocas contains all values defined as allocas that need to live in the
56 // frame.
58
59 FrameDataInfo(coro::SpillInfo &Spills,
61 : Spills(Spills), Allocas(Allocas) {}
62
63 SmallVector<Value *, 8> getAllDefs() const {
65 for (const auto &P : Spills)
66 Defs.push_back(P.first);
67 for (const auto &A : Allocas)
68 Defs.push_back(A.Alloca);
69 return Defs;
70 }
71
72 uint32_t getFieldIndex(Value *V) const {
73 auto Itr = FieldIndexMap.find(V);
74 assert(Itr != FieldIndexMap.end() &&
75 "Value does not have a frame field index");
76 return Itr->second;
77 }
78
79 void setFieldIndex(Value *V, uint32_t Index) {
80 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
81 "Cannot set the index for the same field twice.");
82 FieldIndexMap[V] = Index;
83 }
84
85 Align getAlign(Value *V) const {
86 auto Iter = FieldAlignMap.find(V);
87 assert(Iter != FieldAlignMap.end());
88 return Iter->second;
89 }
90
91 void setAlign(Value *V, Align AL) {
92 assert(FieldAlignMap.count(V) == 0);
93 FieldAlignMap.insert({V, AL});
94 }
95
96 uint64_t getDynamicAlign(Value *V) const {
97 auto Iter = FieldDynamicAlignMap.find(V);
98 assert(Iter != FieldDynamicAlignMap.end());
99 return Iter->second;
100 }
101
102 void setDynamicAlign(Value *V, uint64_t Align) {
103 assert(FieldDynamicAlignMap.count(V) == 0);
104 FieldDynamicAlignMap.insert({V, Align});
105 }
106
107 uint64_t getOffset(Value *V) const {
108 auto Iter = FieldOffsetMap.find(V);
109 assert(Iter != FieldOffsetMap.end());
110 return Iter->second;
111 }
112
113 void setOffset(Value *V, uint64_t Offset) {
114 assert(FieldOffsetMap.count(V) == 0);
115 FieldOffsetMap.insert({V, Offset});
116 }
117
118 // Remap the index of every field in the frame, using the final layout index.
119 void updateLayoutIndex(FrameTypeBuilder &B);
120
121private:
122 // LayoutIndexUpdateStarted is used to avoid updating the index of any field
123 // twice by mistake.
124 bool LayoutIndexUpdateStarted = false;
125 // Map from values to their slot indexes on the frame. They will be first set
126 // with their original insertion field index. After the frame is built, their
127 // indexes will be updated into the final layout index.
128 DenseMap<Value *, uint32_t> FieldIndexMap;
129 // Map from values to their alignment on the frame. They would be set after
130 // the frame is built.
131 DenseMap<Value *, Align> FieldAlignMap;
132 DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
133 // Map from values to their offset on the frame. They would be set after
134 // the frame is built.
135 DenseMap<Value *, uint64_t> FieldOffsetMap;
136};
137} // namespace
138
139#ifndef NDEBUG
140static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills) {
141 dbgs() << "------------- " << Title << " --------------\n";
142 for (const auto &E : Spills) {
143 E.first->dump();
144 dbgs() << " user: ";
145 for (auto *I : E.second)
146 I->dump();
147 }
148}
149
151 dbgs() << "------------- Allocas --------------\n";
152 for (const auto &A : Allocas) {
153 A.Alloca->dump();
154 }
155}
156#endif
157
158namespace {
159using FieldIDType = size_t;
160// We cannot rely solely on natural alignment of a type when building a
161// coroutine frame and if the alignment specified on the Alloca instruction
162// differs from the natural alignment of the alloca type we will need to insert
163// padding.
164class FrameTypeBuilder {
165private:
166 struct Field {
169 Type *Ty;
170 FieldIDType LayoutFieldIndex;
171 Align Alignment;
172 Align TyAlignment;
173 uint64_t DynamicAlignBuffer;
174 };
175
176 const DataLayout &DL;
177 LLVMContext &Context;
178 uint64_t StructSize = 0;
179 Align StructAlign;
180 bool IsFinished = false;
181
182 std::optional<Align> MaxFrameAlignment;
183
185 DenseMap<Value*, unsigned> FieldIndexByKey;
186
187public:
188 FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
189 std::optional<Align> MaxFrameAlignment)
190 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
191
192 /// Add a field to this structure for the storage of an `alloca`
193 /// instruction.
194 [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
195 bool IsHeader = false) {
196 Type *Ty = AI->getAllocatedType();
197
198 // Make an array type if this is a static array allocation.
199 if (AI->isArrayAllocation()) {
200 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
201 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
202 else
203 report_fatal_error("Coroutines cannot handle non static allocas yet");
204 }
205
206 return addField(Ty, AI->getAlign(), IsHeader);
207 }
208
209 /// We want to put the allocas whose lifetime-ranges are not overlapped
210 /// into one slot of coroutine frame.
211 /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
212 ///
213 /// cppcoro::task<void> alternative_paths(bool cond) {
214 /// if (cond) {
215 /// big_structure a;
216 /// process(a);
217 /// co_await something();
218 /// } else {
219 /// big_structure b;
220 /// process2(b);
221 /// co_await something();
222 /// }
223 /// }
224 ///
225 /// We want to put variable a and variable b in the same slot to
226 /// reduce the size of coroutine frame.
227 ///
228 /// This function use StackLifetime algorithm to partition the AllocaInsts in
229 /// Spills to non-overlapped sets in order to put Alloca in the same
230 /// non-overlapped set into the same slot in the Coroutine Frame. Then add
231 /// field for the allocas in the same non-overlapped set by using the largest
232 /// type as the field type.
233 ///
234 /// Side Effects: Because We sort the allocas, the order of allocas in the
235 /// frame may be different with the order in the source code.
236 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
237 coro::Shape &Shape, bool OptimizeFrame);
238
239 /// Add a field to this structure.
240 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
241 bool IsHeader = false,
242 bool IsSpillOfValue = false) {
243 assert(!IsFinished && "adding fields to a finished builder");
244 assert(Ty && "must provide a type for a field");
245
246 // The field size is always the alloc size of the type.
247 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
248
249 // For an alloca with size=0, we don't need to add a field and they
250 // can just point to any index in the frame. Use index 0.
251 if (FieldSize == 0) {
252 return 0;
253 }
254
255 // The field alignment might not be the type alignment, but we need
256 // to remember the type alignment anyway to build the type.
257 // If we are spilling values we don't need to worry about ABI alignment
258 // concerns.
259 Align ABIAlign = DL.getABITypeAlign(Ty);
260 Align TyAlignment = ABIAlign;
261 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
262 TyAlignment = *MaxFrameAlignment;
263 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
264
265 // The field alignment could be bigger than the max frame case, in that case
266 // we request additional storage to be able to dynamically align the
267 // pointer.
268 uint64_t DynamicAlignBuffer = 0;
269 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
270 DynamicAlignBuffer =
271 offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
272 FieldAlignment = *MaxFrameAlignment;
273 FieldSize = FieldSize + DynamicAlignBuffer;
274 }
275
276 // Lay out header fields immediately.
278 if (IsHeader) {
279 Offset = alignTo(StructSize, FieldAlignment);
280 StructSize = Offset + FieldSize;
281
282 // Everything else has a flexible offset.
283 } else {
285 }
286
287 Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
288 DynamicAlignBuffer});
289 return Fields.size() - 1;
290 }
291
292 /// Finish the layout and create the struct type with the given name.
293 StructType *finish(StringRef Name);
294
295 uint64_t getStructSize() const {
296 assert(IsFinished && "not yet finished!");
297 return StructSize;
298 }
299
300 Align getStructAlign() const {
301 assert(IsFinished && "not yet finished!");
302 return StructAlign;
303 }
304
305 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
306 assert(IsFinished && "not yet finished!");
307 return Fields[Id].LayoutFieldIndex;
308 }
309
310 Field getLayoutField(FieldIDType Id) const {
311 assert(IsFinished && "not yet finished!");
312 return Fields[Id];
313 }
314};
315} // namespace
316
317void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
318 auto Updater = [&](Value *I) {
319 auto Field = B.getLayoutField(getFieldIndex(I));
320 setFieldIndex(I, Field.LayoutFieldIndex);
321 setAlign(I, Field.Alignment);
322 uint64_t dynamicAlign =
323 Field.DynamicAlignBuffer
324 ? Field.DynamicAlignBuffer + Field.Alignment.value()
325 : 0;
326 setDynamicAlign(I, dynamicAlign);
327 setOffset(I, Field.Offset);
328 };
329 LayoutIndexUpdateStarted = true;
330 for (auto &S : Spills)
331 Updater(S.first);
332 for (const auto &A : Allocas)
333 Updater(A.Alloca);
334 LayoutIndexUpdateStarted = false;
335}
336
void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape,
                                          bool OptimizeFrame) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlapedAllocas;

  // We need to add a field for each merged alloca set at every exit of this
  // function. The first element of each set is the largest alloca (sets are
  // built in size order below) and determines the slot's type.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  // Without frame optimization, every alloca gets its own slot.
  if (!OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges for every alloca are overlapped
  // in the blocks that contain coro.end and the successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas to live in the same
  // slot.
  //
  // Temporarily redirect each suspend switch's default destination (the
  // coro.end path) to its resume successor so StackLifetime ignores those
  // blocks; the original destinations are saved and restored below.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto *CoroSuspendInst : Shape.CoroSuspends) {
    for (auto *U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  // Two allocas interfere iff their may-live ranges overlap anywhere.
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const coro::AllocaInfo &A) {
    std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedValue();
  };
  // Put larger allocas in the front. So the larger allocas have higher
  // priority to merge, which can save more space potentially. Also each
  // AllocaSet would be ordered. So we can get the largest Alloca in one
  // AllocaSet easily.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find if the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If it is true, insert the alloca to that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A should satisfy the alignment requirement for B.
      //
      // There may be other more fine-grained strategies to handle the
      // alignment information during the merging process. But it seems hard
      // to handle these strategies and they benefit little.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Recover the default target destination for each Switch statement
  // reserved above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This Debug Info could tell us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlapedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}
465
466StructType *FrameTypeBuilder::finish(StringRef Name) {
467 assert(!IsFinished && "already finished!");
468
469 // Prepare the optimal-layout field array.
470 // The Id in the layout field is a pointer to our Field for it.
472 LayoutFields.reserve(Fields.size());
473 for (auto &Field : Fields) {
474 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
475 Field.Offset);
476 }
477
478 // Perform layout.
479 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
480 StructSize = SizeAndAlign.first;
481 StructAlign = SizeAndAlign.second;
482
483 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
484 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
485 };
486
487 // We need to produce a packed struct type if there's a field whose
488 // assigned offset isn't a multiple of its natural type alignment.
489 bool Packed = [&] {
490 for (auto &LayoutField : LayoutFields) {
491 auto &F = getField(LayoutField);
492 if (!isAligned(F.TyAlignment, LayoutField.Offset))
493 return true;
494 }
495 return false;
496 }();
497
498 // Build the struct body.
499 SmallVector<Type*, 16> FieldTypes;
500 FieldTypes.reserve(LayoutFields.size() * 3 / 2);
501 uint64_t LastOffset = 0;
502 for (auto &LayoutField : LayoutFields) {
503 auto &F = getField(LayoutField);
504
505 auto Offset = LayoutField.Offset;
506
507 // Add a padding field if there's a padding gap and we're either
508 // building a packed struct or the padding gap is more than we'd
509 // get from aligning to the field type's natural alignment.
510 assert(Offset >= LastOffset);
511 if (Offset != LastOffset) {
512 if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
513 FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
514 Offset - LastOffset));
515 }
516
517 F.Offset = Offset;
518 F.LayoutFieldIndex = FieldTypes.size();
519
520 FieldTypes.push_back(F.Ty);
521 if (F.DynamicAlignBuffer) {
522 FieldTypes.push_back(
523 ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
524 }
525 LastOffset = Offset + F.Size;
526 }
527
528 StructType *Ty = StructType::create(Context, FieldTypes, Name, Packed);
529
530#ifndef NDEBUG
531 // Check that the IR layout matches the offsets we expect.
532 auto Layout = DL.getStructLayout(Ty);
533 for (auto &F : Fields) {
534 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
535 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
536 }
537#endif
538
539 IsFinished = true;
540
541 return Ty;
542}
543
544static void cacheDIVar(FrameDataInfo &FrameData,
546 for (auto *V : FrameData.getAllDefs()) {
547 if (DIVarCache.contains(V))
548 continue;
549
550 auto CacheIt = [&DIVarCache, V](const auto &Container) {
551 auto *I = llvm::find_if(Container, [](auto *DDI) {
552 return DDI->getExpression()->getNumElements() == 0;
553 });
554 if (I != Container.end())
555 DIVarCache.insert({V, (*I)->getVariable()});
556 };
557 CacheIt(findDbgDeclares(V));
558 CacheIt(findDVRDeclares(V));
559 }
560}
561
562/// Create name for Type. It uses MDString to store new created string to
563/// avoid memory leak.
565 if (Ty->isIntegerTy()) {
566 // The longest name in common may be '__int_128', which has 9 bits.
567 SmallString<16> Buffer;
568 raw_svector_ostream OS(Buffer);
569 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
570 auto *MDName = MDString::get(Ty->getContext(), OS.str());
571 return MDName->getString();
572 }
573
574 if (Ty->isFloatingPointTy()) {
575 if (Ty->isFloatTy())
576 return "__float_";
577 if (Ty->isDoubleTy())
578 return "__double_";
579 return "__floating_type_";
580 }
581
582 if (Ty->isPointerTy())
583 return "PointerType";
584
585 if (Ty->isStructTy()) {
586 if (!cast<StructType>(Ty)->hasName())
587 return "__LiteralStructType_";
588
589 auto Name = Ty->getStructName();
590
591 SmallString<16> Buffer(Name);
592 for (auto &Iter : Buffer)
593 if (Iter == '.' || Iter == ':')
594 Iter = '_';
595 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
596 return MDName->getString();
597 }
598
599 return "UnknownType";
600}
601
602static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
603 const DataLayout &Layout, DIScope *Scope,
604 unsigned LineNum,
605 DenseMap<Type *, DIType *> &DITypeCache) {
606 if (DIType *DT = DITypeCache.lookup(Ty))
607 return DT;
608
610
611 DIType *RetType = nullptr;
612
613 if (Ty->isIntegerTy()) {
614 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
615 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
616 llvm::DINode::FlagArtificial);
617 } else if (Ty->isFloatingPointTy()) {
618 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
619 dwarf::DW_ATE_float,
620 llvm::DINode::FlagArtificial);
621 } else if (Ty->isPointerTy()) {
622 // Construct PointerType points to null (aka void *) instead of exploring
623 // pointee type to avoid infinite search problem. For example, we would be
624 // in trouble if we traverse recursively:
625 //
626 // struct Node {
627 // Node* ptr;
628 // };
629 RetType =
630 Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
631 Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
632 /*DWARFAddressSpace=*/std::nullopt, Name);
633 } else if (Ty->isStructTy()) {
634 auto *DIStruct = Builder.createStructType(
635 Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
636 Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
637 llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());
638
639 auto *StructTy = cast<StructType>(Ty);
641 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
642 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
643 Scope, LineNum, DITypeCache);
644 assert(DITy);
645 Elements.push_back(Builder.createMemberType(
646 Scope, DITy->getName(), Scope->getFile(), LineNum,
647 DITy->getSizeInBits(), DITy->getAlignInBits(),
648 Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
649 llvm::DINode::FlagArtificial, DITy));
650 }
651
652 Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
653
654 RetType = DIStruct;
655 } else {
656 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
657 TypeSize Size = Layout.getTypeSizeInBits(Ty);
658 auto *CharSizeType = Builder.createBasicType(
659 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
660
661 if (Size <= 8)
662 RetType = CharSizeType;
663 else {
664 if (Size % 8 != 0)
665 Size = TypeSize::getFixed(Size + 8 - (Size % 8));
666
667 RetType = Builder.createArrayType(
668 Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
669 Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
670 }
671 }
672
673 DITypeCache.insert({Ty, RetType});
674 return RetType;
675}
676
677/// Build artificial debug info for C++ coroutine frames to allow users to
678/// inspect the contents of the frame directly
679///
680/// Create Debug information for coroutine frame with debug name "__coro_frame".
681/// The debug information for the fields of coroutine frame is constructed from
682/// the following way:
683/// 1. For all the value in the Frame, we search the use of dbg.declare to find
684/// the corresponding debug variables for the value. If we can find the
685/// debug variable, we can get full and accurate debug information.
686/// 2. If we can't get debug information in step 1 and 2, we could only try to
687/// build the DIType by Type. We did this in solveDIType. We only handle
688/// integer, float, double, integer type and struct type for now.
690 FrameDataInfo &FrameData) {
691 DISubprogram *DIS = F.getSubprogram();
692 // If there is no DISubprogram for F, it implies the Function are not compiled
693 // with debug info. So we also don't need to generate debug info for the frame
694 // neither.
695 if (!DIS || !DIS->getUnit() ||
697 (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()) ||
698 DIS->getUnit()->getEmissionKind() != DICompileUnit::DebugEmissionKind::FullDebug)
699 return;
700
701 assert(Shape.ABI == coro::ABI::Switch &&
702 "We could only build debug infomation for C++ coroutine now.\n");
703
704 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
705
706 assert(Shape.getPromiseAlloca() &&
707 "Coroutine with switch ABI should own Promise alloca");
708
709 DIFile *DFile = DIS->getFile();
710 unsigned LineNum = DIS->getLine();
711
712 DICompositeType *FrameDITy = DBuilder.createStructType(
713 DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
714 DFile, LineNum, Shape.FrameSize * 8,
715 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
716 llvm::DINodeArray());
717 StructType *FrameTy = Shape.FrameTy;
719 DataLayout Layout = F.getDataLayout();
720
722 cacheDIVar(FrameData, DIVarCache);
723
724 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
725 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
726 unsigned IndexIndex = Shape.SwitchLowering.IndexField;
727
729 NameCache.insert({ResumeIndex, "__resume_fn"});
730 NameCache.insert({DestroyIndex, "__destroy_fn"});
731 NameCache.insert({IndexIndex, "__coro_index"});
732
733 Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
734 *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
735 *IndexTy = FrameTy->getElementType(IndexIndex);
736
738 TyCache.insert(
739 {ResumeIndex, DBuilder.createPointerType(
740 nullptr, Layout.getTypeSizeInBits(ResumeFnTy))});
741 TyCache.insert(
742 {DestroyIndex, DBuilder.createPointerType(
743 nullptr, Layout.getTypeSizeInBits(DestroyFnTy))});
744
745 /// FIXME: If we fill the field `SizeInBits` with the actual size of
746 /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
747 TyCache.insert({IndexIndex, DBuilder.createBasicType(
748 "__coro_index",
749 (Layout.getTypeSizeInBits(IndexTy) < 8)
750 ? 8
751 : Layout.getTypeSizeInBits(IndexTy),
752 dwarf::DW_ATE_unsigned_char)});
753
754 for (auto *V : FrameData.getAllDefs()) {
755 auto It = DIVarCache.find(V);
756 if (It == DIVarCache.end())
757 continue;
758
759 auto Index = FrameData.getFieldIndex(V);
760
761 NameCache.insert({Index, It->second->getName()});
762 TyCache.insert({Index, It->second->getType()});
763 }
764
765 // Cache from index to (Align, Offset Pair)
767 // The Align and Offset of Resume function and Destroy function are fixed.
768 OffsetCache.insert({ResumeIndex, {8, 0}});
769 OffsetCache.insert({DestroyIndex, {8, 8}});
770 OffsetCache.insert(
771 {IndexIndex,
773
774 for (auto *V : FrameData.getAllDefs()) {
775 auto Index = FrameData.getFieldIndex(V);
776
777 OffsetCache.insert(
778 {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
779 }
780
781 DenseMap<Type *, DIType *> DITypeCache;
782 // This counter is used to avoid same type names. e.g., there would be
783 // many i32 and i64 types in one coroutine. And we would use i32_0 and
784 // i32_1 to avoid the same type. Since it makes no sense the name of the
785 // fields confilicts with each other.
786 unsigned UnknownTypeNum = 0;
787 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
788 if (!OffsetCache.contains(Index))
789 continue;
790
791 std::string Name;
792 uint64_t SizeInBits;
793 uint32_t AlignInBits;
794 uint64_t OffsetInBits;
795 DIType *DITy = nullptr;
796
797 Type *Ty = FrameTy->getElementType(Index);
798 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
799 SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue();
800 AlignInBits = OffsetCache[Index].first * 8;
801 OffsetInBits = OffsetCache[Index].second * 8;
802
803 if (auto It = NameCache.find(Index); It != NameCache.end()) {
804 Name = It->second.str();
805 DITy = TyCache[Index];
806 } else {
807 DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
808 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
809 Name = DITy->getName().str();
810 Name += "_" + std::to_string(UnknownTypeNum);
811 UnknownTypeNum++;
812 }
813
814 Elements.push_back(DBuilder.createMemberType(
815 FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
816 llvm::DINode::FlagArtificial, DITy));
817 }
818
819 DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
820
821 auto *FrameDIVar =
822 DBuilder.createAutoVariable(DIS, "__coro_frame", DFile, LineNum,
823 FrameDITy, true, DINode::FlagArtificial);
824
825 // Subprogram would have ContainedNodes field which records the debug
826 // variables it contained. So we need to add __coro_frame to the
827 // ContainedNodes of it.
828 //
829 // If we don't add __coro_frame to the RetainedNodes, user may get
830 // `no symbol __coro_frame in context` rather than `__coro_frame`
831 // is optimized out, which is more precise.
832 auto RetainedNodes = DIS->getRetainedNodes();
833 SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
834 RetainedNodes.end());
835 RetainedNodesVec.push_back(FrameDIVar);
836 DIS->replaceOperandWith(7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
837
838 // Construct the location for the frame debug variable. The column number
839 // is fake but it should be fine.
840 DILocation *DILoc =
841 DILocation::get(DIS->getContext(), LineNum, /*Column=*/1, DIS);
842 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
843
845 DbgVariableRecord *NewDVR =
846 new DbgVariableRecord(ValueAsMetadata::get(Shape.FramePtr), FrameDIVar,
847 DBuilder.createExpression(), DILoc,
848 DbgVariableRecord::LocationType::Declare);
850 It->getParent()->insertDbgRecordBefore(NewDVR, It);
851 } else {
852 DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
853 DBuilder.createExpression(), DILoc,
854 &*Shape.getInsertPtAfterFramePtr());
855 }
856}
857
858// Build a struct that will keep state for an active coroutine.
859// struct f.frame {
860// ResumeFnTy ResumeFnAddr;
861// ResumeFnTy DestroyFnAddr;
862// ... promise (if present) ...
863// int ResumeIndex;
864// ... spills ...
865// };
867 FrameDataInfo &FrameData,
868 bool OptimizeFrame) {
869 LLVMContext &C = F.getContext();
870 const DataLayout &DL = F.getDataLayout();
871
872 // We will use this value to cap the alignment of spilled values.
873 std::optional<Align> MaxFrameAlignment;
874 if (Shape.ABI == coro::ABI::Async)
875 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
876 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
877
878 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
879 std::optional<FieldIDType> SwitchIndexFieldId;
880
881 if (Shape.ABI == coro::ABI::Switch) {
882 auto *FnPtrTy = PointerType::getUnqual(C);
883
884 // Add header fields for the resume and destroy functions.
885 // We can rely on these being perfectly packed.
886 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
887 (void)B.addField(FnPtrTy, std::nullopt, /*header*/ true);
888
889 // PromiseAlloca field needs to be explicitly added here because it's
890 // a header field with a fixed offset based on its alignment. Hence it
891 // needs special handling and cannot be added to FrameData.Allocas.
892 if (PromiseAlloca)
893 FrameData.setFieldIndex(
894 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
895
896 // Add a field to store the suspend index. This doesn't need to
897 // be in the header.
898 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
899 Type *IndexType = Type::getIntNTy(C, IndexBits);
900
901 SwitchIndexFieldId = B.addField(IndexType, std::nullopt);
902 } else {
903 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
904 }
905
906 // Because multiple allocas may own the same field slot,
907 // we add allocas to field here.
908 B.addFieldForAllocas(F, FrameData, Shape, OptimizeFrame);
909 // Add PromiseAlloca to Allocas list so that
910 // 1. updateLayoutIndex could update its index after
911 // `performOptimizedStructLayout`
912 // 2. it is processed in insertSpills.
913 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
914 // We assume that the promise alloca won't be modified before
915 // CoroBegin and no alias will be create before CoroBegin.
916 FrameData.Allocas.emplace_back(
917 PromiseAlloca, DenseMap<Instruction *, std::optional<APInt>>{}, false);
918 // Create an entry for every spilled value.
919 for (auto &S : FrameData.Spills) {
920 Type *FieldType = S.first->getType();
921 // For byval arguments, we need to store the pointed value in the frame,
922 // instead of the pointer itself.
923 if (const Argument *A = dyn_cast<Argument>(S.first))
924 if (A->hasByValAttr())
925 FieldType = A->getParamByValType();
926 FieldIDType Id = B.addField(FieldType, std::nullopt, false /*header*/,
927 true /*IsSpillOfValue*/);
928 FrameData.setFieldIndex(S.first, Id);
929 }
930
931 StructType *FrameTy = [&] {
932 SmallString<32> Name(F.getName());
933 Name.append(".Frame");
934 return B.finish(Name);
935 }();
936
937 FrameData.updateLayoutIndex(B);
938 Shape.FrameAlign = B.getStructAlign();
939 Shape.FrameSize = B.getStructSize();
940
941 switch (Shape.ABI) {
942 case coro::ABI::Switch: {
943 // In the switch ABI, remember the switch-index field.
944 auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
945 Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
946 Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
947 Shape.SwitchLowering.IndexOffset = IndexField.Offset;
948
949 // Also round the frame size up to a multiple of its alignment, as is
950 // generally expected in C/C++.
951 Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
952 break;
953 }
954
955 // In the retcon ABI, remember whether the frame is inline in the storage.
956 case coro::ABI::Retcon:
957 case coro::ABI::RetconOnce: {
958 auto Id = Shape.getRetconCoroId();
960 = (B.getStructSize() <= Id->getStorageSize() &&
961 B.getStructAlign() <= Id->getStorageAlignment());
962 break;
963 }
964 case coro::ABI::Async: {
967 // Also make the final context size a multiple of the context alignment to
968 // make allocation easier for allocators.
972 if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
974 "The alignment requirment of frame variables cannot be higher than "
975 "the alignment of the async function context");
976 }
977 break;
978 }
979 }
980
981 return FrameTy;
982}
983
984// Replace all alloca and SSA values that are accessed across suspend points
985// with GetElementPointer from coroutine frame + loads and stores. Create an
986// AllocaSpillBB that will become the new entry block for the resume parts of
987// the coroutine:
988//
989// %hdl = coro.begin(...)
990// whatever
991//
992// becomes:
993//
994// %hdl = coro.begin(...)
995// br label %AllocaSpillBB
996//
997// AllocaSpillBB:
998// ; geps corresponding to allocas that were moved to coroutine frame
999// br label PostSpill
1000//
1001// PostSpill:
1002// whatever
1003//
1004//
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
  LLVMContext &C = Shape.CoroBegin->getContext();
  Function *F = Shape.CoroBegin->getFunction();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Value *FramePtr = Shape.FramePtr;
  DominatorTree DT(*F);

  // Create a GEP with the given index into the coroutine frame for the original
  // value Orig. Appends an extra 0 index for array-allocas, preserving the
  // original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          // Array alloca: add a trailing 0 so the GEP points at element 0,
          // preserving the element type for the callers.
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (FrameData.getDynamicAlign(Orig) != 0) {
        assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
        // Over-aligned field: round the raw field address up to the alloca's
        // alignment via (ptr + align - 1) & ~(align - 1).
        auto *M = AI->getModule();
        auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
        auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
        auto *AlignMask =
            ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
        PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
        PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
        return Builder.CreateIntToPtr(PtrValue, AI->getType());
      }
      // If the type of GEP is not equal to the type of AllocaInst, it implies
      // that the AllocaInst may be reused in the Frame slot of other
      // AllocaInst. So We cast GEP to the AllocaInst here to re-use
      // the Frame storage.
      //
      // Note: If we change the strategy dealing with alignment, we need to refine
      // this casting.
      if (GEP->getType() != Orig->getType())
        return Builder.CreateAddrSpaceCast(GEP, Orig->getType(),
                                           Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    BasicBlock::iterator InsertPt = coro::getSpillInsertionPt(Shape, Def, DT);

    Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // If we're spilling an Argument, make sure we clear 'captures'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);

      if (Arg->hasByValAttr())
        ByValTy = Arg->getParamByValType();
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (ByValTy) {
      // For byval arguments, we need to store the pointed value in the frame,
      // instead of the pointer itself.
      auto *Value = Builder.CreateLoad(ByValTy, Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    // Reload lazily: one reload per use-block, reused by all uses in that
    // block (uses are visited grouped by block).
    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to reload
      // the spilled value from the coroutine frame. Populates the Value pointer
      // reference provided with the frame GEP.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(CurrentBlock,
                               CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (ByValTy)
          // byval spills hold the pointee; the frame address itself is the
          // replacement value.
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        // Try best to find dbg.declare. If the spill is a temp, there may not
        // be a direct dbg.declare. Walk up the load chain to find one from an
        // alias.
        if (F->getSubprogram()) {
          auto *CurDef = Def;
          while (DIs.empty() && DVRs.empty() && isa<LoadInst>(CurDef)) {
            auto *LdInst = cast<LoadInst>(CurDef);
            // Only consider ptr to ptr same type load.
            if (LdInst->getPointerOperandType() != LdInst->getType())
              break;
            CurDef = LdInst->getPointerOperand();
            if (!isa<AllocaInst, LoadInst>(CurDef))
              break;
            DIs = findDbgDeclares(CurDef);
            DVRs = findDVRDeclares(CurDef);
          }
        }

        // Re-declare the variable at the reload point; works for both the
        // old dbg.declare intrinsics and the new debug-record format.
        auto SalvageOne = [&](auto *DDI) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() by the Cloner.
          if (UseNewDbgInfoFormat) {
                ValueAsMetadata::get(CurrentReload), DDI->getVariable(),
                DDI->getExpression(), DDI->getDebugLoc(),
                DbgVariableRecord::LocationType::Declare);
            Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
                NewDVR, Builder.GetInsertPoint());
          } else {
            DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
                .insertDeclare(CurrentReload, DDI->getVariable(),
                               DDI->getExpression(), DDI->getDebugLoc(),
                               &*Builder.GetInsertPoint());
          }
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
        };
        for_each(DIs, SalvageOne);
        for_each(DVRs, SalvageOne);
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by normalizing them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // reload.
      U->replaceUsesOfWith(Def, CurrentReload);
      // Instructions are added to Def's user list if the attached
      // debug records use Def. Update those now.
      for (DbgVariableRecord &DVR : filterDbgVars(U->getDbgRecordRange()))
        DVR.replaceVariableLocationOp(Def, CurrentReload, true);
    }
  }

  BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();

  auto SpillBlock = FramePtrBB->splitBasicBlock(
      Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with Geps.
    Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return;
  }

  // If we found any alloca, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas for
  // dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(Shape.AllocaSpillBlock,
                         Shape.AllocaSpillBlock->begin());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      // Only rewrite uses that CoroBegin dominates; others still see the
      // original alloca.
      if (DT.dominates(Shape.CoroBegin, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableRecord *> DbgVariableRecords;
    findDbgUsers(DIs, Alloca, &DbgVariableRecords);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);
    for (auto *DVR : DbgVariableRecords)
      DVR->replaceVariableLocationOp(Alloca, G);

    for (Instruction *I : UsersToUpdate) {
      // It is meaningless to retain the lifetime intrinsics refer for the
      // member of coroutine frames and the meaningless lifetime intrinsics
      // are possible to block further optimizations.
      if (I->isLifetimeStartOrEnd()) {
        I->eraseFromParent();
        continue;
      }

      I->replaceUsesOfWith(Alloca, G);
    }
  }
  Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
            "Coroutines cannot handle copying of array allocas yet");

      // Copy the pre-CoroBegin contents of the alloca into its frame slot.
      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto &Value = *Alias.second;
      auto ITy = IntegerType::get(C, Value.getBitWidth());
      auto *AliasPtr =
          Builder.CreatePtrAdd(FramePtr, ConstantInt::get(ITy, Value));
      Alias.first->replaceUsesWithIf(
          AliasPtr, [&](Use &U) { return DT.dominates(Shape.CoroBegin, U); });
    }
  }

  // PromiseAlloca is not collected in FrameData.Allocas. So we don't handle
  // the case that the PromiseAlloca may have writes before CoroBegin in the
  // above codes. And it may be problematic in edge cases. See
  // https://github.com/llvm/llvm-project/issues/57861 for an example.
  if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca) {
    // If there is memory accessing to promise alloca before CoroBegin;
    bool HasAccessingPromiseBeforeCB = llvm::any_of(PA->uses(), [&](Use &U) {
      auto *Inst = dyn_cast<Instruction>(U.getUser());
      if (!Inst || DT.dominates(Shape.CoroBegin, Inst))
        return false;

      if (auto *CI = dyn_cast<CallInst>(Inst)) {
        // It is fine if the call wouldn't write to the Promise.
        // This is possible for @llvm.coro.id intrinsics, which
        // would take the promise as the second argument as a
        // marker.
        if (CI->onlyReadsMemory() ||
            CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
          return false;
        return true;
      }

      return isa<StoreInst>(Inst) ||
             // It may take too much time to track the uses.
             // Be conservative about the case the use may escape.
             isa<GetElementPtrInst>(Inst) ||
             // There would always be a bitcast for the promise alloca
             // before we enabled Opaque pointers. And now given
             // opaque pointers are enabled by default. This should be
             // fine.
             isa<BitCastInst>(Inst);
    });
    if (HasAccessingPromiseBeforeCB) {
      // Copy the possibly-written promise contents into the frame slot.
      Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
      auto *G = GetFramePointer(PA);
      auto *Value = Builder.CreateLoad(PA->getAllocatedType(), PA);
      Builder.CreateStore(Value, G);
    }
  }
}
1315
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  // Walk the PHIs at the top of SuccBB, stopping at UntilPHI if given. For
  // each one, reroute the value arriving through InsertedBB via a fresh
  // single-entry PHI placed in InsertedBB.
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
    InputV->insertBefore(InsertedBB->begin());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    // dyn_cast yields nullptr past the last PHI, which matches the default
    // UntilPHI == nullptr termination condition.
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}
1334
1335// Rewrites the PHI Nodes in a cleanuppad.
1336static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1337 CleanupPadInst *CleanupPad) {
1338 // For every incoming edge to a CleanupPad we will create a new block holding
1339 // all incoming values in single-value PHI nodes. We will then create another
1340 // block to act as a dispather (as all unwind edges for related EH blocks
1341 // must be the same).
1342 //
1343 // cleanuppad:
1344 // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1345 // %3 = cleanuppad within none []
1346 //
1347 // It will create:
1348 //
1349 // cleanuppad.corodispatch
1350 // %2 = phi i8[0, %catchswitch], [1, %catch.1]
1351 // %3 = cleanuppad within none []
1352 // switch i8 % 2, label %unreachable
1353 // [i8 0, label %cleanuppad.from.catchswitch
1354 // i8 1, label %cleanuppad.from.catch.1]
1355 // cleanuppad.from.catchswitch:
1356 // %4 = phi i32 [%0, %catchswitch]
1357 // br %label cleanuppad
1358 // cleanuppad.from.catch.1:
1359 // %6 = phi i32 [%1, %catch.1]
1360 // br %label cleanuppad
1361 // cleanuppad:
1362 // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1363 // [%6, %cleanuppad.from.catch.1]
1364
1365 // Unreachable BB, in case switching on an invalid value in the dispatcher.
1366 auto *UnreachBB = BasicBlock::Create(
1367 CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1368 IRBuilder<> Builder(UnreachBB);
1369 Builder.CreateUnreachable();
1370
1371 // Create a new cleanuppad which will be the dispatcher.
1372 auto *NewCleanupPadBB =
1373 BasicBlock::Create(CleanupPadBB->getContext(),
1374 CleanupPadBB->getName() + Twine(".corodispatch"),
1375 CleanupPadBB->getParent(), CleanupPadBB);
1376 Builder.SetInsertPoint(NewCleanupPadBB);
1377 auto *SwitchType = Builder.getInt8Ty();
1378 auto *SetDispatchValuePN =
1379 Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1380 CleanupPad->removeFromParent();
1381 CleanupPad->insertAfter(SetDispatchValuePN->getIterator());
1382 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1383 pred_size(CleanupPadBB));
1384
1385 int SwitchIndex = 0;
1386 SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1387 for (BasicBlock *Pred : Preds) {
1388 // Create a new cleanuppad and move the PHI values to there.
1389 auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1390 CleanupPadBB->getName() +
1391 Twine(".from.") + Pred->getName(),
1392 CleanupPadBB->getParent(), CleanupPadBB);
1393 updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1394 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1395 Pred->getName());
1396 Builder.SetInsertPoint(CaseBB);
1397 Builder.CreateBr(CleanupPadBB);
1398 movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1399
1400 // Update this Pred to the new unwind point.
1401 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1402
1403 // Setup the switch in the dispatcher.
1404 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1405 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1406 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1407 SwitchIndex++;
1408 }
1409}
1410
  // Collect every PHI at the top of a block that has exactly one incoming
  // value; stop scanning a block at the first PHI with more than one edge.
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  // Fold each collected single-entry PHI away by forwarding its sole
  // incoming value to all users.
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}
1427
static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI nodes.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br %label loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br %label loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (!BB.empty()) {
    if (auto *CleanupPad =
            dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHIIt())) {
      for (BasicBlock *Pred : Preds) {
        if (CatchSwitchInst *CS =
                dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
          // CleanupPad with a CatchSwitch predecessor: therefore this is an
          // unwind destination that needs to be handled specially.
          assert(CS->getUnwindDest() == &BB);
          (void)CS;
          rewritePHIsForCleanupPad(&BB, CleanupPad);
          return;
        }
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if (!BB.empty()) {
    if ((LandingPad =
             dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHIIt()))) {
      // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
      // We replace the original landing pad with a PHINode that will collect the
      // results from all of them.
      ReplPHI = PHINode::Create(LandingPad->getType(), 1, "");
      ReplPHI->insertBefore(LandingPad->getIterator());
      ReplPHI->takeName(LandingPad);
      LandingPad->replaceAllUsesWith(ReplPHI);
      // We will erase the original landing pad at the end of this function after
      // ehAwareSplitEdge cloned it in the transition blocks.
    }
  }

  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge function cloned the original landing pad.
    // No longer need it.
    LandingPad->eraseFromParent();
  }
}
1503
static void rewritePHIs(Function &F) {

  // Gather every block whose leading PHI has multiple incoming edges; only
  // those need normalizing.
  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  // Rewrite after collection is done, since rewriting mutates the CFG.
  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}
1515
// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      // The block already starts at I and cannot merge values from multiple
      // predecessors, so reuse it instead of splitting.
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}
1528
// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}
1535
/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (coro::isSuspendBlock(BB))
    return true;

  // Recurse into the successors; every path from here must reach a suspend
  // (or dead-end with no successors) for the answer to be true.
  for (auto *Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}
1557
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto *U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}
1573
/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto *AI : LocalAllocas) {
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
      StackSave = Builder.CreateStackSave();

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(AI->getAlignment());

    for (auto *U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateStackRestore(StackSave);
        }
      }
      // Gets and frees alike are dead once rewritten; defer erasure to the
      // caller via DeadInsts.
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}
1612
/// Get the current swifterror value.
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(Builder.getPtrTy());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  // Record the placeholder call so later phases can find and rewrite it.
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}
1625
/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(Builder.getPtrTy(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(Builder.getPtrTy());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  // Record the placeholder call so later phases can find and rewrite it.
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}
1641
/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    // For invokes, the post-call value lives on the normal destination.
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}
1674
/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
                                      coro::Shape &Shape) {
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}
1698
1699/// "Eliminate" a swifterror argument by reducing it to the alloca case
1700/// and then loading and storing in the prologue and epilog.
1701///
1702/// The argument keeps the swifterror flag.
1704 coro::Shape &Shape,
1705 SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1706 IRBuilder<> Builder(&F.getEntryBlock(),
1707 F.getEntryBlock().getFirstNonPHIOrDbg());
1708
1709 auto ArgTy = cast<PointerType>(Arg.getType());
1710 auto ValueTy = PointerType::getUnqual(F.getContext());
1711
1712 // Reduce to the alloca case:
1713
1714 // Create an alloca and replace all uses of the arg with it.
1715 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
1716 Arg.replaceAllUsesWith(Alloca);
1717
1718 // Set an initial value in the alloca. swifterror is always null on entry.
1719 auto InitialValue = Constant::getNullValue(ValueTy);
1720 Builder.CreateStore(InitialValue, Alloca);
1721
1722 // Find all the suspends in the function and save and restore around them.
1723 for (auto *Suspend : Shape.CoroSuspends) {
1724 (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
1725 }
1726
1727 // Find all the coro.ends in the function and restore the error value.
1728 for (auto *End : Shape.CoroEnds) {
1729 Builder.SetInsertPoint(End);
1730 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
1731 (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
1732 }
1733
1734 // Now we can use the alloca logic.
1735 AllocasToPromote.push_back(Alloca);
1736 eliminateSwiftErrorAlloca(F, Alloca, Shape);
1737}
1738
/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument. A function can have at most one, so
  // stop at the first match.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}
1771
/// For each local variable whose users all live inside a single suspended
/// region, sink its lifetime.start marker to just after the suspend block.
/// Doing so minimizes the lifetime of each variable, hence minimizing the
/// amount of data we end up putting on the frame.
                                     SuspendCrossingInfo &Checker,
                                     const DominatorTree &DT) {
  if (F.hasOptNone())
    return;

  // Collect all possible basic blocks which may dominate all uses of allocas:
  // the entry block, and the block immediately after each suspend.
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(coro::isSuspendBlock(SuspendBlock) &&
           SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;

      // Is this instruction a lifetime.start intrinsic?
      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      // Record U if it is a lifetime.start of AI, or a single-use
      // cast/GEP of AI whose only user is a lifetime.start.
      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers, if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start, GEP and bitcast used by lifetime.start
          // markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to dominate block when they are
      // only used outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // Clone one marker into DomBB (all markers for AI are equivalent)
        // and retarget it at AI.
        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
        NewLifetime->insertBefore(DomBB->getTerminator()->getIterator());

        // All the outside lifetime.start markers are no longer necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}
1854
/// Walk \p Storage back through loads, stores and otherwise-salvageable
/// instructions toward the value it was derived from (typically an incoming
/// function argument), updating \p Expr to match each step. Returns the final
/// (storage, expression) pair, or std::nullopt if the chain bottomed out at a
/// null storage value.
1855 static std::optional<std::pair<Value &, DIExpression &>>
1857 bool UseEntryValue, Function *F, Value *Storage,
1858 DIExpression *Expr, bool SkipOutermostLoad) {
// Prepare to emit into the entry block, after any leading intrinsics.
1859 IRBuilder<> Builder(F->getContext());
1860 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
1861 while (isa<IntrinsicInst>(InsertPt))
1862 ++InsertPt;
1863 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
1864 
// Follow Storage until it is no longer an Instruction (e.g. an Argument)
// or it can no longer be salvaged.
1865 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
1866 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
1867 Storage = LdInst->getPointerOperand();
1868 // FIXME: This is a heuristic that works around the fact that
1869 // LLVM IR debug intrinsics cannot yet distinguish between
1870 // memory and value locations: Because a dbg.declare(alloca) is
1871 // implicitly a memory location no DW_OP_deref operation for the
1872 // last direct load from an alloca is necessary. This condition
1873 // effectively drops the *last* DW_OP_deref in the expression.
1874 if (!SkipOutermostLoad)
1876 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
// A store: the stored value is the thing being described.
1877 Storage = StInst->getValueOperand();
1878 } else {
// Generic case: try to re-express Inst in terms of its operand via
// DIExpression operations.
1880 SmallVector<Value *, 0> AdditionalValues;
1882 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
1883 AdditionalValues);
1884 if (!Op || !AdditionalValues.empty()) {
1885 // If salvaging failed or salvaging produced more than one location
1886 // operand, give up.
1887 break;
1888 }
1889 Storage = Op;
1890 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
1891 }
// Only the outermost load may be skipped; deeper loads need their deref.
1892 SkipOutermostLoad = false;
1893 }
1894 if (!Storage)
1895 return std::nullopt;
1896 
1897 auto *StorageAsArg = dyn_cast<Argument>(Storage);
1898 const bool IsSwiftAsyncArg =
1899 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
1900 
1901 // Swift async arguments are described by an entry value of the ABI-defined
1902 // register containing the coroutine context.
1903 // Entry values in variadic expressions are not supported.
1904 if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&
1907 
1908 // If the coroutine frame is an Argument, store it in an alloca to improve
1909 // its availability (e.g. registers may be clobbered).
1910 // Avoid this if the value is guaranteed to be available through other means
1911 // (e.g. swift ABI guarantees).
1912 if (StorageAsArg && !IsSwiftAsyncArg) {
// Cache one spill alloca per argument so repeated salvages reuse it.
1913 auto &Cached = ArgToAllocaMap[StorageAsArg];
1914 if (!Cached) {
1915 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
1916 Storage->getName() + ".debug");
1917 Builder.CreateStore(Storage, Cached);
1918 }
1919 Storage = Cached;
1920 // FIXME: LLVM lacks nuanced semantics to differentiate between
1921 // memory and direct locations at the IR level. The backend will
1922 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
1923 // location. Thus, if there are deref and offset operations in the
1924 // expression, we need to add a DW_OP_deref at the *start* of the
1925 // expression to first load the contents of the alloca before
1926 // adjusting it with the expression.
1928 }
1929 
// Simplify any constant math the salvage steps may have introduced.
1930 Expr = Expr->foldConstantMath();
1931 return {{*Storage, *Expr}};
1932 }
1933
1936 DbgVariableIntrinsic &DVI, bool UseEntryValue) {
1937 
1938 Function *F = DVI.getFunction();
1939 // Follow the pointer arithmetic all the way to the incoming
1940 // function argument and convert into a DIExpression.
// dbg.declare describes a memory location, so its outermost load is
// implicit and must not add a DW_OP_deref; dbg.value has no such load.
1941 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
1942 Value *OriginalStorage = DVI.getVariableLocationOp(0);
1943 
1944 auto SalvagedInfo =
1945 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, OriginalStorage,
1946 DVI.getExpression(), SkipOutermostLoad);
// If the storage chain could not be followed, leave the intrinsic as-is.
1947 if (!SalvagedInfo)
1948 return;
1949 
1950 Value *Storage = &SalvagedInfo->first;
1951 DIExpression *Expr = &SalvagedInfo->second;
1952 
// Re-point the intrinsic at the salvaged storage/expression pair.
1953 DVI.replaceVariableLocationOp(OriginalStorage, Storage);
1954 DVI.setExpression(Expr);
1955 // We only hoist dbg.declare today since it doesn't make sense to hoist
1956 // dbg.value since it does not have the same function wide guarantees that
1957 // dbg.declare does.
1958 if (isa<DbgDeclareInst>(DVI)) {
1959 std::optional<BasicBlock::iterator> InsertPt;
1960 if (auto *I = dyn_cast<Instruction>(Storage)) {
// Move the declare right after the definition of its new storage.
1961 InsertPt = I->getInsertionPointAfterDef();
1962 // Update DILocation only if variable was not inlined.
1963 DebugLoc ILoc = I->getDebugLoc();
1964 DebugLoc DVILoc = DVI.getDebugLoc();
1965 if (ILoc && DVILoc &&
1966 DVILoc->getScope()->getSubprogram() ==
1967 ILoc->getScope()->getSubprogram())
1968 DVI.setDebugLoc(I->getDebugLoc());
1969 } else if (isa<Argument>(Storage))
// Arguments are live from function entry.
1970 InsertPt = F->getEntryBlock().begin();
1971 if (InsertPt)
1972 DVI.moveBefore(*(*InsertPt)->getParent(), *InsertPt);
1973 }
1974 }
1975
1978 DbgVariableRecord &DVR, bool UseEntryValue) {
1979 
1980 Function *F = DVR.getFunction();
1981 // Follow the pointer arithmetic all the way to the incoming
1982 // function argument and convert into a DIExpression.
// Declare-style records describe a memory location, so the outermost load
// is implicit and must not add a DW_OP_deref.
1983 bool SkipOutermostLoad = DVR.isDbgDeclare();
1984 Value *OriginalStorage = DVR.getVariableLocationOp(0);
1985 
1986 auto SalvagedInfo =
1987 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, OriginalStorage,
1988 DVR.getExpression(), SkipOutermostLoad);
// If the storage chain could not be followed, leave the record as-is.
1989 if (!SalvagedInfo)
1990 return;
1991 
1992 Value *Storage = &SalvagedInfo->first;
1993 DIExpression *Expr = &SalvagedInfo->second;
1994 
// Re-point the record at the salvaged storage/expression pair.
1995 DVR.replaceVariableLocationOp(OriginalStorage, Storage);
1996 DVR.setExpression(Expr);
1997 // We only hoist dbg.declare today since it doesn't make sense to hoist
1998 // dbg.value since it does not have the same function wide guarantees that
1999 // dbg.declare does.
2000 if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
2001 std::optional<BasicBlock::iterator> InsertPt;
2002 if (auto *I = dyn_cast<Instruction>(Storage)) {
// Move the declare right after the definition of its new storage.
2003 InsertPt = I->getInsertionPointAfterDef();
2004 // Update DILocation only if variable was not inlined.
2005 DebugLoc ILoc = I->getDebugLoc();
2006 DebugLoc DVRLoc = DVR.getDebugLoc();
2007 if (ILoc && DVRLoc &&
2008 DVRLoc->getScope()->getSubprogram() ==
2009 ILoc->getScope()->getSubprogram())
2010 DVR.setDebugLoc(ILoc);
2011 } else if (isa<Argument>(Storage))
// Arguments are live from function entry.
2012 InsertPt = F->getEntryBlock().begin();
2013 if (InsertPt) {
// DbgRecords are not instructions: unlink and re-insert explicitly.
2014 DVR.removeFromParent();
2015 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
2016 }
2017 }
2018 }
2019
2022 // Don't eliminate swifterror in async functions that won't be split.
2023 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2025 
2026 if (Shape.ABI == coro::ABI::Switch &&
2029 }
2030 
2031 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2032 // intrinsics are in their own blocks to simplify the logic of building up
2033 // SuspendCrossing data.
2034 for (auto *CSI : Shape.CoroSuspends) {
2035 if (auto *Save = CSI->getCoroSave())
2036 splitAround(Save, "CoroSave");
2037 splitAround(CSI, "CoroSuspend");
2038 }
2039 
2040 // Put CoroEnds into their own blocks.
2041 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2042 splitAround(CE, "CoroEnd");
2043 
2044 // Emit the musttail call function in a new block before the CoroEnd.
2045 // We do this here so that the right suspend crossing info is computed for
2046 // the uses of the musttail call function call. (Arguments to the coro.end
2047 // instructions would be ignored)
2048 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2049 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2050 if (!MustTailCallFn)
2051 continue;
2052 IRBuilder<> Builder(AsyncEnd);
2053 SmallVector<Value *, 8> Args(AsyncEnd->args());
// Only the operands after the first three are forwarded to the
// musttail call.
2054 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2055 auto *Call = coro::createMustTailCall(
2056 AsyncEnd->getDebugLoc(), MustTailCallFn, TTI, Arguments, Builder);
2057 splitAround(Call, "MustTailCall.Before.CoroEnd");
2058 }
2059 }
2060 
2061 // Later code makes structural assumptions about single predecessors phis e.g
2062 // that they are not live across a suspend point.
2064 
2065 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2066 // never have its definition separated from the PHI by the suspend point.
2067 rewritePHIs(F);
2068 }
2069
/// Drive coroutine frame construction: find values that live across suspend
/// points, lay out the frame struct type, attach debug info for it, and
/// rewrite the function to spill/reload through the frame.
2070 void coro::BaseABI::buildCoroutineFrame(bool OptimizeFrame) {
2073 
2074 const DominatorTree DT(F);
// Shrink alloca lifetimes first so fewer values appear live across suspends.
2077 sinkLifetimeStartMarkers(F, Shape, Checker, DT);
2078 
2079 // All values (that are not allocas) that needs to be spilled to the frame.
2080 coro::SpillInfo Spills;
2081 // All values defined as allocas that need to live in the frame.
2083 
2084 // Collect the spills for arguments and other not-materializable values.
2085 coro::collectSpillsFromArgs(Spills, F, Checker);
2086 SmallVector<Instruction *, 4> DeadInstructions;
2088 coro::collectSpillsAndAllocasFromInsts(Spills, Allocas, DeadInstructions,
2089 LocalAllocas, F, Checker, DT, Shape);
2090 coro::collectSpillsFromDbgInfo(Spills, F, Checker);
2091 
2092 LLVM_DEBUG(dumpAllocas(Allocas));
2093 LLVM_DEBUG(dumpSpills("Spills", Spills));
2094 
// Ensure spilled values are only used after the frame exists.
2097 sinkSpillUsesAfterCoroBegin(DT, Shape.CoroBegin, Spills, Allocas);
2098 
2099 // Build frame
2100 FrameDataInfo FrameData(Spills, Allocas);
2101 Shape.FrameTy = buildFrameType(F, Shape, FrameData, OptimizeFrame);
2103 // For now, this works for C++ programs only.
2104 buildFrameDebugInfo(F, Shape, FrameData);
2105 // Insert spills and reloads
2106 insertSpills(FrameData, Shape);
2107 lowerLocalAllocas(LocalAllocas, DeadInstructions);
2108 
// Finally, delete instructions made dead during spill collection/lowering.
2109 for (auto *I : DeadInstructions)
2110 I->eraseFromParent();
2111 }
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static void cleanupSinglePredPHIs(Function &F)
Definition: CoroFrame.cpp:1411
static std::optional< std::pair< Value &, DIExpression & > > salvageDebugInfoImpl(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, bool UseEntryValue, Function *F, Value *Storage, DIExpression *Expr, bool SkipOutermostLoad)
Definition: CoroFrame.cpp:1856
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
Definition: CoroFrame.cpp:1741
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
Definition: CoroFrame.cpp:1576
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
Definition: CoroFrame.cpp:1629
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
Definition: CoroFrame.cpp:1647
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
Definition: CoroFrame.cpp:544
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
Definition: CoroFrame.cpp:1558
static void dumpAllocas(const SmallVectorImpl< coro::AllocaInfo > &Allocas)
Definition: CoroFrame.cpp:150
static void splitAround(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:1531
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
Definition: CoroFrame.cpp:1677
static void rewritePHIs(BasicBlock &BB)
Definition: CoroFrame.cpp:1428
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
Definition: CoroFrame.cpp:1318
static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills)
Definition: CoroFrame.cpp:140
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
Definition: CoroFrame.cpp:602
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
Definition: CoroFrame.cpp:1538
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData, bool OptimizeFrame)
Definition: CoroFrame.cpp:866
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
Definition: CoroFrame.cpp:1703
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
Definition: CoroFrame.cpp:689
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
Definition: CoroFrame.cpp:1518
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
Definition: CoroFrame.cpp:1336
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker, const DominatorTree &DT)
For each local variable that all of its user are only used inside one of suspended region,...
Definition: CoroFrame.cpp:1776
static StringRef solveTypeName(Type *Ty)
Create name for Type.
Definition: CoroFrame.cpp:564
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
Definition: CoroFrame.cpp:1614
cl::opt< bool > UseNewDbgInfoFormat
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
Definition: CoroFrame.cpp:1005
Given that RA is a live value
#define LLVM_DEBUG(...)
Definition: Debug.h:106
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isLifetimeStart(const Instruction *Inst)
Definition: GVN.cpp:1156
Hexagon Common GEP
static MaybeAlign getAlign(Value *Ptr)
Definition: IRBuilder.cpp:500
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
uint64_t IntrinsicInst * II
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
#define P(N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
raw_pwrite_stream & OS
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
static const unsigned FramePtr
an instruction to allocate memory on the stack
Definition: Instructions.h:63
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:149
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:151
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:124
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:99
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:128
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:95
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:207
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:461
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:437
bool empty() const
Definition: BasicBlock.h:483
InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:381
const Instruction & front() const
Definition: BasicBlock.h:484
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:213
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:599
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:511
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:220
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:168
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1826
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
This represents the llvm.coro.alloca.alloc instruction.
Definition: CoroInstr.h:745
void clearPromise()
Definition: CoroInstr.h:158
This represents the llvm.coro.suspend instruction.
Definition: CoroInstr.h:530
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
Definition: DIBuilder.cpp:316
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero, uint32_t NumExtraInhabitants=0)
Create debugging information entry for a basic type.
Definition: DIBuilder.cpp:266
DISubrange * getOrCreateSubrange(int64_t Lo, int64_t Count)
Create a descriptor for a value range.
Definition: DIBuilder.cpp:721
DICompositeType * createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts, PointerUnion< DIExpression *, DIVariable * > DataLocation=nullptr, PointerUnion< DIExpression *, DIVariable * > Associated=nullptr, PointerUnion< DIExpression *, DIVariable * > Allocated=nullptr, PointerUnion< DIExpression *, DIVariable * > Rank=nullptr)
Create debugging information entry for an array.
Definition: DIBuilder.cpp:598
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
Definition: DIBuilder.cpp:701
DIExpression * createExpression(ArrayRef< uint64_t > Addr={})
Create a new descriptor for the specified variable which has a complex address expression for its add...
Definition: DIBuilder.cpp:852
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
Definition: DIBuilder.cpp:390
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
Definition: DIBuilder.cpp:814
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="", DIType *Specification=nullptr, uint32_t NumExtraInhabitants=0)
Create debugging information entry for a struct.
Definition: DIBuilder.cpp:520
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
Definition: DIBuilder.cpp:1214
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
Debug location.
Base class for scope-like contexts.
DIFile * getFile() const
Subprogram description.
Base class for types.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:709
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:843
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:617
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:847
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
A debug info location.
Definition: DebugLoc.h:33
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
iterator end()
Definition: DenseMap.h:84
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:147
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:103
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:657
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1781
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
Definition: IRBuilder.h:1088
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1815
UnreachableInst * CreateUnreachable()
Definition: IRBuilder.h:1306
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:194
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2147
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1987
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1882
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2435
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1757
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
Definition: IRBuilder.h:1187
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1798
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1518
Value * CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, const Twine &Name="")
Definition: IRBuilder.h:1921
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1811
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1370
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2142
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2449
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:588
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1158
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
Definition: IRBuilder.h:1095
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:199
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1834
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:535
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2157
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2705
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
Definition: Instruction.cpp:80
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:99
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:511
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:68
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:94
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:72
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:508
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
Definition: Metadata.cpp:1077
LLVMContext & getContext() const
Definition: Metadata.h:1237
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:606
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1506
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
StringRef str() const
Explicit conversion to StringRef.
Definition: SmallString.h:254
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
Compute live ranges of allocas.
Definition: StackLifetime.h:37
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:229
TypeSize getElementOffsetInBits(unsigned Idx) const
Definition: DataLayout.h:601
Class to represent struct types.
Definition: DerivedTypes.h:218
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:612
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:365
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:366
bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const
Multiway switch.
void setDefaultDest(BasicBlock *DefaultCase)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
bool empty() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:345
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:264
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:153
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:258
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:156
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
void set(Value *Val)
Definition: Value.h:892
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:64
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:501
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1094
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
std::function< bool(Instruction &I)> IsMaterializable
Definition: ABI.h:63
Function & F
Definition: ABI.h:58
virtual void buildCoroutineFrame(bool OptimizeFrame)
Definition: CoroFrame.cpp:2070
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic &DVI, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer,...
Definition: CoroFrame.cpp:1934
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
BasicBlock::iterator getSpillInsertionPt(const coro::Shape &, Value *Def, const DominatorTree &DT)
Definition: SpillUtils.cpp:585
bool isSuspendBlock(BasicBlock *BB)
Definition: Coroutines.cpp:106
void normalizeCoroutine(Function &F, coro::Shape &Shape, TargetTransformInfo &TTI)
Definition: CoroFrame.cpp:2020
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
Definition: CoroSplit.cpp:1705
void sinkSpillUsesAfterCoroBegin(const DominatorTree &DT, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills, SmallVectorImpl< coro::AllocaInfo > &Allocas)
Async and Retcon{Once} conventions assume that all spill uses can be sunk after the coro....
Definition: SpillUtils.cpp:538
void doRematerializations(Function &F, SuspendCrossingInfo &Checker, std::function< bool(Instruction &)> IsMaterializable)
void collectSpillsFromArgs(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
Definition: SpillUtils.cpp:451
void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
Definition: SpillUtils.cpp:515
void collectSpillsAndAllocasFromInsts(SpillInfo &Spills, SmallVector< AllocaInfo, 8 > &Allocas, SmallVector< Instruction *, 4 > &DeadInstructions, SmallVector< CoroAllocaAllocInst *, 4 > &LocalAllocas, Function &F, const SuspendCrossingInfo &Checker, const DominatorTree &DT, const coro::Shape &Shape)
Definition: SpillUtils.cpp:460
SourceLanguage
Definition: Dwarf.h:214
bool isCPlusPlus(SourceLanguage S)
Definition: Dwarf.h:504
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1732
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
Definition: DebugInfo.cpp:47
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition: MathExtras.h:360
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:162
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
auto pred_size(const MachineBasicBlock *BB)
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connecting the specified blocks in the case that Succ is an Exception Handling Block.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2614
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
Definition: Alignment.h:197
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize t...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1766
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
As above, for DVRDeclares.
Definition: DebugInfo.cpp:66
auto predecessors(const MachineBasicBlock *BB)
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
AsyncLoweringStorage AsyncLowering
Definition: CoroShape.h:150
StructType * FrameTy
Definition: CoroShape.h:109
AnyCoroIdRetconInst * getRetconCoroId() const
Definition: CoroShape.h:158
CoroIdInst * getSwitchCoroId() const
Definition: CoroShape.h:153
coro::ABI ABI
Definition: CoroShape.h:107
Value * FramePtr
Definition: CoroShape.h:112
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition: CoroShape.h:57
uint64_t FrameSize
Definition: CoroShape.h:111
AllocaInst * getPromiseAlloca() const
Definition: CoroShape.h:239
SwitchLoweringStorage SwitchLowering
Definition: CoroShape.h:148
CoroBeginInst * CoroBegin
Definition: CoroShape.h:53
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition: CoroShape.h:245
RetconLoweringStorage RetconLowering
Definition: CoroShape.h:149
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition: CoroShape.h:54
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition: CoroShape.h:62
BasicBlock * AllocaSpillBlock
Definition: CoroShape.h:113