85#define DEBUG_TYPE "inline-function"
94 cl::desc(
"Convert noalias attributes to metadata during inlining."));
99 cl::desc(
"Use the llvm.experimental.noalias.scope.decl "
100 "intrinsic during inlining."));
108 cl::desc(
"Convert align attributes to assumptions during inlining."));
111 "max-inst-checked-for-throw-during-inlining",
cl::Hidden,
112 cl::desc(
"the maximum number of instructions analyzed for may throw during "
113 "attribute inference in inlined body"),
119 class LandingPadInliningInfo {
130 PHINode *InnerEHValuesPHI =
nullptr;
136 : OuterResumeDest(
II->getUnwindDest()) {
142 for (; isa<PHINode>(
I); ++
I) {
145 UnwindDestPHIValues.
push_back(
PHI->getIncomingValueForBlock(InvokeBB));
148 CallerLPad = cast<LandingPadInst>(
I);
154 return OuterResumeDest;
171 void addIncomingPHIValuesFor(
BasicBlock *BB)
const {
172 addIncomingPHIValuesForInto(BB, OuterResumeDest);
177 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
179 phi->addIncoming(UnwindDestPHIValues[i], src);
187 while (It != BB.
end()) {
188 if (
auto *IntrinsicCall = dyn_cast<ConvergenceControlInst>(It)) {
189 if (IntrinsicCall->isEntry()) {
190 return IntrinsicCall;
199BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
200 if (InnerResumeDest)
return InnerResumeDest;
206 OuterResumeDest->
getName() +
".body");
209 const unsigned PHICapacity = 2;
214 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
215 PHINode *OuterPHI = cast<PHINode>(
I);
217 OuterPHI->
getName() +
".lpad-body");
228 InnerEHValuesPHI->
addIncoming(CallerLPad, OuterResumeDest);
231 return InnerResumeDest;
238void LandingPadInliningInfo::forwardResume(
248 addIncomingPHIValuesForInto(Src, Dest);
256 if (
auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
257 return FPI->getParentPad();
258 return cast<CatchSwitchInst>(EHPad)->getParentPad();
269 while (!Worklist.
empty()) {
276 Value *UnwindDestToken =
nullptr;
277 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
278 if (CatchSwitch->hasUnwindDest()) {
279 UnwindDestToken = &*CatchSwitch->getUnwindDest()->getFirstNonPHIIt();
287 for (
auto HI = CatchSwitch->handler_begin(),
288 HE = CatchSwitch->handler_end();
289 HI != HE && !UnwindDestToken; ++HI) {
298 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
302 auto Memo = MemoMap.
find(ChildPad);
303 if (Memo == MemoMap.
end()) {
310 Value *ChildUnwindDestToken = Memo->second;
311 if (!ChildUnwindDestToken)
317 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
318 UnwindDestToken = ChildUnwindDestToken;
326 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
327 for (
User *U : CleanupPad->users()) {
328 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
329 if (
BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
330 UnwindDestToken = &*RetUnwindDest->getFirstNonPHIIt();
335 Value *ChildUnwindDestToken;
336 if (
auto *Invoke = dyn_cast<InvokeInst>(U)) {
337 ChildUnwindDestToken = &*Invoke->getUnwindDest()->getFirstNonPHIIt();
338 }
else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
340 auto Memo = MemoMap.
find(ChildPad);
341 if (Memo == MemoMap.
end()) {
348 ChildUnwindDestToken = Memo->second;
349 if (!ChildUnwindDestToken)
358 if (isa<Instruction>(ChildUnwindDestToken) &&
361 UnwindDestToken = ChildUnwindDestToken;
367 if (!UnwindDestToken)
375 if (
auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
378 UnwindParent =
nullptr;
379 bool ExitedOriginalPad =
false;
381 ExitedPad && ExitedPad != UnwindParent;
382 ExitedPad = dyn_cast<Instruction>(
getParentPad(ExitedPad))) {
384 if (isa<CatchPadInst>(ExitedPad))
386 MemoMap[ExitedPad] = UnwindDestToken;
387 ExitedOriginalPad |= (ExitedPad == EHPad);
390 if (ExitedOriginalPad)
391 return UnwindDestToken;
422 if (
auto *CPI = dyn_cast<CatchPadInst>(EHPad))
423 EHPad = CPI->getCatchSwitch();
426 auto Memo = MemoMap.
find(EHPad);
427 if (Memo != MemoMap.
end())
432 assert((UnwindDestToken ==
nullptr) != (MemoMap.
count(EHPad) != 0));
434 return UnwindDestToken;
441 MemoMap[EHPad] =
nullptr;
447 Value *AncestorToken;
449 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
452 if (isa<CatchPadInst>(AncestorPad))
461 assert(!MemoMap.
count(AncestorPad) || MemoMap[AncestorPad]);
462 auto AncestorMemo = MemoMap.
find(AncestorPad);
463 if (AncestorMemo == MemoMap.
end()) {
466 UnwindDestToken = AncestorMemo->second;
470 LastUselessPad = AncestorPad;
471 MemoMap[LastUselessPad] =
nullptr;
473 TempMemos.
insert(LastUselessPad);
491 while (!Worklist.
empty()) {
493 auto Memo = MemoMap.
find(UselessPad);
494 if (Memo != MemoMap.
end() && Memo->second) {
522 MemoMap[UselessPad] = UnwindDestToken;
523 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
524 assert(CatchSwitch->getUnwindDest() ==
nullptr &&
"Expected useless pad");
525 for (
BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
526 auto *CatchPad = &*HandlerBlock->getFirstNonPHIIt();
527 for (
User *U : CatchPad->users()) {
528 assert((!isa<InvokeInst>(U) ||
531 ->getFirstNonPHIIt()) == CatchPad)) &&
532 "Expected useless pad");
533 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
534 Worklist.
push_back(cast<Instruction>(U));
538 assert(isa<CleanupPadInst>(UselessPad));
540 assert(!isa<CleanupReturnInst>(U) &&
"Expected useless pad");
542 (!isa<InvokeInst>(U) ||
544 &*cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHIIt()) ==
546 "Expected useless pad");
547 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
548 Worklist.
push_back(cast<Instruction>(U));
553 return UnwindDestToken;
579 if (
F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
580 F->getIntrinsicID() == Intrinsic::experimental_guard)
591 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
592 Value *UnwindDestToken =
594 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
598 if (
auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
599 MemoKey = CatchPad->getCatchSwitch();
601 MemoKey = FuncletPad;
602 assert(FuncletUnwindMap->count(MemoKey) &&
603 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
604 "must get memoized to avoid confusing later searches");
629 LandingPadInliningInfo Invoke(
II);
635 if (
InvokeInst *
II = dyn_cast<InvokeInst>(
I->getTerminator()))
636 InlinedLPads.
insert(
II->getLandingPadInst());
643 InlinedLPad->reserveClauses(OuterNum);
644 for (
unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
645 InlinedLPad->addClause(OuterLPad->
getClause(OuterIdx));
647 InlinedLPad->setCleanup(
true);
654 &*BB, Invoke.getOuterResumeDest()))
657 Invoke.addIncomingPHIValuesFor(NewBB);
660 if (
ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
661 Invoke.forwardResume(RI, InlinedLPads);
691 UnwindDestPHIValues.
push_back(
PHI.getIncomingValueForBlock(InvokeBB));
698 for (
Value *V : UnwindDestPHIValues) {
700 PHI->addIncoming(V, Src);
710 if (
auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
711 if (CRI->unwindsToCaller()) {
712 auto *CleanupPad = CRI->getCleanupPad();
714 CRI->eraseFromParent();
721 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
722 FuncletUnwindMap[CleanupPad] =
732 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
733 if (CatchSwitch->unwindsToCaller()) {
734 Value *UnwindDestToken;
735 if (
auto *ParentPad =
736 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
746 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
759 CatchSwitch->getParentPad(), UnwindDest,
760 CatchSwitch->getNumHandlers(), CatchSwitch->
getName(),
761 CatchSwitch->getIterator());
762 for (
BasicBlock *PadBB : CatchSwitch->handlers())
763 NewCatchSwitch->addHandler(PadBB);
768 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
769 Replacement = NewCatchSwitch;
771 }
else if (!isa<FuncletPadInst>(
I)) {
777 I->replaceAllUsesWith(Replacement);
778 I->eraseFromParent();
788 &*BB, UnwindDest, &FuncletUnwindMap))
801 MDNode *CallsiteStackContext) {
807 for (
auto MIBStackIter = MIBStackContext->
op_begin(),
808 CallsiteStackIter = CallsiteStackContext->
op_begin();
809 MIBStackIter != MIBStackContext->
op_end() &&
810 CallsiteStackIter != CallsiteStackContext->
op_end();
811 MIBStackIter++, CallsiteStackIter++) {
812 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
813 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
815 if (Val1->getZExtValue() != Val2->getZExtValue())
822 Call->setMetadata(LLVMContext::MD_memprof,
nullptr);
826 Call->setMetadata(LLVMContext::MD_callsite,
nullptr);
830 const std::vector<Metadata *> &MIBList,
838 CallStack.addCallStack(cast<MDNode>(MIB));
839 bool MemprofMDAttached =
CallStack.buildAndAttachMIBMetadata(CI);
841 if (!MemprofMDAttached)
851 MDNode *InlinedCallsiteMD,
854 MDNode *ClonedCallsiteMD =
nullptr;
857 if (OrigCallsiteMD) {
862 ClonedCall->
setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
874 std::vector<Metadata *> NewMIBList;
879 for (
auto &MIBOp : OrigMemProfMD->
operands()) {
880 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
887 NewMIBList.push_back(MIB);
889 if (NewMIBList.empty()) {
905 bool ContainsMemProfMetadata,
911 if (!CallsiteMD && !ContainsMemProfMetadata)
915 for (
const auto &Entry : VMap) {
918 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
919 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
920 if (!OrigCall || !ClonedCall)
939 MDNode *MemParallelLoopAccess =
940 CB.
getMetadata(LLVMContext::MD_mem_parallel_loop_access);
944 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
950 if (!
I.mayReadOrWriteMemory())
953 if (MemParallelLoopAccess) {
956 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
957 MemParallelLoopAccess);
958 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
959 MemParallelLoopAccess);
964 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
968 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
972 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
990 dyn_cast<Function>(
I->getCalledOperand()->stripPointerCasts());
991 if (CalledFn && CalledFn->isIntrinsic() &&
I->doesNotThrow() &&
996 I->getOperandBundlesAsDefs(OpBundles);
1001 I->replaceAllUsesWith(NewInst);
1002 I->eraseFromParent();
1011class ScopedAliasMetadataDeepCloner {
1015 void addRecursiveMetadataUses();
1018 ScopedAliasMetadataDeepCloner(
const Function *
F);
1030ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1034 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1036 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1040 if (
const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1041 MD.insert(Decl->getScopeList());
1044 addRecursiveMetadataUses();
1047void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1049 while (!
Queue.empty()) {
1052 if (
const MDNode *OpMD = dyn_cast<MDNode>(
Op))
1053 if (MD.insert(OpMD))
1054 Queue.push_back(OpMD);
1058void ScopedAliasMetadataDeepCloner::clone() {
1059 assert(MDMap.empty() &&
"clone() already called ?");
1064 MDMap[
I].reset(DummyNodes.
back().get());
1073 if (
const MDNode *M = dyn_cast<MDNode>(
Op))
1080 MDTuple *TempM = cast<MDTuple>(MDMap[
I]);
1097 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1098 if (
MDNode *MNew = MDMap.lookup(M))
1099 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1101 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1102 if (
MDNode *MNew = MDMap.lookup(M))
1103 I.setMetadata(LLVMContext::MD_noalias, MNew);
1105 if (
auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1106 if (
MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1107 Decl->setScopeList(MNew);
1126 if (CB.
paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1129 if (NoAliasArgs.
empty())
1149 for (
unsigned i = 0, e = NoAliasArgs.
size(); i != e; ++i) {
1152 std::string
Name = std::string(CalledFunc->
getName());
1155 Name +=
A->getName();
1157 Name +=
": argument ";
1165 NewScopes.
insert(std::make_pair(
A, NewScope));
1182 VMI != VMIE; ++VMI) {
1183 if (
const Instruction *
I = dyn_cast<Instruction>(VMI->first)) {
1187 Instruction *NI = dyn_cast<Instruction>(VMI->second);
1191 bool IsArgMemOnlyCall =
false, IsFuncCall =
false;
1194 if (
const LoadInst *LI = dyn_cast<LoadInst>(
I))
1195 PtrArgs.
push_back(LI->getPointerOperand());
1196 else if (
const StoreInst *SI = dyn_cast<StoreInst>(
I))
1197 PtrArgs.
push_back(SI->getPointerOperand());
1198 else if (
const VAArgInst *VAAI = dyn_cast<VAArgInst>(
I))
1199 PtrArgs.
push_back(VAAI->getPointerOperand());
1201 PtrArgs.
push_back(CXI->getPointerOperand());
1203 PtrArgs.
push_back(RMWI->getPointerOperand());
1204 else if (
const auto *Call = dyn_cast<CallBase>(
I)) {
1208 if (Call->doesNotAccessMemory())
1220 IsArgMemOnlyCall =
true;
1223 for (
Value *Arg : Call->args()) {
1227 if (!Arg->getType()->isPointerTy())
1238 if (PtrArgs.
empty() && !IsFuncCall)
1247 for (
const Value *V : PtrArgs) {
1256 bool RequiresNoCaptureBefore =
false, UsesAliasingPtr =
false,
1257 UsesUnknownObject =
false;
1258 for (
const Value *V : ObjSet) {
1262 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1263 isa<ConstantPointerNull>(V) ||
1264 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1271 if (
const Argument *
A = dyn_cast<Argument>(V)) {
1273 UsesAliasingPtr =
true;
1275 UsesAliasingPtr =
true;
1281 RequiresNoCaptureBefore =
true;
1287 UsesUnknownObject =
true;
1293 if (UsesUnknownObject)
1298 if (IsFuncCall && !IsArgMemOnlyCall)
1299 RequiresNoCaptureBefore =
true;
1317 if (!RequiresNoCaptureBefore ||
1319 A,
false,
I, &DT,
false,
1320 CaptureComponents::Provenance)))
1340 bool CanAddScopes = !UsesAliasingPtr;
1341 if (CanAddScopes && IsFuncCall)
1342 CanAddScopes = IsArgMemOnlyCall;
1347 Scopes.push_back(NewScopes[
A]);
1350 if (!Scopes.empty())
1352 LLVMContext::MD_alias_scope,
1363 "Expected to be in same basic block!");
1365 assert(BeginIt !=
End->getIterator() &&
"Non-empty BB has empty iterator");
1376 auto &
Context = CalledFunction->getContext();
1380 bool HasAttrToPropagate =
false;
1388 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,
1389 Attribute::NonNull, Attribute::NoFPClass,
1390 Attribute::Alignment, Attribute::Range};
1392 for (
unsigned I = 0, E = CB.
arg_size();
I < E; ++
I) {
1398 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadNone);
1400 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadOnly);
1405 ValidExactParamAttrs.
back().addAttribute(Attr);
1408 HasAttrToPropagate |= ValidObjParamAttrs.
back().hasAttributes();
1409 HasAttrToPropagate |= ValidExactParamAttrs.
back().hasAttributes();
1413 if (!HasAttrToPropagate)
1418 const auto *InnerCB = dyn_cast<CallBase>(&Ins);
1421 auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.
lookup(InnerCB));
1426 if (InlinedFunctionInfo.
isSimplified(InnerCB, NewInnerCB))
1430 for (
unsigned I = 0, E = InnerCB->arg_size();
I < E; ++
I) {
1435 if (NewInnerCB->paramHasAttr(
I, Attribute::ByVal))
1439 if (
match(NewInnerCB->getArgOperand(
I),
1444 const Argument *Arg = dyn_cast<Argument>(InnerCB->getArgOperand(
I));
1455 if (AL.getParamDereferenceableBytes(
I) >
1456 NewAB.getDereferenceableBytes())
1458 if (AL.getParamDereferenceableOrNullBytes(
I) >
1459 NewAB.getDereferenceableOrNullBytes())
1461 if (AL.getParamAlignment(
I).valueOrOne() >
1462 NewAB.getAlignment().valueOrOne())
1464 if (
auto ExistingRange = AL.getParamRange(
I)) {
1465 if (
auto NewRange = NewAB.getRange()) {
1468 NewAB.removeAttribute(Attribute::Range);
1469 NewAB.addRangeAttr(CombinedRange);
1473 if (
FPClassTest ExistingNoFP = AL.getParamNoFPClass(
I))
1474 NewAB.addNoFPClassAttr(ExistingNoFP | NewAB.getNoFPClass());
1476 AL = AL.addParamAttributes(
Context,
I, NewAB);
1477 }
else if (NewInnerCB->getArgOperand(
I)->getType()->isPointerTy()) {
1479 const Value *UnderlyingV =
1481 Arg = dyn_cast<Argument>(UnderlyingV);
1490 AL = AL.addParamAttributes(
Context,
I, ValidObjParamAttrs[ArgNo]);
1497 if (AL.hasParamAttr(
I, Attribute::ReadOnly) &&
1498 AL.hasParamAttr(
I, Attribute::WriteOnly))
1499 AL = AL.addParamAttribute(
Context,
I, Attribute::ReadNone);
1502 if (AL.hasParamAttr(
I, Attribute::ReadNone)) {
1503 AL = AL.removeParamAttribute(
Context,
I, Attribute::ReadOnly);
1504 AL = AL.removeParamAttribute(
Context,
I, Attribute::WriteOnly);
1508 if (AL.hasParamAttr(
I, Attribute::ReadOnly) ||
1509 AL.hasParamAttr(
I, Attribute::ReadNone))
1510 AL = AL.removeParamAttribute(
Context,
I, Attribute::Writable);
1512 NewInnerCB->setAttributes(AL);
1556 auto &
Context = CalledFunction->getContext();
1558 for (
auto &BB : *CalledFunction) {
1559 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1560 if (!RI || !isa<CallBase>(RI->
getOperand(0)))
1562 auto *RetVal = cast<CallBase>(RI->
getOperand(0));
1566 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.
lookup(RetVal));
1572 if (InlinedFunctionInfo.
isSimplified(RetVal, NewRetVal))
1592 if (RI->
getParent() != RetVal->getParent() ||
1605 AL.getRetDereferenceableOrNullBytes())
1645 Attribute NewRange = AL.getRetAttr(Attribute::Range);
1663 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1666 NewRetVal->setAttributes(NewAL);
1682 bool DTCalculated =
false;
1686 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1693 if (!DTCalculated) {
1695 DTCalculated =
true;
1704 DL, ArgVal, Alignment->value());
1717 Builder.
getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1719 Align DstAlign = Dst->getPointerAlignment(M->getDataLayout());
1729 CI->
setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1766 Align Alignment =
DL.getPrefTypeAlign(ByValType);
1772 Alignment = std::max(Alignment, *ByValAlignment);
1776 nullptr, Alignment, Arg->
getName());
1788 for (
User *U : V->users())
1789 if (isa<LifetimeIntrinsic>(U))
1800 if (Ty == Int8PtrTy)
1805 if (U->getType() != Int8PtrTy)
continue;
1806 if (U->stripPointerCasts() != AI)
continue;
1826 return DILocation::get(Ctx, OrigDL.
getLine(), OrigDL.
getCol(),
1850 InlinedAtNode = DILocation::getDistinct(
1851 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1852 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1861 bool NoInlineLineTables = Fn->
hasFnAttribute(
"no-inline-line-tables");
1867 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1869 if (
auto *Loc = dyn_cast_or_null<DILocation>(MD))
1875 if (!NoInlineLineTables)
1883 if (CalleeHasDebugInfo && !NoInlineLineTables)
1893 if (
auto *AI = dyn_cast<AllocaInst>(&
I))
1900 if (isa<PseudoProbeInst>(
I))
1903 I.setDebugLoc(TheCallDL);
1908 assert(DVR->getDebugLoc() &&
"Debug Value must have debug loc");
1909 if (NoInlineLineTables) {
1910 DVR->setDebugLoc(TheCallDL);
1916 DVR->getMarker()->getParent()->
getContext(), IANodes);
1917 DVR->setDebugLoc(IDL);
1921 for (; FI != Fn->
end(); ++FI) {
1924 for (
DbgRecord &DVR :
I.getDbgRecordRange()) {
1930 if (NoInlineLineTables) {
1932 while (BI != FI->end()) {
1933 BI->dropDbgRecords();
1941#define DEBUG_TYPE "assignment-tracking"
1949 errs() <<
"# Finding caller local variables escaped by callee\n");
1952 if (!Arg->getType()->isPointerTy()) {
1964 assert(Arg->getType()->isPtrOrPtrVectorTy());
1965 APInt TmpOffset(
DL.getIndexTypeSizeInBits(Arg->getType()), 0,
false);
1967 Arg->stripAndAccumulateConstantOffsets(
DL, TmpOffset,
true));
1969 LLVM_DEBUG(
errs() <<
" | SKIP: Couldn't walk back to base storage\n");
1982 if (DbgAssign->getDebugLoc().getInlinedAt())
1989 return EscapedLocals;
1995 << Start->getParent()->getName() <<
" from "
2008 for (
auto BBI = Start; BBI !=
End; ++BBI) {
2014#define DEBUG_TYPE "inline-function"
2028 for (
auto Entry : VMap) {
2029 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
2031 auto *OrigBB = cast<BasicBlock>(Entry.first);
2032 auto *ClonedBB = cast<BasicBlock>(Entry.second);
2034 if (!ClonedBBs.
insert(ClonedBB).second) {
2046 EntryClone, CallerBFI->
getBlockFreq(CallSiteBlock), ClonedBBs);
2056 auto CallSiteCount =
2059 std::min(CallSiteCount.value_or(0), CalleeEntryCount.
getCount());
2064 Function *Callee, int64_t EntryDelta,
2066 auto CalleeCount = Callee->getEntryCount();
2070 const uint64_t PriorEntryCount = CalleeCount->getCount();
2075 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
2077 : PriorEntryCount + EntryDelta;
2079 auto updateVTableProfWeight = [](
CallBase *CB,
const uint64_t NewEntryCount,
2088 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
2089 for (
auto Entry : *VMap) {
2090 if (isa<CallInst>(Entry.first))
2091 if (
auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {
2092 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
2093 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
2096 if (isa<InvokeInst>(Entry.first))
2097 if (
auto *
II = dyn_cast_or_null<InvokeInst>(Entry.second)) {
2098 II->updateProfWeight(CloneEntryCount, PriorEntryCount);
2099 updateVTableProfWeight(
II, CloneEntryCount, PriorEntryCount);
2105 Callee->setEntryCount(NewEntryCount);
2109 if (!VMap || VMap->
count(&BB))
2111 if (
CallInst *CI = dyn_cast<CallInst>(&
I)) {
2112 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2113 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
2116 II->updateProfWeight(NewEntryCount, PriorEntryCount);
2117 updateVTableProfWeight(
II, NewEntryCount, PriorEntryCount);
2144 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
2145 IsUnsafeClaimRV = !IsRetainRV;
2147 for (
auto *RI : Returns) {
2149 bool InsertRetainCall = IsRetainRV;
2158 if (isa<CastInst>(
I))
2161 if (
auto *
II = dyn_cast<IntrinsicInst>(&
I)) {
2162 if (
II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2172 if (IsUnsafeClaimRV) {
2176 II->eraseFromParent();
2177 InsertRetainCall =
false;
2181 auto *CI = dyn_cast<CallInst>(&
I);
2196 NewCall->copyMetadata(*CI);
2197 CI->replaceAllUsesWith(NewCall);
2198 CI->eraseFromParent();
2199 InsertRetainCall =
false;
2203 if (InsertRetainCall) {
2234static std::pair<std::vector<int64_t>, std::vector<int64_t>>
2242 std::vector<int64_t> CalleeCounterMap;
2243 std::vector<int64_t> CalleeCallsiteMap;
2244 CalleeCounterMap.resize(CalleeCounters, -1);
2245 CalleeCallsiteMap.resize(CalleeCallsites, -1);
2248 if (Ins.getNameValue() == &Caller)
2250 const auto OldID =
static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2251 if (CalleeCounterMap[OldID] == -1)
2253 const auto NewID =
static_cast<uint32_t>(CalleeCounterMap[OldID]);
2255 Ins.setNameValue(&Caller);
2256 Ins.setIndex(NewID);
2261 if (Ins.getNameValue() == &Caller)
2263 const auto OldID =
static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
2264 if (CalleeCallsiteMap[OldID] == -1)
2266 const auto NewID =
static_cast<uint32_t>(CalleeCallsiteMap[OldID]);
2268 Ins.setNameValue(&Caller);
2269 Ins.setIndex(NewID);
2273 std::deque<BasicBlock *> Worklist;
2290 Worklist.push_back(StartBB);
2291 while (!Worklist.empty()) {
2292 auto *BB = Worklist.front();
2293 Worklist.pop_front();
2294 bool Changed =
false;
2297 Changed |= RewriteInstrIfNeeded(*BBID);
2301 BBID->moveBefore(BB->getFirstInsertionPt());
2304 if (
auto *Inc = dyn_cast<InstrProfIncrementInst>(&
I)) {
2305 if (isa<InstrProfIncrementInstStep>(Inc)) {
2312 if (isa<Constant>(Inc->getStep())) {
2313 assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
2314 Inc->eraseFromParent();
2316 assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));
2317 RewriteInstrIfNeeded(*Inc);
2319 }
else if (Inc != BBID) {
2324 Inc->eraseFromParent();
2327 }
else if (
auto *CS = dyn_cast<InstrProfCallsite>(&
I)) {
2328 Changed |= RewriteCallsiteInsIfNeeded(*CS);
2331 if (!BBID || Changed)
2333 if (Seen.
insert(Succ).second)
2334 Worklist.push_back(Succ);
2338 "Counter index mapping should be either to -1 or to non-zero index, "
2340 "index corresponds to the entry BB of the caller");
2342 "Callsite index mapping should be either to -1 or to non-zero index, "
2343 "because there should have been at least a callsite - the inlined one "
2344 "- which would have had a 0 index.");
2346 return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};
2365 bool MergeAttributes,
AAResults *CalleeAAR,
bool InsertLifetime,
2368 return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2369 ForwardVarArgsTo, ORE);
2380 const auto CallsiteID =
2381 static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());
2386 auto Ret =
InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2387 ForwardVarArgsTo, ORE);
2388 if (!Ret.isSuccess())
2393 CallsiteIDIns->eraseFromParent();
2398 const auto IndicesMaps =
remapIndices(Caller, StartBB, CtxProf,
2399 NumCalleeCounters, NumCalleeCallsites);
2404 const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;
2406 (Ctx.counters().size() +
2407 llvm::count_if(CalleeCounterMap, [](
auto V) { return V != -1; }) ==
2409 "The caller's counters size should have grown by the number of new "
2410 "distinct counters inherited from the inlined callee.");
2411 Ctx.resizeCounters(NewCountersSize);
2415 auto CSIt = Ctx.callsites().find(CallsiteID);
2416 if (CSIt == Ctx.callsites().end())
2418 auto CalleeCtxIt = CSIt->second.find(CalleeGUID);
2421 if (CalleeCtxIt == CSIt->second.end())
2426 auto &CalleeCtx = CalleeCtxIt->second;
2427 assert(CalleeCtx.guid() == CalleeGUID);
2429 for (
auto I = 0U;
I < CalleeCtx.counters().
size(); ++
I) {
2430 const int64_t NewIndex = CalleeCounterMap[
I];
2431 if (NewIndex >= 0) {
2432 assert(NewIndex != 0 &&
"counter index mapping shouldn't happen to a 0 "
2433 "index, that's the caller's entry BB");
2434 Ctx.counters()[NewIndex] = CalleeCtx.counters()[
I];
2437 for (
auto &[
I, OtherSet] : CalleeCtx.callsites()) {
2438 const int64_t NewCSIdx = CalleeCallsiteMap[
I];
2439 if (NewCSIdx >= 0) {
2441 "callsite index mapping shouldn't happen to a 0 index, the "
2442 "caller must've had at least one callsite (with such an index)");
2443 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));
2449 auto Deleted = Ctx.callsites().erase(CallsiteID);
2453 CtxProf.
update(Updater, Caller);
2462 if (isa<CallBrInst>(CB))
2511 "convergent call needs convergencectrl operand");
2522 if (CalledFunc->
hasGC()) {
2523 if (Caller->hasGC() && CalledFunc->
getGC() != Caller->getGC())
2537 Caller->hasPersonalityFn()
2538 ? Caller->getPersonalityFn()->stripPointerCasts()
2540 if (CalledPersonality) {
2545 if (CallerPersonality && CalledPersonality != CallerPersonality)
2551 if (CallerPersonality) {
2554 std::optional<OperandBundleUse> ParentFunclet =
2557 IFI.
CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2562 if (Personality == EHPersonality::MSVC_CXX) {
2568 for (
const BasicBlock &CalledBB : *CalledFunc) {
2569 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHIIt()))
2576 for (
const BasicBlock &CalledBB : *CalledFunc) {
2577 if (CalledBB.isEHPad())
2597 bool MergeAttributes,
AAResults *CalleeAAR,
2598 bool InsertLifetime,
Function *ForwardVarArgsTo,
2604 "CanInlineCallSite should have verified direct call to definition");
2608 bool EHPadForCallUnwindsLocally =
false;
2611 Value *CallSiteUnwindDestToken =
2614 EHPadForCallUnwindsLocally =
2615 CallSiteUnwindDestToken &&
2616 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2633 if (CalledFunc->
hasGC()) {
2634 if (!Caller->hasGC())
2635 Caller->setGC(CalledFunc->
getGC());
2638 "CanInlineCallSite should have verified compatible GCs");
2645 if (!Caller->hasPersonalityFn()) {
2646 Caller->setPersonalityFn(CalledPersonality);
2648 assert(Caller->getPersonalityFn()->stripPointerCasts() ==
2649 CalledPersonality &&
2650 "CanInlineCallSite should have verified compatible personality");
2674 auto &
DL = Caller->getDataLayout();
2681 E = CalledFunc->
arg_end();
I != E; ++
I, ++AI, ++ArgNo) {
2682 Value *ActualArg = *AI;
2690 &CB, CalledFunc, IFI,
2692 if (ActualArg != *AI)
2698 VMap[&*
I] = ActualArg;
2718 false, Returns,
".i",
2719 &InlinedFunctionInfo);
2721 FirstNewBlock = LastBlock; ++FirstNewBlock;
2725 if (RVCallKind != objcarc::ARCInstKind::None)
2736 CalledFunc->
front());
2744 for (ByValInit &
Init : ByValInits)
2746 Caller->getParent(), &*FirstNewBlock, IFI,
2749 std::optional<OperandBundleUse> ParentDeopt =
2755 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2776 std::vector<Value *> MergedDeoptArgs;
2777 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2778 ChildOB.Inputs.size());
2783 OpDefs.
emplace_back(
"deopt", std::move(MergedDeoptArgs));
2813 SAMetadataCloner.clone();
2814 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2836 make_range(FirstNewBlock->getIterator(), Caller->end()))
2838 if (
auto *
II = dyn_cast<AssumeInst>(&
I))
2844 if (IntrinsicCall) {
2857 E = FirstNewBlock->end();
I != E; ) {
2876 while (isa<AllocaInst>(
I) &&
2877 !cast<AllocaInst>(
I)->use_empty() &&
2887 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2904 bool InlinedMustTailCalls =
false, InlinedDeoptimizeCalls =
false;
2907 if (
CallInst *CI = dyn_cast<CallInst>(&CB))
2908 CallSiteTailKind = CI->getTailCallKind();
2923 if (!VarArgsToForward.
empty() &&
2924 ((ForwardVarArgsTo &&
2930 if (!Attrs.isEmpty() || !VarArgsAttrs.
empty()) {
2931 for (
unsigned ArgNo = 0;
2933 ArgAttrs.
push_back(Attrs.getParamAttrs(ArgNo));
2939 Attrs.getRetAttrs(), ArgAttrs);
2954 InlinedDeoptimizeCalls |=
2955 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2974 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2993 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2995 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
3014 if (InlinedMustTailCalls &&
3015 RI->
getParent()->getTerminatingMustTailCall())
3017 if (InlinedDeoptimizeCalls &&
3018 RI->
getParent()->getTerminatingDeoptimizeCall())
3037 if (InlinedMustTailCalls && RI->
getParent()->getTerminatingMustTailCall())
3039 if (InlinedDeoptimizeCalls && RI->
getParent()->getTerminatingDeoptimizeCall())
3049 if (
auto *
II = dyn_cast<InvokeInst>(&CB)) {
3052 if (isa<LandingPadInst>(FirstNonPHI)) {
3073 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
3074 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
3081 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
3082 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
3085 auto *FPI = cast<FuncletPadInst>(
I);
3086 if (isa<ConstantTokenNone>(FPI->getParentPad()))
3092 if (InlinedDeoptimizeCalls) {
3098 if (Caller->getReturnType() == CB.
getType()) {
3100 return RI->
getParent()->getTerminatingDeoptimizeCall() !=
nullptr;
3105 Caller->getParent(), Intrinsic::experimental_deoptimize,
3106 {Caller->getReturnType()});
3132 "Expected at least the deopt operand bundle");
3136 Builder.
CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
3157 if (InlinedMustTailCalls) {
3159 Type *NewRetTy = Caller->getReturnType();
3166 RI->
getParent()->getTerminatingMustTailCall();
3167 if (!ReturnedMustTail) {
3176 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
3179 OldCast->eraseFromParent();
3199 make_range(FirstNewBlock->getIterator(), Caller->end()))
3201 if (
auto *CB = dyn_cast<CallBase>(&
I))
3210 if (Returns.
size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
3213 FirstNewBlock->end());
3215 Caller->back().eraseFromParent();
3228 if (&CB == R->getReturnValue())
3237 Returns[0]->eraseFromParent();
3239 if (MergeAttributes)
3253 BranchInst *CreatedBranchToNormalDest =
nullptr;
3266 CalledFunc->
getName() +
".exit");
3273 CalledFunc->
getName() +
".exit");
3287 "splitBasicBlock broken!");
3293 Caller->splice(AfterCallBB->
getIterator(), Caller, FirstNewBlock,
3301 if (Returns.
size() > 1) {
3306 PHI->insertBefore(AfterCallBB->
begin());
3317 "Ret value not consistent in function!");
3318 PHI->addIncoming(RI->getReturnValue(), RI->
getParent());
3334 if (CreatedBranchToNormalDest)
3336 }
else if (!Returns.
empty()) {
3340 if (&CB == Returns[0]->getReturnValue())
3347 BasicBlock *ReturnBB = Returns[0]->getParent();
3352 AfterCallBB->
splice(AfterCallBB->
begin(), ReturnBB);
3354 if (CreatedBranchToNormalDest)
3358 Returns[0]->eraseFromParent();
3365 if (CreatedBranchToNormalDest)
3377 if (InlinedMustTailCalls &&
pred_empty(AfterCallBB))
3382 assert(cast<BranchInst>(Br)->isUnconditional() &&
"splitBasicBlock broken!");
3383 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3402 auto &
DL = Caller->getDataLayout();
3404 PHI->replaceAllUsesWith(V);
3405 PHI->eraseFromParent();
3409 if (MergeAttributes)
3414 bool MergeAttributes,
3416 bool InsertLifetime,
3420 if (Result.isSuccess()) {
3422 ForwardVarArgsTo, ORE);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to encode location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList, OptimizationRemarkEmitter *ORE)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, MaybeAlign SrcAlign, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD, OptimizationRemarkEmitter *ORE)
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
This file contains the declarations for profiling metadata utility functions.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
A private abstract base class describing the concept of an individual alias analysis implementation.
LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
PointerType * getType() const
Overload to return most specific pointer type.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
static LLVM_ABI uint64_t getGUID(const Function &F)
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM_ABI AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
LLVM_ABI Attribute getAttribute(Attribute::AttrKind Kind) const
Return Attribute with the given Kind.
uint64_t getDereferenceableBytes() const
Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is return...
bool hasAttributes() const
Return true if the builder has IR-level attributes.
LLVM_ABI AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
MaybeAlign getAlignment() const
Retrieve the alignment attribute, if it exists.
LLVM_ABI AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
uint64_t getDereferenceableOrNullBytes() const
Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists...
LLVM_ABI AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
LLVM_ABI AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
LLVM_ABI AttrBuilder & addRangeAttr(const ConstantRange &CR)
Add range attribute.
AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const
Add a return value attribute to the list.
static LLVM_ABI AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
LLVM_ABI AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
LLVM_ABI AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const
Remove the specified attribute from this set.
static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
LLVM_ABI void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
LLVM_ABI void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
LLVM_ABI BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
AttributeSet getRetAttributes() const
Return the return attributes for this call.
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a range of values.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This is an important base class in LLVM.
const Constant * stripPointerCasts() const
static LLVM_ABI InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)
Get the instruction instrumenting a BB, or nullptr if not present.
static LLVM_ABI InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)
Get the instruction instrumenting a callsite, or nullptr if that cannot be found.
const DILocation * getWithoutAtom() const
uint64_t getAtomGroup() const
uint8_t getAtomRank() const
Subprogram description. Uses SubclassData1.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Base class for non-instruction debug metadata records that have positions within IR.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static DebugLoc getCompilerGenerated()
LLVM_ABI unsigned getLine() const
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
LLVM_ABI MDNode * getScope() const
static LLVM_ABI DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
static DebugLoc getTemporary()
LLVM_ABI unsigned getCol() const
LLVM_ABI bool isImplicitCode() const
Check if the DebugLoc corresponds to an implicit code.
static DebugLoc getUnknown()
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Class to represent profile counts.
uint64_t getCount() const
const BasicBlock & getEntryBlock() const
BasicBlockListType::iterator iterator
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const BasicBlock & front() const
iterator_range< arg_iterator > args()
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
MaybeAlign getParamAlign(unsigned ArgNo) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const std::string & getGC() const
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Type * getReturnType() const
Returns the type of the ret val.
void setCallingConv(CallingConv::ID CC)
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
LLVM_ABI CallInst * CreateLifetimeStart(Value *Ptr)
Create a lifetime.start intrinsic.
LLVM_ABI CallInst * CreateLifetimeEnd(Value *Ptr)
Create a lifetime.end intrinsic.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
LLVM_ABI CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
LLVM_ABI Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Value * ConvergenceControlToken
bool UpdateProfile
Update profile for callee as well as cloned version.
Instruction * CallSiteEHPad
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
BlockFrequencyInfo * CalleeBFI
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
BlockFrequencyInfo * CallerBFI
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
InlineResult is basically true or false.
static InlineResult success()
static InlineResult failure(const char *Reason)
This represents the llvm.instrprof.callsite intrinsic.
This represents the llvm.instrprof.increment intrinsic.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
static LLVM_ABI MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
ArrayRef< MDOperand > operands() const
op_iterator op_end() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
op_iterator op_begin() const
LLVMContext & getContext() const
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
A Module instance is used to store all the information related to an LLVM module.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
The instrumented contextual profile, produced by the CtxProfAnalysis.
LLVM_ABI bool isInSpecializedModule() const
LLVM_ABI void update(Visitor, const Function &F)
uint32_t getNumCounters(const Function &F) const
uint32_t allocateNextCounterIndex(const Function &F)
uint32_t getNumCallsites(const Function &F) const
uint32_t allocateNextCallsiteIndex(const Function &F)
A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis providing profile information.
LLVM_ABI std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isVoidTy() const
Return true if this is 'void'.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
std::pair< iterator, bool > insert(const ValueT &V)
const ParentTy * getParent() const
self_iterator getIterator()
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
LLVM_ABI void mergeAttributesForInlining(Function &Caller, const Function &Callee)
Merge caller's and callee's attributes.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
bool match(Val *V, const Pattern &P)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
LLVM_ABI void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
LLVM_ABI void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
initializer< Ty > init(const Ty &Val)
LLVM_ABI MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
ARCInstKind
Equivalence classes of instructions in the ARC Model.
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
bool hasAttachedCallOpBundle(const CallBase *CB)
This is an optimization pass for GlobalISel generic memory operations.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This function inlines the called function into the basic block of the caller.
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
LLVM_ABI InlineResult CanInlineCallSite(const CallBase &CB, InlineFunctionInfo &IFI)
Check if it is legal to perform inlining of the function called by CB into the caller at this particu...
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
LLVM_ABI void InlineFunctionImpl(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This should generally not be used, use InlineFunction instead.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNotCapturedBefore.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool capturesAnything(CaptureComponents CC)
bool pred_empty(const BasicBlock *BB)
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
LLVM_ABI void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This struct can be used to capture information about code being cloned, while it is being cloned.
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
bool isSimplified(const Value *From, const Value *To) const
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned...
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
static Instruction * tryGetVTableInstruction(CallBase *CB)
Helper struct for trackAssignments, below.