//===-- MemoryProfileInfo.cpp - memory profile info ------------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utilities to analyze memory profile information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Format.h"

using namespace llvm;
using namespace llvm::memprof;

#define DEBUG_TYPE "memory-profile-info"

cl::opt<bool> MemProfReportHintedSizes(
    "memprof-report-hinted-sizes", cl::init(false), cl::Hidden,
    cl::desc("Report total allocation sizes of hinted allocations"));

// This is useful if we have enabled reporting of hinted sizes and want to get
// information from the indexing step for all contexts (especially for testing),
// or have specified a value less than 100% for -memprof-cloning-cold-threshold.
cl::opt<bool> MemProfKeepAllNotColdContexts(
    "memprof-keep-all-not-cold-contexts", cl::init(false), cl::Hidden,
    cl::desc("Keep all non-cold contexts (increases cloning overheads)"));

cl::opt<unsigned> MinClonedColdBytePercent(
    "memprof-cloning-cold-threshold", cl::init(100), cl::Hidden,
    cl::desc("Min percent of cold bytes to hint alloc cold during cloning"));
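// E.g. with -memprof-cloning-cold-threshold=80, an allocation is hinted cold
// during cloning when at least 80% of its profiled bytes are cold.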

// Discard non-cold contexts if they overlap with much larger cold contexts,
// specifically, if all contexts reaching a given callsite are at least this
// percent cold by allocated bytes. This reduces the amount of cloning required
// to expose the cold contexts when they greatly dominate non-cold contexts.
cl::opt<unsigned> MinCallsiteColdBytePercent(
    "memprof-callsite-cold-threshold", cl::init(100), cl::Hidden,
    cl::desc("Min percent of cold bytes at a callsite to discard non-cold "
             "contexts"));

// Enable saving context size information for largest cold contexts, which can
// be used to flag contexts for more aggressive cloning and reporting.
cl::opt<unsigned> MinPercentMaxColdSize(
    "memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden,
    cl::desc("Min percent of max cold bytes for critical cold context"));

bool llvm::memprof::metadataIncludesAllContextSizeInfo() {
  return MemProfReportHintedSizes || MinClonedColdBytePercent < 100;
}

bool llvm::memprof::metadataMayIncludeContextSizeInfo() {
  return metadataIncludesAllContextSizeInfo() || MinPercentMaxColdSize < 100;
}

bool llvm::memprof::recordContextSizeInfoForAnalysis() {
  return metadataMayIncludeContextSizeInfo() ||
         MinCallsiteColdBytePercent < 100;
}

MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
                                              LLVMContext &Ctx) {
  SmallVector<Metadata *> StackVals;
  StackVals.reserve(CallStack.size());
  for (auto Id : CallStack) {
    auto *StackValMD =
        ValueAsMetadata::get(ConstantInt::get(Type::getInt64Ty(Ctx), Id));
    StackVals.push_back(StackValMD);
  }
  return MDNode::get(Ctx, StackVals);
}
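// buildCallstackMetadata example: ids {1, 2, 3} produce a node equivalent to
// the textual IR "!0 = !{i64 1, i64 2, i64 3}".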

MDNode *llvm::memprof::getMIBStackNode(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The stack metadata is the first operand of each memprof MIB metadata.
  return cast<MDNode>(MIB->getOperand(0));
}

AllocationType llvm::memprof::getMIBAllocType(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The allocation type is currently the second operand of each memprof
  // MIB metadata. This will need to change as we add additional allocation
  // types that can be applied based on the allocation profile data.
  auto *MDS = dyn_cast<MDString>(MIB->getOperand(1));
  assert(MDS);
  if (MDS->getString() == "cold") {
    return AllocationType::Cold;
  } else if (MDS->getString() == "hot") {
    return AllocationType::Hot;
  }
  return AllocationType::NotCold;
}
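// Together, getMIBStackNode and getMIBAllocType decode MIB nodes shaped like
// the illustrative textual IR below, where !0 is the call stack node and the
// MDString is the allocation type (optional size-info operands may follow):
//   !1 = !{!0, !"cold"}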

std::string llvm::memprof::getAllocTypeAttributeString(AllocationType Type) {
  switch (Type) {
  case AllocationType::NotCold:
    return "notcold";
  case AllocationType::Cold:
    return "cold";
  case AllocationType::Hot:
    return "hot";
  default:
    assert(false && "Unexpected alloc type");
  }
  llvm_unreachable("invalid alloc type");
}

bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
  const unsigned NumAllocTypes = llvm::popcount(AllocTypes);
  assert(NumAllocTypes != 0);
  return NumAllocTypes == 1;
}
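// hasSingleAllocType example: a mask with only Cold set returns true, while
// (Cold | NotCold) has two bits set and returns false.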

void CallStackTrie::addCallStack(
    AllocationType AllocType, ArrayRef<uint64_t> StackIds,
    std::vector<ContextTotalSize> ContextSizeInfo) {
  bool First = true;
  CallStackTrieNode *Curr = nullptr;
  for (auto StackId : StackIds) {
    // If this is the first stack frame, add or update alloc node.
    if (First) {
      First = false;
      if (Alloc) {
        assert(AllocStackId == StackId);
        Alloc->addAllocType(AllocType);
      } else {
        AllocStackId = StackId;
        Alloc = new CallStackTrieNode(AllocType);
      }
      Curr = Alloc;
      continue;
    }
    // Update existing caller node if it exists.
    auto [Next, Inserted] = Curr->Callers.try_emplace(StackId);
    if (!Inserted) {
      Curr = Next->second;
      Curr->addAllocType(AllocType);
      continue;
    }
    // Otherwise add a new caller node.
    auto *New = new CallStackTrieNode(AllocType);
    Next->second = New;
    Curr = New;
  }
  assert(Curr);
  llvm::append_range(Curr->ContextSizeInfo, ContextSizeInfo);
}
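// addCallStack example: adding {1, 2, 3} (cold) then {1, 2, 4} (notcold)
// yields an alloc node for frame 1 and a caller node for frame 2, each with
// both alloc types set, plus leaf caller nodes for frame 3 (cold only) and
// frame 4 (notcold only).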

void CallStackTrie::addCallStack(MDNode *MIB) {
  // Note that we are building this from existing MD_memprof metadata.
  BuiltFromExistingMetadata = true;
  MDNode *StackMD = getMIBStackNode(MIB);
  assert(StackMD);
  std::vector<uint64_t> CallStack;
  CallStack.reserve(StackMD->getNumOperands());
  for (const auto &MIBStackIter : StackMD->operands()) {
    auto *StackId = mdconst::dyn_extract<ConstantInt>(MIBStackIter);
    assert(StackId);
    CallStack.push_back(StackId->getZExtValue());
  }
  std::vector<ContextTotalSize> ContextSizeInfo;
  // Collect the context size information if it exists.
  if (MIB->getNumOperands() > 2) {
    for (unsigned I = 2; I < MIB->getNumOperands(); I++) {
      MDNode *ContextSizePair = dyn_cast<MDNode>(MIB->getOperand(I));
      assert(ContextSizePair->getNumOperands() == 2);
      uint64_t FullStackId =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(0))
              ->getZExtValue();
      uint64_t TotalSize =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(1))
              ->getZExtValue();
      ContextSizeInfo.push_back({FullStackId, TotalSize});
    }
  }
  addCallStack(getMIBAllocType(MIB), CallStack, std::move(ContextSizeInfo));
}
static MDNode *createMIBNode(LLVMContext &Ctx, ArrayRef<uint64_t> MIBCallStack,
                             AllocationType AllocType,
                             ArrayRef<ContextTotalSize> ContextSizeInfo,
                             const uint64_t MaxColdSize,
                             bool BuiltFromExistingMetadata,
                             uint64_t &TotalBytes, uint64_t &ColdBytes) {
  SmallVector<Metadata *> MIBPayload(
      {buildCallstackMetadata(MIBCallStack, Ctx)});
  MIBPayload.push_back(
      MDString::get(Ctx, getAllocTypeAttributeString(AllocType)));

  if (ContextSizeInfo.empty()) {
    // The profile matcher should have provided context size info if there was
    // a MinCallsiteColdBytePercent < 100. Here we check >= 100 to gracefully
    // handle a user-provided percent larger than 100. However, we may not have
    // this information if we built the Trie from existing MD_memprof metadata.
    assert(BuiltFromExistingMetadata || MinCallsiteColdBytePercent >= 100);
    return MDNode::get(Ctx, MIBPayload);
  }

  for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
    TotalBytes += TotalSize;
    bool LargeColdContext = false;
    if (AllocType == AllocationType::Cold) {
      ColdBytes += TotalSize;
      // If we have the max cold context size from summary information and have
      // requested identification of contexts above a percentage of the max,
      // see if this context qualifies.
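      // E.g. with MaxColdSize = 1000 and MinPercentMaxColdSize = 80, any
      // cold context totaling at least 800 bytes qualifies.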
      if (MaxColdSize > 0 && MinPercentMaxColdSize < 100 &&
          TotalSize * 100 >= MaxColdSize * MinPercentMaxColdSize)
        LargeColdContext = true;
    }
    // Only add the context size info as metadata if we need it in the thin
    // link (currently if reporting of hinted sizes is enabled, we have
    // specified a threshold for marking allocations cold after cloning, or we
    // have identified this as a large cold context of interest above).
    if (metadataIncludesAllContextSizeInfo() || LargeColdContext) {
      auto *FullStackIdMD = ValueAsMetadata::get(
          ConstantInt::get(Type::getInt64Ty(Ctx), FullStackId));
      auto *TotalSizeMD = ValueAsMetadata::get(
          ConstantInt::get(Type::getInt64Ty(Ctx), TotalSize));
      auto *ContextSizeMD = MDNode::get(Ctx, {FullStackIdMD, TotalSizeMD});
      MIBPayload.push_back(ContextSizeMD);
    }
  }
  assert(TotalBytes > 0);
  return MDNode::get(Ctx, MIBPayload);
}

void CallStackTrie::collectContextSizeInfo(
    CallStackTrieNode *Node, std::vector<ContextTotalSize> &ContextSizeInfo) {
  llvm::append_range(ContextSizeInfo, Node->ContextSizeInfo);
  for (auto &Caller : Node->Callers)
    collectContextSizeInfo(Caller.second, ContextSizeInfo);
}

void CallStackTrie::convertHotToNotCold(CallStackTrieNode *Node) {
  if (Node->hasAllocType(AllocationType::Hot)) {
    Node->removeAllocType(AllocationType::Hot);
    Node->addAllocType(AllocationType::NotCold);
  }
  for (auto &Caller : Node->Callers)
    convertHotToNotCold(Caller.second);
}

// Copy over some or all of NewMIBNodes to the SavedMIBNodes vector, depending
// on options that enable filtering out some NotCold contexts.
static void saveFilteredNewMIBNodes(std::vector<Metadata *> &NewMIBNodes,
                                    std::vector<Metadata *> &SavedMIBNodes,
                                    unsigned CallerContextLength,
                                    uint64_t TotalBytes, uint64_t ColdBytes,
                                    bool BuiltFromExistingMetadata) {
  const bool MostlyCold =
      // If we have built the Trie from existing MD_memprof metadata, we may or
      // may not have context size information (in which case ColdBytes and
      // TotalBytes are 0, which is also not guarded against below). Even if we
      // do have some context size information from the metadata, we have
      // already gone through a round of discarding of small non-cold contexts
      // during matching, and it would be overly aggressive to do it again. We
      // also want to maintain the same behavior with and without reporting
      // of hinted bytes enabled.
      !BuiltFromExistingMetadata && MinCallsiteColdBytePercent < 100 &&
      ColdBytes > 0 &&
      ColdBytes * 100 >= MinCallsiteColdBytePercent * TotalBytes;
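  // E.g. with MinCallsiteColdBytePercent = 90, ColdBytes = 95 and
  // TotalBytes = 100, MostlyCold is true (9500 >= 9000).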

  // In the simplest case, with pruning disabled, keep all the new MIB nodes.
  if (MemProfKeepAllNotColdContexts && !MostlyCold) {
    append_range(SavedMIBNodes, NewMIBNodes);
    return;
  }

  auto EmitMessageForRemovedContexts = [](const MDNode *MIBMD, StringRef Tag,
                                          StringRef Extra) {
    assert(MIBMD->getNumOperands() > 2);
    for (unsigned I = 2; I < MIBMD->getNumOperands(); I++) {
      MDNode *ContextSizePair = dyn_cast<MDNode>(MIBMD->getOperand(I));
      assert(ContextSizePair->getNumOperands() == 2);
      uint64_t FullStackId =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(0))
              ->getZExtValue();
      uint64_t TS =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(1))
              ->getZExtValue();
      errs() << "MemProf hinting: Total size for " << Tag
             << " non-cold full allocation context hash " << FullStackId
             << Extra << ": " << TS << "\n";
    }
  };

  // If the cold bytes at the current callsite exceed the given threshold, we
  // discard all non-cold contexts, so we do not need any of the later pruning
  // handling. We can simply copy over all the cold contexts and return early.
  if (MostlyCold) {
    auto NewColdMIBNodes =
        make_filter_range(NewMIBNodes, [&](const Metadata *M) {
          auto MIBMD = cast<MDNode>(M);
          // Only append cold contexts.
          if (getMIBAllocType(MIBMD) == AllocationType::Cold)
            return true;
          if (MemProfReportHintedSizes) {
            const float PercentCold = ColdBytes * 100.0 / TotalBytes;
            std::string PercentStr;
            llvm::raw_string_ostream OS(PercentStr);
            OS << format(" for %5.2f%% cold bytes", PercentCold);
            EmitMessageForRemovedContexts(MIBMD, "discarded", OS.str());
          }
          return false;
        });
    for (auto *M : NewColdMIBNodes)
      SavedMIBNodes.push_back(M);
    return;
  }

  // Prune unneeded NotCold contexts, taking advantage of the fact
  // that we later will only clone Cold contexts, as NotCold is the allocation
  // default. We only need to keep as metadata the NotCold contexts that
  // overlap the longest with Cold allocations, so that we know how deeply we
  // need to clone. For example, assume we add the following contexts to the
  // trie:
  //   1 3 (notcold)
  //   1 2 4 (cold)
  //   1 2 5 (notcold)
  //   1 2 6 (notcold)
  // the trie looks like:
  //         1
  //        / \
  //       2   3
  //      /|\
  //     4 5 6
  //
  // It is sufficient to prune all but one not-cold contexts (either 1,2,5 or
  // 1,2,6, we arbitrarily keep the first one we encounter which will be
  // 1,2,5).
  //
  // To do this pruning, we first check if there were any not-cold
  // contexts kept for a deeper caller, which will have a context length larger
  // than the CallerContextLength being handled here (i.e. kept by a deeper
  // recursion step). If so, none of the not-cold MIB nodes added for the
  // immediate callers need to be kept. If not, we keep the first (created
  // for the immediate caller) not-cold MIB node.
  bool LongerNotColdContextKept = false;
  for (auto *MIB : NewMIBNodes) {
    auto MIBMD = cast<MDNode>(MIB);
    if (getMIBAllocType(MIBMD) == AllocationType::Cold)
      continue;
    MDNode *StackMD = getMIBStackNode(MIBMD);
    assert(StackMD);
    if (StackMD->getNumOperands() > CallerContextLength) {
      LongerNotColdContextKept = true;
      break;
    }
  }
  // Don't need to emit any for the immediate caller if we already have
  // longer overlapping contexts.
  bool KeepFirstNewNotCold = !LongerNotColdContextKept;
  auto NewColdMIBNodes = make_filter_range(NewMIBNodes, [&](const Metadata *M) {
    auto MIBMD = cast<MDNode>(M);
    // Only keep cold contexts and first (longest non-cold context).
    if (getMIBAllocType(MIBMD) != AllocationType::Cold) {
      MDNode *StackMD = getMIBStackNode(MIBMD);
      assert(StackMD);
      // Keep any already kept for longer contexts.
      if (StackMD->getNumOperands() > CallerContextLength)
        return true;
      // Otherwise keep the first one added by the immediate caller if there
      // were no longer contexts.
      if (KeepFirstNewNotCold) {
        KeepFirstNewNotCold = false;
        return true;
      }
      if (MemProfReportHintedSizes)
        EmitMessageForRemovedContexts(MIBMD, "pruned", "");
      return false;
    }
    return true;
  });
  for (auto *M : NewColdMIBNodes)
    SavedMIBNodes.push_back(M);
}

// Recursive helper to trim contexts and create metadata nodes.
// Caller should have pushed Node's loc to MIBCallStack. Doing this in the
// caller makes it simpler to handle the many early returns in this method.
// Updates the total and cold profiled bytes in the subtrie rooted at this node.
bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                                  std::vector<uint64_t> &MIBCallStack,
                                  std::vector<Metadata *> &MIBNodes,
                                  bool CalleeHasAmbiguousCallerContext,
                                  uint64_t &TotalBytes, uint64_t &ColdBytes) {
  // Trim context below the first node in a prefix with a single alloc type.
  // Add an MIB record for the current call stack prefix.
  if (hasSingleAllocType(Node->AllocTypes)) {
    std::vector<ContextTotalSize> ContextSizeInfo;
    collectContextSizeInfo(Node, ContextSizeInfo);
    MIBNodes.push_back(createMIBNode(
        Ctx, MIBCallStack, (AllocationType)Node->AllocTypes, ContextSizeInfo,
        MaxColdSize, BuiltFromExistingMetadata, TotalBytes, ColdBytes));
    return true;
  }

  // We don't have a single allocation for all the contexts sharing this
  // prefix, so recursively descend into callers in the trie.
  if (!Node->Callers.empty()) {
    bool NodeHasAmbiguousCallerContext = Node->Callers.size() > 1;
    bool AddedMIBNodesForAllCallerContexts = true;
    // Accumulate all new MIB nodes by the recursive calls below into a vector
    // that will later be filtered before adding to the caller's MIBNodes
    // vector.
    std::vector<Metadata *> NewMIBNodes;
    // Determine the total and cold byte counts for all callers, then add to
    // the caller's counts further below.
    uint64_t CallerTotalBytes = 0;
    uint64_t CallerColdBytes = 0;
    for (auto &Caller : Node->Callers) {
      MIBCallStack.push_back(Caller.first);
      AddedMIBNodesForAllCallerContexts &= buildMIBNodes(
          Caller.second, Ctx, MIBCallStack, NewMIBNodes,
          NodeHasAmbiguousCallerContext, CallerTotalBytes, CallerColdBytes);
      // Remove Caller.
      MIBCallStack.pop_back();
    }
    // Pass in the stack length of the MIB nodes added for the immediate
    // caller, which is the current stack length plus 1.
    saveFilteredNewMIBNodes(NewMIBNodes, MIBNodes, MIBCallStack.size() + 1,
                            CallerTotalBytes, CallerColdBytes,
                            BuiltFromExistingMetadata);
    TotalBytes += CallerTotalBytes;
    ColdBytes += CallerColdBytes;

    if (AddedMIBNodesForAllCallerContexts)
      return true;
    // We expect that the callers should be forced to add MIBs to disambiguate
    // the context in this case (see below).
    assert(!NodeHasAmbiguousCallerContext);
  }

  // If we reached here, then this node does not have a single allocation type,
  // and we didn't add metadata for a longer call stack prefix including any of
  // Node's callers. That means we never hit a single allocation type along all
  // call stacks with this prefix. This can happen due to recursion collapsing
  // or the stack being deeper than tracked by the profiler runtime, leading to
  // contexts with different allocation types being merged. In that case, we
  // trim the context just below the deepest context split, which is this
  // node if the callee has an ambiguous caller context (multiple callers),
  // since the recursive calls above returned false. Conservatively give it
  // non-cold allocation type.
  if (!CalleeHasAmbiguousCallerContext)
    return false;
  std::vector<ContextTotalSize> ContextSizeInfo;
  collectContextSizeInfo(Node, ContextSizeInfo);
  MIBNodes.push_back(createMIBNode(
      Ctx, MIBCallStack, AllocationType::NotCold, ContextSizeInfo, MaxColdSize,
      BuiltFromExistingMetadata, TotalBytes, ColdBytes));
  return true;
}
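// Continuing the pruning example above: nodes 3, 4, 5 and 6 each have a single
// alloc type, producing MIB nodes for (1,3), (1,2,4), (1,2,5) and (1,2,6).
// Filtering under node 2 keeps cold (1,2,4) and the first notcold (1,2,5) and
// prunes (1,2,6); filtering under node 1 then prunes the shorter notcold
// (1,3), since the longer (1,2,5) already bounds the required cloning depth.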

void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
                                                StringRef Descriptor) {
  auto AllocTypeString = getAllocTypeAttributeString(AT);
  auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString);
  CI->addFnAttr(A);
  if (MemProfReportHintedSizes) {
    std::vector<ContextTotalSize> ContextSizeInfo;
    collectContextSizeInfo(Alloc, ContextSizeInfo);
    for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
      errs() << "MemProf hinting: Total size for full allocation context hash "
             << FullStackId << " and " << Descriptor << " alloc type "
             << getAllocTypeAttributeString(AT) << ": " << TotalSize << "\n";
    }
  }
  if (ORE)
    ORE->emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CI)
              << ore::NV("AllocationCall", CI) << " in function "
              << ore::NV("Caller", CI->getFunction())
              << " marked with memprof allocation attribute "
              << ore::NV("Attribute", AllocTypeString));
}
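// The added attribute appears on the call site, e.g. in textual IR:
//   %call = call ptr @malloc(i64 8) #1
//   attributes #1 = { "memprof"="cold" }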

// Build and attach the minimal necessary MIB metadata. If the alloc has a
// single allocation type, add a function attribute instead. Returns true if
// memprof metadata attached, false if not (attribute added).
bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
  if (hasSingleAllocType(Alloc->AllocTypes)) {
    addSingleAllocTypeAttribute(CI, (AllocationType)Alloc->AllocTypes,
                                "single");
    return false;
  }
  // If there were any hot allocation contexts, the Alloc trie node would have
  // the Hot type set. If so, because we don't currently support cloning for hot
  // contexts, they should be converted to NotCold. This happens in the cloning
  // support anyway, however, doing this now enables more aggressive context
  // trimming when building the MIB metadata (and possibly may make the
  // allocation have a single NotCold allocation type), greatly reducing
  // overheads in bitcode, cloning memory and cloning time.
  if (Alloc->hasAllocType(AllocationType::Hot)) {
    convertHotToNotCold(Alloc);
    // Check whether we now have a single alloc type.
    if (hasSingleAllocType(Alloc->AllocTypes)) {
      addSingleAllocTypeAttribute(CI, (AllocationType)Alloc->AllocTypes,
                                  "single");
      return false;
    }
  }
  auto &Ctx = CI->getContext();
  std::vector<uint64_t> MIBCallStack;
  MIBCallStack.push_back(AllocStackId);
  std::vector<Metadata *> MIBNodes;
  uint64_t TotalBytes = 0;
  uint64_t ColdBytes = 0;
  assert(!Alloc->Callers.empty() && "addCallStack has not been called yet");
  // The CalleeHasAmbiguousCallerContext flag is meant to say whether the
  // callee of the given node has more than one caller. Here the node being
  // passed in is the alloc and it has no callees. So it's false.
  if (buildMIBNodes(Alloc, Ctx, MIBCallStack, MIBNodes,
                    /*CalleeHasAmbiguousCallerContext=*/false, TotalBytes,
                    ColdBytes)) {
    assert(MIBCallStack.size() == 1 &&
           "Should only be left with Alloc's location in stack");
    CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
    return true;
  }
  // In the corner case where the CallStackTrie consists of a single chain to
  // the leaf and every node in the chain has multiple alloc types, we
  // conservatively give the allocation a non-cold allocation type.
  // FIXME: Avoid this case before memory profile created. Alternatively, select
  // hint based on fraction cold.
  addSingleAllocTypeAttribute(CI, AllocationType::NotCold, "indistinguishable");
  return false;
}
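// Illustrative shape of the attached metadata (hypothetical stack ids):
//   %call = call ptr @malloc(i64 %n), !memprof !0
//   !0 = !{!1, !3}          ; one operand per MIB node
//   !1 = !{!2, !"cold"}
//   !2 = !{i64 1, i64 2, i64 4}
//   !3 = !{!4, !"notcold"}
//   !4 = !{i64 1, i64 2, i64 5}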

template <>
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
    const MDNode *N, bool End)
    : N(N) {
  if (!N)
    return;
  Iter = End ? N->op_end() : N->op_begin();
}

template <>
uint64_t
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*() {
  assert(Iter != N->op_end());
  ConstantInt *StackIdCInt = mdconst::dyn_extract<ConstantInt>(*Iter);
  assert(StackIdCInt);
  return StackIdCInt->getZExtValue();
}

template <> uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const {
  assert(N);
  return mdconst::dyn_extract<ConstantInt>(N->operands().back())
      ->getZExtValue();
}

MDNode *MDNode::getMergedMemProfMetadata(MDNode *A, MDNode *B) {
  // TODO: Support more sophisticated merging, such as selecting the one with
  // more bytes allocated, or implement support for carrying multiple allocation
  // leaf contexts. For now, keep the first one.
  if (A)
    return A;
  return B;
}

MDNode *MDNode::getMergedCallsiteMetadata(MDNode *A, MDNode *B) {
  // TODO: Support more sophisticated merging, which will require support for
  // carrying multiple contexts. For now, keep the first one.
  if (A)
    return A;
  return B;
}