13#define MIBEntryDef(NameTag, Name, Type) List.push_back(Meta::Name);
20 return {Meta::AllocCount, Meta::TotalSize, Meta::TotalLifetime,
21 Meta::TotalLifetimeAccessDensity};
62 Result +=
N.serializedSize(Schema,
Version2);
76 Result +=
N.serializedSize(Schema,
Version3);
91 Result +=
N.serializedSize(Schema,
Version4);
95 for (
const auto &CS :
Record.CallSites)
116 using namespace support;
123 N.Info.serialize(Schema,
OS);
128 for (
const auto &CS :
Record.CallSites)
136 using namespace support;
144 N.Info.serialize(Schema,
OS);
149 for (
const auto &CS :
Record.CallSites) {
159 using namespace support;
167 N.Info.serialize(Schema,
OS);
172 for (
const auto &CS :
Record.CallSites) {
175 LE.write<
uint64_t>(CS.CalleeGuids.size());
176 for (
const auto &
Guid : CS.CalleeGuids)
200 const unsigned char *
Ptr) {
201 using namespace support;
207 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
208 Record.AllocSites.reserve(NumNodes);
211 Node.CSId = endian::readNext<CallStackId, llvm::endianness::little>(
Ptr);
212 Node.Info.deserialize(Schema,
Ptr);
219 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
220 Record.CallSites.reserve(NumCtxs);
221 for (
uint64_t J = 0; J < NumCtxs; J++) {
223 endian::readNext<CallStackId, llvm::endianness::little>(
Ptr);
224 Record.CallSites.emplace_back(CSId);
231 const unsigned char *
Ptr) {
232 using namespace support;
238 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
239 Record.AllocSites.reserve(NumNodes);
244 endian::readNext<LinearCallStackId, llvm::endianness::little>(
Ptr);
245 Node.Info.deserialize(Schema,
Ptr);
246 Ptr += SerializedSize;
252 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
253 Record.CallSites.reserve(NumCtxs);
254 for (
uint64_t J = 0; J < NumCtxs; J++) {
260 endian::readNext<LinearCallStackId, llvm::endianness::little>(
Ptr);
261 Record.CallSites.emplace_back(CSId);
268 const unsigned char *
Ptr) {
269 using namespace support;
275 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
276 Record.AllocSites.reserve(NumNodes);
281 endian::readNext<LinearCallStackId, llvm::endianness::little>(
Ptr);
282 Node.Info.deserialize(Schema,
Ptr);
283 Ptr += SerializedSize;
289 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
290 Record.CallSites.reserve(NumCtxs);
291 for (
uint64_t J = 0; J < NumCtxs; J++) {
294 endian::readNext<LinearCallStackId, llvm::endianness::little>(
Ptr);
296 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
299 for (
uint64_t K = 0; K < NumGuids; ++K)
301 endian::readNext<GlobalValue::GUID, llvm::endianness::little>(
Ptr));
302 Record.CallSites.emplace_back(CSId, std::move(Guids));
310 const unsigned char *
Ptr,
330 AI.
Info = IndexedAI.Info;
332 Record.AllocSites.push_back(std::move(AI));
337 std::vector<Frame> Frames = Callback(CS.CSId);
338 Record.CallSites.emplace_back(std::move(Frames), CS.CalleeGuids);
361 using namespace support;
363 const unsigned char *
Ptr = Buffer;
365 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
368 "memprof schema invalid");
372 for (
size_t I = 0;
I < NumSchemaIds;
I++) {
374 endian::readNext<uint64_t, llvm::endianness::little>(
Ptr);
377 "memprof schema invalid");
379 Result.push_back(
static_cast<Meta>(
Tag));
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
This file defines the SmallVector class.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Tagged union holding either a T or an Error.
static LLVM_ABI GUID getGUIDAssumingExternalLinkage(StringRef GlobalName)
Return a 64-bit global unique ID constructed from the name of a global symbol.
uint64_t GUID
Declare a type to represent a global unique identifier for a global value.
void reserve(size_type N)
void push_back(const T &Elt)
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
static StringRef getCanonicalFnName(const Function &F)
Return the canonical name for a function, taking into account suffix elision policy attributes.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static IndexedMemProfRecord deserializeV4(const MemProfSchema &Schema, const unsigned char *Ptr)
static IndexedMemProfRecord deserializeV3(const MemProfSchema &Schema, const unsigned char *Ptr)
static void serializeV3(const IndexedMemProfRecord &Record, const MemProfSchema &Schema, raw_ostream &OS, llvm::DenseMap< CallStackId, LinearCallStackId > &MemProfCallStackIndexes)
LLVM_ABI MemProfSchema getHotColdSchema()
uint32_t LinearCallStackId
static size_t serializedSizeV2(const IndexedAllocationInfo &IAI, const MemProfSchema &Schema)
static size_t serializedSizeV3(const IndexedAllocationInfo &IAI, const MemProfSchema &Schema)
LLVM_ABI MemProfSchema getFullSchema()
LLVM_ABI GlobalValue::GUID getGUID(const StringRef FunctionName)
static void serializeV4(const IndexedMemProfRecord &Record, const MemProfSchema &Schema, raw_ostream &OS, llvm::DenseMap< CallStackId, LinearCallStackId > &MemProfCallStackIndexes)
LLVM_ABI Expected< MemProfSchema > readMemProfSchema(const unsigned char *&Buffer)
static void serializeV2(const IndexedMemProfRecord &Record, const MemProfSchema &Schema, raw_ostream &OS)
static IndexedMemProfRecord deserializeV2(const MemProfSchema &Schema, const unsigned char *Ptr)
static size_t serializedSizeV4(const IndexedMemProfRecord &Record, const MemProfSchema &Schema)
This is an optimization pass for GlobalISel generic memory operations.
std::vector< Frame > CallStack
PortableMemInfoBlock Info
LLVM_ABI size_t serializedSize(const MemProfSchema &Schema, IndexedVersion Version) const
llvm::SmallVector< IndexedAllocationInfo > AllocSites
LLVM_ABI size_t serializedSize(const MemProfSchema &Schema, IndexedVersion Version) const
LLVM_ABI void serialize(const MemProfSchema &Schema, raw_ostream &OS, IndexedVersion Version, llvm::DenseMap< CallStackId, LinearCallStackId > *MemProfCallStackIndexes=nullptr) const
static LLVM_ABI IndexedMemProfRecord deserialize(const MemProfSchema &Schema, const unsigned char *Buffer, IndexedVersion Version)
llvm::SmallVector< IndexedCallSiteInfo > CallSites
LLVM_ABI MemProfRecord toMemProfRecord(llvm::function_ref< std::vector< Frame >(const CallStackId)> Callback) const
static size_t serializedSize(const MemProfSchema &Schema)
Adapter to write values to a stream in a particular byte order.