17#ifndef LLVM_SUPPORT_ALLOCATOR_H
18#define LLVM_SUPPORT_ALLOCATOR_H
40 size_t BytesAllocated,
62template <
typename AllocatorT = MallocAllocator,
size_t SlabSize = 4096,
63 size_t SizeThreshold = SlabSize,
size_t GrowthDelay = 128>
65 :
public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
66 SizeThreshold, GrowthDelay>>,
71 static_assert(SizeThreshold <= SlabSize,
72 "The SizeThreshold must be at most the SlabSize to ensure "
73 "that objects larger than a slab go into their own memory "
75 static_assert(GrowthDelay > 0,
76 "GrowthDelay must be at least 1 which already increases the"
77 "slab size after each allocated slab.");
89 End(Old.End), Slabs(
std::
move(Old.Slabs)),
90 CustomSizedSlabs(
std::
move(Old.CustomSizedSlabs)),
91 BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
92 Old.CurPtr = Old.End =
nullptr;
93 Old.BytesAllocated = 0;
95 Old.CustomSizedSlabs.clear();
99 DeallocateSlabs(Slabs.
begin(), Slabs.
end());
100 DeallocateCustomSizedSlabs();
104 DeallocateSlabs(Slabs.
begin(), Slabs.
end());
105 DeallocateCustomSizedSlabs();
109 BytesAllocated =
RHS.BytesAllocated;
110 RedZoneSize =
RHS.RedZoneSize;
111 Slabs = std::move(
RHS.Slabs);
112 CustomSizedSlabs = std::move(
RHS.CustomSizedSlabs);
113 AllocTy::operator=(std::move(
RHS.getAllocator()));
115 RHS.CurPtr =
RHS.End =
nullptr;
116 RHS.BytesAllocated = 0;
118 RHS.CustomSizedSlabs.clear();
126 DeallocateCustomSizedSlabs();
127 CustomSizedSlabs.
clear();
134 CurPtr = (
char *)Slabs.
front();
135 End = CurPtr + SlabSize;
138 DeallocateSlabs(std::next(Slabs.
begin()), Slabs.
end());
151 BytesAllocated +=
Size;
153 uintptr_t AlignedPtr =
alignAddr(CurPtr, Alignment);
155 size_t SizeToAllocate =
Size;
156#if LLVM_ADDRESS_SANITIZER_BUILD
158 SizeToAllocate += RedZoneSize;
161 uintptr_t AllocEndPtr = AlignedPtr + SizeToAllocate;
162 assert(AllocEndPtr >= uintptr_t(CurPtr) &&
163 "Alignment + Size must not overflow");
168 && CurPtr !=
nullptr)) {
169 CurPtr =
reinterpret_cast<char *
>(AllocEndPtr);
176 return reinterpret_cast<char *
>(AlignedPtr);
185 size_t PaddedSize = SizeToAllocate + Alignment.
value() - 1;
186 if (PaddedSize > SizeThreshold) {
188 this->
getAllocator().Allocate(PaddedSize,
alignof(std::max_align_t));
192 CustomSizedSlabs.
push_back(std::make_pair(NewSlab, PaddedSize));
194 uintptr_t AlignedAddr =
alignAddr(NewSlab, Alignment);
195 assert(AlignedAddr +
Size <= (uintptr_t)NewSlab + PaddedSize);
196 char *AlignedPtr = (
char*)AlignedAddr;
204 uintptr_t AlignedAddr =
alignAddr(CurPtr, Alignment);
205 assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
206 "Unable to allocate memory!");
207 char *AlignedPtr = (
char*)AlignedAddr;
208 CurPtr = AlignedPtr + SizeToAllocate;
216 assert(Alignment > 0 &&
"0-byte alignment is not allowed. Use 1 instead.");
241 const char *
P =
static_cast<const char *
>(
Ptr);
242 int64_t InSlabIdx = 0;
244 const char *S =
static_cast<const char *
>(Slabs[
Idx]);
245 if (
P >= S &&
P < S + computeSlabSize(
Idx))
246 return InSlabIdx +
static_cast<int64_t
>(
P - S);
247 InSlabIdx +=
static_cast<int64_t
>(computeSlabSize(
Idx));
251 int64_t InCustomSizedSlabIdx = -1;
252 for (
const auto &Slab : CustomSizedSlabs) {
253 const char *S =
static_cast<const char *
>(Slab.first);
254 size_t Size = Slab.second;
255 if (
P >= S &&
P < S +
Size)
256 return InCustomSizedSlabIdx -
static_cast<int64_t
>(
P - S);
257 InCustomSizedSlabIdx -=
static_cast<int64_t
>(
Size);
268 assert(Out &&
"Wrong allocator used");
282 template <
typename T>
285 assert(Out %
alignof(
T) == 0 &&
"Wrong alignment information");
286 return Out /
alignof(
T);
290 size_t TotalMemory = 0;
292 TotalMemory += computeSlabSize(std::distance(Slabs.
begin(),
I));
293 for (
const auto &PtrAndSize : CustomSizedSlabs)
294 TotalMemory += PtrAndSize.second;
301 RedZoneSize = NewSize;
313 char *CurPtr =
nullptr;
327 size_t BytesAllocated = 0;
331 size_t RedZoneSize = 1;
333 static size_t computeSlabSize(
unsigned SlabIdx) {
339 ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
344 void StartNewSlab() {
345 size_t AllocatedSlabSize = computeSlabSize(Slabs.
size());
347 void *NewSlab = this->
getAllocator().Allocate(AllocatedSlabSize,
348 alignof(std::max_align_t));
354 CurPtr = (
char *)(NewSlab);
355 End = ((
char *)NewSlab) + AllocatedSlabSize;
359 void DeallocateSlabs(SmallVectorImpl<void *>::iterator
I,
360 SmallVectorImpl<void *>::iterator
E) {
361 for (;
I !=
E; ++
I) {
362 size_t AllocatedSlabSize =
363 computeSlabSize(std::distance(Slabs.
begin(),
I));
365 alignof(std::max_align_t));
370 void DeallocateCustomSizedSlabs() {
371 for (
auto &PtrAndSize : CustomSizedSlabs) {
372 void *
Ptr = PtrAndSize.first;
373 size_t Size = PtrAndSize.second;
412 auto DestroyElements = [](
char *Begin,
char *
End) {
415 reinterpret_cast<T *
>(
Ptr)->~
T();
420 size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
422 char *Begin = (
char *)
alignAddr(*
I, Align::Of<T>());
424 : (
char *)*
I + AllocatedSlabSize;
426 DestroyElements(Begin,
End);
429 for (
auto &PtrAndSize :
Allocator.CustomSizedSlabs) {
430 void *
Ptr = PtrAndSize.first;
431 size_t Size = PtrAndSize.second;
452template <
typename AllocatorT,
size_t SlabSize,
size_t SizeThreshold,
459 alignof(std::max_align_t)));
462template <
typename AllocatorT,
size_t SlabSize,
size_t SizeThreshold,
464void operator delete(
void *,
466 SizeThreshold, GrowthDelay> &) {
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines MallocAllocator.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define __asan_poison_memory_region(p, size)
#define __asan_unpoison_memory_region(p, size)
#define LLVM_ATTRIBUTE_NOINLINE
LLVM_ATTRIBUTE_NOINLINE - On compilers where we have a directive to do so, mark a method "not for inlining".
#define LLVM_ATTRIBUTE_RETURNS_NONNULL
#define __msan_allocated_memory(p, size)
#define LLVM_LIKELY(EXPR)
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
This file defines the SmallVector class.
CRTP base class providing obvious overloads for the core Allocate() methods of LLVM-style allocators.
Allocate memory in an ever growing pool, as if by bump-pointer.
size_t GetNumSlabs() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, size_t Alignment)
void setRedZoneSize(size_t NewSize)
std::optional< int64_t > identifyObject(const void *Ptr)
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
BumpPtrAllocatorImpl()=default
BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
int64_t identifyKnownAlignedObject(const void *Ptr)
A wrapper around identifyKnownObject.
size_t getBytesAllocated() const
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it, freeing all memory allocated so far.
void Deallocate(const void *Ptr, size_t Size, size_t)
BumpPtrAllocatorImpl(T &&Allocator)
size_t getTotalMemory() const
BumpPtrAllocatorImpl & operator=(BumpPtrAllocatorImpl &&RHS)
int64_t identifyKnownObject(const void *Ptr)
A wrapper around identifyObject that additionally asserts that the object is indeed within the allocator.
LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_NOINLINE void * AllocateSlow(size_t Size, size_t SizeToAllocate, Align Alignment)
iterator erase(const_iterator CI)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A BumpPtrAllocator that allows only elements of a specific type to be allocated.
~SpecificBumpPtrAllocator()
std::optional< int64_t > identifyObject(const void *Ptr)
SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
T * Allocate(size_t num=1)
Allocate space for an array of objects without constructing them.
void DestroyAll()
Call the destructor of each allocated object and deallocate all but the current slab and reset the current pointer to the beginning of it.
SpecificBumpPtrAllocator()
SpecificBumpPtrAllocator & operator=(SpecificBumpPtrAllocator &&RHS)
LLVM_ABI void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated, size_t TotalMemory)
This is an optimization pass for GlobalISel generic memory operations.
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
uintptr_t alignAddr(const void *Addr, Align Alignment)
Aligns Addr to Alignment bytes, rounding up.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.