#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <type_traits>

namespace llvm {

/// CRTP base class providing obvious overloads for the core Allocate()
/// methods of LLVM-style allocators.
template <typename DerivedT> class AllocatorBase {
public:
  /// Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }
  /// Deallocate \a Ptr to \a Size bytes of memory allocated by this allocator.
  void Deallocate(const void *Ptr, size_t Size) {
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(void *) overload!");
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }
  /// Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }

  /// Deallocate space for a sequence of objects without constructing them.
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};
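// Usage sketch (illustrative only, not part of the original header): a class
// derives from AllocatorBase<Derived> and implements the two core overloads;
// the CRTP base then provides the typed Allocate<T>/Deallocate<T> helpers.
// The class name "ForwardingAllocator" below is hypothetical.
//
//   class ForwardingAllocator : public AllocatorBase<ForwardingAllocator> {
//   public:
//     void *Allocate(size_t Size, size_t /*Alignment*/) {
//       return safe_malloc(Size);
//     }
//     void Deallocate(const void *Ptr, size_t /*Size*/) {
//       free(const_cast<void *>(Ptr));
//     }
//     // Pull in the templated overloads from the base class.
//     using AllocatorBase<ForwardingAllocator>::Allocate;
//     using AllocatorBase<ForwardingAllocator>::Deallocate;
//   };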
  // MallocAllocator's core Deallocate overload simply frees the pointer.
  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }
/// Allocate memory in an ever growing pool, as if by bump-pointer.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : Allocator(std::forward<T &&>(Allocator)) {}
  // Manually implement a move constructor as we must clear the old
  // allocator's slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }
  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }
  /// Deallocate all but the current slab and reset the current pointer to the
  /// beginning of it, freeing all memory allocated so far.
  void Reset() {
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();
    if (Slabs.empty())
      return;
    // Reset the state and keep only the first slab.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }
  /// Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under AddressSanitizer.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space in the current slab.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Tell MSan/ASan about the newly handed-out range.
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method, so poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }
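  // Illustrative walk-through (commentary added, not in the original header),
  // assuming the default SlabSize = 4096 and SizeThreshold = 4096:
  //  * Allocate(32, 8) with room left in the current slab bumps CurPtr and
  //    returns the aligned pointer (the fast path above).
  //  * Allocate(5000, 8) computes PaddedSize = 5000 + 8 - 1 = 5007, which
  //    exceeds SizeThreshold, so the request gets its own custom-sized slab.
  //  * A request that fits in a slab but not in the remaining space triggers
  //    StartNewSlab() and is carved out of the fresh slab.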
  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
  /// \return An index uniquely and reproducibly identifying an input pointer
  /// \p Ptr in the given allocator. The returned value is negative iff the
  /// object is inside a custom-sized slab. Returns an empty optional if the
  /// pointer is not found in the allocator.
  llvm::Optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative indices to denote custom-sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return None;
  }
  /// A wrapper around identifyObject that additionally asserts that the
  /// object is indeed within the allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    Optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts the type of the object and
  /// returns the object's index scaled down by its alignment.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }
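  // Illustrative sketch (not from the original header): identifyObject maps a
  // pointer owned by this allocator to a stable, allocator-relative index.
  //
  //   BumpPtrAllocator Alloc;
  //   void *A = Alloc.Allocate(16, 8);
  //   Optional<int64_t> Idx = Alloc.identifyObject(A);      // has a value
  //   Optional<int64_t> Bad = Alloc.identifyObject(&Alloc); // None: not owned
  //   // Non-negative indices refer to regular slabs, negative ones to
  //   // custom-sized slabs.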
  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }
  void setRedZoneSize(size_t NewSize) { RedZoneSize = NewSize; }
private:
  /// The current pointer into the current slab; allocations happen from here.
  char *CurPtr = nullptr;
  char *End = nullptr;
  /// How many bytes we've allocated (used to compute wasted space).
  size_t BytesAllocated = 0;
  /// Trailing red-zone bytes inserted between allocations under a sanitizer.
  size_t RedZoneSize = 1;
  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated: every 128 slabs the slab size doubles, saturating at a
    // multiplier of 2^30.
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }
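  // Worked example (commentary added, not in the original header): with the
  // default SlabSize of 4096, computeSlabSize returns 4096 for slabs 0..127,
  // 8192 for slabs 128..255, 16384 for slabs 256..383, and so on, doubling
  // every 128 slabs and saturating at a 2^30 multiplier of SlabSize.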
  /// Allocate a new slab and move the bump pointers over into the new slab,
  /// modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }
  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
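// Usage sketch for the typedef above (illustrative, not part of the original
// header): objects are carved out of slabs by bumping a pointer; there is no
// per-object free, memory is reclaimed wholesale.
//
//   BumpPtrAllocator Alloc;
//   int *Four = Alloc.Allocate<int>(4);   // templated helper from AllocatorBase
//   void *Raw = Alloc.Allocate(128, 16);  // core overload, 16-byte aligned
//   Alloc.Reset();                        // frees everything at once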
/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated, so their destructors can be run by DestroyAll().
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }
  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, alignof(T)));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, alignof(T));
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;
      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
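// Usage sketch (illustrative, not part of the original header): unlike the raw
// BumpPtrAllocator, SpecificBumpPtrAllocator<T> walks its slabs in DestroyAll()
// and in its destructor to run ~T() on every slot, so T's destructor is not
// skipped. The struct name "Node" is hypothetical.
//
//   struct Node { int Value; };
//   SpecificBumpPtrAllocator<Node> NodeAlloc;
//   Node *N = new (NodeAlloc.Allocate()) Node{42};
//   // All Nodes are destroyed and the memory released when NodeAlloc goes
//   // out of scope (or explicitly via DestroyAll()).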
} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
}
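// Usage sketch for the operators above (illustrative, not part of the original
// header): the placement new form allocates from the given BumpPtrAllocator;
// the matching operator delete is a no-op and exists only so the compiler can
// clean up if a constructor throws. The type "Widget" is hypothetical.
//
//   llvm::BumpPtrAllocator Alloc;
//   struct Widget { int X; };
//   Widget *W = new (Alloc) Widget{7};   // memory comes from Alloc
//   // Do not call `delete W`; the allocator reclaims the memory in bulk.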
#endif // LLVM_SUPPORT_ALLOCATOR_H