//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// It is used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// memory for the header.
//
// Another important feature is that the size class of a pointer is computed
// without any memory accesses by simply looking at the address.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR64_H
#define SANITIZER_ALLOCATOR64_H

#include "sanitizer_allocator.h"

namespace __sanitizer {

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
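//
// For illustration only (hypothetical parameters, not values mandated by this
// header): with kSpaceSize = 2^40 and 256 size classes, each Region is
// 2^32 bytes, so GetSizeClass(p) reduces to bits [32..39] of the pointer
// (kSpaceBeg is kSpaceSize-aligned), i.e. a pointer is classified without
// touching memory.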
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

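  // The two bulk interfaces below move chunks between a region's central
  // free list and a caller-owned list in a single locked step. Presumably
  // this lets a per-thread cache refill or drain many chunks while taking
  // the region mutex only once (an assumption about the intended caller;
  // this header does not define such a cache).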
  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    uptr count = SizeClassMap::MaxCached(class_id);
    if (region->free_list.size() <= count) {
      free_list->append_front(&region->free_list);
    } else {
      for (uptr i = 0; i < count; i++) {
        AllocatorListNode *node = region->free_list.front();
        region->free_list.pop_front();
        free_list->push_front(node);
      }
    }
    CHECK(!free_list->empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  static void *GetBlockBegin(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr begin = reg_beg + chunk_idx * size;
    return reinterpret_cast<void*>(begin);
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

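  // Maps a user chunk to its MetaChunk. Metadata chunks are packed backwards
  // from the end of the region (mirroring the layout pictured above), so the
  // address is derived from the pointer alone, without touching memory.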
  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg()  { return kSpaceBeg; }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;
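  // E.g. a 64-byte size class gets 2^18 / 64 = 4096 chunks linked into the
  // free list per populate step, while a size class larger than kPopulateSize
  // gets a single chunk per step.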

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr PageSize = GetPageSizeCached();
    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
    CHECK_EQ(res % PageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)size;
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    if (region->allocated_user + region->allocated_meta > kRegionSize) {
      Printf("Out of memory. Dying.\n");
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize / 1024 / 1024, size);
      Die();
    }
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR64_H