//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#if __WORDSIZE != 64
# error "sanitizer_allocator64.h can only be used on 64-bit platforms"
#endif

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0  + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

  // Max number of blocks cached in the local cache, per size class.
  static const uptr c0 = 256;
  static const uptr c1 = 64;
  static const uptr c2 = 16;
  static const uptr c3 = 4;
  static const uptr c4 = 1;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }

  static uptr ClassID(uptr size) {
    if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};
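
// Worked example of the mapping above (illustrative arithmetic only, derived
// from the default constants): with l0 = 16 and s0 = 16, ClassID(100) computes
// (100 - 16 + 15) / 16 == 6, and Size(6) == 16 + 16 * 6 == 112, so a 100-byte
// request is rounded up to a 112-byte chunk and Size(ClassID(s)) >= s holds.
// With these defaults u4 == 255, hence kNumClasses == 256, which is exactly
// the limit enforced by COMPILER_CHECK(kNumClasses <= 256).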

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;

// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
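//
// For example (illustrative numbers only): with kSpaceSize == 2^40 and
// kNumClasses == 256, each Region spans kRegionSize == 2^32 bytes.  A chunk at
// address p in the Region of class_id has
//   chunk_idx == (p % kRegionSize) / Size(class_id)
// and its MetaChunk lives near the top of the same Region, at
//   kSpaceBeg + kRegionSize * (class_id + 1) - (chunk_idx + 1) * kMetadataSize,
// which is what GetMetaData() below computes.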
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    uptr count = SizeClassMap::MaxCached(class_id);
    if (region->free_list.size() <= count) {
      free_list->append_front(&region->free_list);
    } else {
      for (uptr i = 0; i < count; i++) {
        AllocatorListNode *node = region->free_list.front();
        region->free_list.pop_front();
        free_list->push_front(node);
      }
    }
    CHECK(!free_list->empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  static void *GetBlockBegin(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr begin = reg_beg + chunk_idx * size;
    return (void*)begin;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg() { return kSpaceBeg; }
  static uptr AllocEnd() { return kSpaceBeg + kSpaceSize + AdditionalSize(); }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  static const uptr kNumClasses = 256;  // Power of two <= 256
  typedef SizeClassMap SizeClassMapT;

 private:
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this many bytes at once,
  // or with a single chunk if the chunk size is greater.
  static const uptr kPopulateSize = 1 << 18;
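  // E.g. with the default DefaultSizeClassMap, one PopulateFreeList() call
  // carves out 2^18 / 16 == 16384 chunks for the 16-byte class, but only a
  // single chunk for the largest (2 MB) class, since kPopulateSize is smaller
  // than that chunk size.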

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)size;
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};
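
// A primary allocator is instantiated with compile-time constants.  A
// hypothetical instantiation (values chosen only to satisfy the
// COMPILER_CHECKs above, not taken from any particular tool):
//   typedef SizeClassAllocator64<0x700000000000ULL /*kSpaceBeg*/,
//                                0x010000000000ULL /*kSpaceSize == 2^40*/,
//                                16                /*kMetadataSize*/,
//                                DefaultSizeClassMap> PrimaryAllocator;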

// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
template<const uptr kNumClasses, class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};
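
// Illustrative wiring of the cache to a primary allocator (hypothetical names,
// continuing the PrimaryAllocator sketch above; in real use there is one cache
// per thread, typically in TLS):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
//                                        PrimaryAllocator> AllocatorCache;
//   PrimaryAllocator primary;  // primary.Init() once at startup.
//   AllocatorCache cache;      // Zero-initialized global needs no Init().
//   void *p = cache.Allocate(&primary, primary.ClassID(100));
//   cache.Deallocate(&primary, primary.GetSizeClass(p), p);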

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > kPageSize)
      map_size += alignment;
    if (map_size < size) return 0;  // Overflow.
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + kPageSize;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK_EQ(0, res & (alignment - 1));
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(void *p) {
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      void *b = GetUser(l);
      if (p >= b && p < (u8*)b + l->size)
        return b;
    }
    return 0;
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(uptr p) { return reinterpret_cast<Header*>(p - kPageSize); }
  Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};
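
// Layout of a LargeMmapAllocator chunk in the common case alignment <= kPageSize
// (a sketch derived from Allocate() above; larger alignments shift the user
// pointer further into the mapping):
//
//   map_beg                   map_beg + kPageSize == user pointer p
//   | Header | metadata ...   | user data (size bytes) ... | page rounding |
//
// GetHeader(p) looks one page below the user pointer, and GetMetaData(p)
// returns the bytes just past the Header, which is why at least kPageSize/2
// metadata bytes are available.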

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
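
// Putting it all together (hypothetical sketch, continuing the PrimaryAllocator
// and AllocatorCache typedefs from the comments above):
//   typedef LargeMmapAllocator SecondaryAllocator;
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//                             SecondaryAllocator> Allocator;
//   static Allocator allocator;  // allocator.Init() once at startup.
//   // Per thread: a zero-initialized AllocatorCache cache, then
//   //   void *p = allocator.Allocate(&cache, size, alignment);
//   //   allocator.Deallocate(&cache, p);
//   // and allocator.SwallowCache(&cache) when the thread exits.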

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H