//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
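//
// Illustrative only (not part of this header): a hypothetical Params class
// wiring up this allocator. The member names are the ones the template reads
// below; the concrete values and the DefaultSizeClassMap/NoOpMapUnmapCallback
// helpers are assumptions for the sketch.
//
//   struct ExampleAP64 {
//     static const uptr kSpaceBeg = ~(uptr)0;     // pick SpaceBeg via mmap
//     static const uptr kSpaceSize = 1ULL << 40;  // 1T of address space
//     static const uptr kMetadataSize = 0;
//     typedef DefaultSizeClassMap SizeClassMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags =
//         SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
//   };
//   typedef SizeClassAllocator64<ExampleAP64> ExamplePrimaryAllocator;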

struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <class Params>
class SizeClassAllocator64 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }
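
  // Worked example (values are illustrative, not from this header): with
  // kCompactPtrScale == 4, a chunk at byte offset 0x230 from its region base
  // is stored as the compact value 0x23. A 32-bit CompactPtrT can therefore
  // encode offsets up to (1ULL << 32) << 4 == 2^36 bytes, which is what caps
  // kRegionSize below.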

  void Init() {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    if (kUsingConstantSpaceBeg) {
      CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
                              MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
    } else {
      NonConstSpaceBeg =
          reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
    }
    MapWithCallback(SpaceEnd(), AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->n_freed += n_chunks;
  }

  NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      PopulateFreeArray(stat, class_id, region,
                        n_chunks - region->num_freed_chunks);
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->n_allocated += n_chunks;
  }

  bool PointerIsMine(const void *p) {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }

  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
           space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) {
    return SpaceBeg() + kRegionSize * class_id;
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return nullptr;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }
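
  // Metadata is laid out downward from GetMetadataEnd(region_beg): the
  // MetaChunk for chunk index i lives at GetMetadataEnd(region_beg) -
  // (i + 1) * kMetadataSize, matching the "MetaChunkN ... MetaChunk1" picture
  // in the header comment above.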

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  void TestOnlyUnmap() {
    UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
  }

  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
                                uptr stats_size) {
    for (uptr class_id = 0; class_id < stats_size; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }

  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->n_allocated - region->n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
        "num_freed_chunks %zd"
        " avail: %zd rss: %zdK releases: %zd\n",
        class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
        region->n_allocated, region->n_freed, in_use,
        region->num_freed_chunks, avail_chunks, rss >> 10,
        region->rtoi.num_releases);
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  void ReleaseToOS() {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      ReleaseToOS(class_id);
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;
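  // Sizing sketch (hypothetical numbers, purely to illustrate the trade-off):
  // with a SizeClassMap::kMinSize of 16, the worst case would need
  // kRegionSize/16 entries, i.e. kRegionSize/4 bytes of CompactPtrT, whereas
  // only kRegionSize/8 is reserved; this is fine because, as noted above, a
  // region is never populated entirely by minimum-size free-d chunks.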

  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
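  // On 64-bit targets SANITIZER_WORDSIZE is 64, so the upper bound above is
  // 1ULL << 36: a 32-bit CompactPtrT shifted left by kCompactPtrScale (4) can
  // address at most 2^32 * 2^4 == 2^36 bytes within a region.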
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 16;
  // Granularity of ReleaseToOs (aka madvise).
  static const uptr kReleaseToOsGranularity = 1 << 12;

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
  };

  struct RegionInfo {
    BlockingMutex mutex;
    uptr num_freed_chunks;  // Number of elements in the freearray.
    uptr mapped_free_array;  // Bytes mapped for freearray.
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    uptr n_allocated, n_freed;  // Just stats.
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
    return (*state = *state * 1103515245 + 12345) >> 16;
  }

  u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)

  void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
    if (n <= 1) return;
    for (u32 i = n - 1; i > 0; i--)
      Swap(a[i], a[RandN(rand_state, i + 1)]);
  }
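
  // RandomShuffle() is a Fisher-Yates shuffle driven by the LCG in Rand();
  // PopulateFreeArray uses it to randomize the order of newly added free
  // chunks when kRandomShuffleChunks is set.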

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions =
        reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) {
    return region_beg + kRegionSize - kFreeArraySize;
  }

  uptr GetChunkIdx(uptr chunk, uptr size) {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }
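
  // On 64-bit targets SANITIZER_WORDSIZE / 2 == 32, so "offset >> 32" asks
  // whether the offset fits in 32 bits; when it does, the cheaper 32-bit
  // division is taken. For example (illustrative numbers), a chunk at offset
  // 0x300 in a 48-byte size class yields (u32)0x300 / (u32)48 == 16.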

  CompactPtrT *GetFreeArray(uptr region_beg) {
    return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
                                           kFreeArraySize);
  }

  void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      CHECK_LE(needed_space, kFreeArraySize);
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      MapWithCallback(current_map_end, new_map_size);
      region->mapped_free_array = new_mapped_free_array;
    }
  }

  NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    uptr size = ClassIdToSize(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + requested_count * size;
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    if (end_idx > region->mapped_user) {
      if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
        region->rand_state = static_cast<u32>(region_beg >> 12);  // From ASLR.
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMapped, map_size);
      region->mapped_user += map_size;
    }
    CompactPtrT *free_array = GetFreeArray(region_beg);
    uptr total_count = (region->mapped_user - beg_idx) / size;
    uptr num_freed_chunks = region->num_freed_chunks;
    EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
    for (uptr i = 0; i < total_count; i++) {
      uptr chunk = beg_idx + i * size;
      free_array[num_freed_chunks + total_count - 1 - i] =
          PointerToCompactPtr(0, chunk);
    }
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[num_freed_chunks], total_count,
                    &region->rand_state);
    region->num_freed_chunks += total_count;
    region->allocated_user += total_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);

    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(GetMetadataEnd(region_beg) -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta >
        kRegionSize - kFreeArraySize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize / 1024 / 1024, size);
      Die();
    }
  }

  bool MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
                              CompactPtrT first, CompactPtrT last) {
    uptr beg_ptr = CompactPtrToPointer(region_beg, first);
    uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
    CHECK_GE(end_ptr - beg_ptr, kReleaseToOsGranularity);
    beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity);
    end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity);
    if (end_ptr == beg_ptr) return false;
    ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr);
    return true;
  }
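
  // Example (with the default 4K kReleaseToOsGranularity): a free run covering
  // [0x12345, 0x1A000) is trimmed to the page-aligned [0x13000, 0x1A000)
  // before ReleaseMemoryToOS; if the trimmed range ends up empty, nothing is
  // released and false is returned.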

  // Releases some RAM back to OS.
  // Algorithm:
  // * Lock the region.
  // * Sort the chunks.
  // * Find ranges fully covered by free-d chunks.
  // * Release them to OS with madvise.
  //
  // TODO(kcc): make sure we don't do it too frequently.
  void ReleaseToOS(uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);
    uptr chunk_size = ClassIdToSize(class_id);
    uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
    const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
    BlockingMutexLock l(&region->mutex);
    uptr n = region->num_freed_chunks;
    if (n * chunk_size < kReleaseToOsGranularity)
      return;  // No chance to release anything.
    if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
        kReleaseToOsGranularity)
      return;  // Nothing new to release.
    SortArray(free_array, n);
    uptr beg = free_array[0];
    uptr prev = free_array[0];
    for (uptr i = 1; i < n; i++) {
      uptr chunk = free_array[i];
      CHECK_GT(chunk, prev);
      if (chunk - prev != scaled_chunk_size) {
        CHECK_GT(chunk - prev, scaled_chunk_size);
        if (prev + scaled_chunk_size - beg >= kScaledGranularity) {
          MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev);
          region->rtoi.n_freed_at_last_release = region->n_freed;
          region->rtoi.num_releases++;