//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
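//
// Illustrative sketch only (not part of this header): a Params class is
// expected to provide the members referenced by the class below. The
// ExampleAllocatorParams name and the concrete values are assumptions made
// purely for illustration; real tools define their own parameter structs.
//
//   struct ExampleAllocatorParams {
//     static const uptr kSpaceBeg = ~(uptr)0;           // Pick SpaceBeg via mmap.
//     static const uptr kSpaceSize = 0x40000000000ULL;  // 4T of address space.
//     static const uptr kMetadataSize = 0;
//     typedef DefaultSizeClassMap SizeClassMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags =
//         SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
//   };
//   typedef SizeClassAllocator64<ExampleAllocatorParams> ExampleAllocator;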
struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <class Params>
class SizeClassAllocator64 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }
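  // Worked example (illustrative comment only): with kCompactPtrScale == 4, a
  // 16-byte-aligned chunk at region_beg + 0x12340 is stored in the free array
  // as the 32-bit value 0x1234:
  //   PointerToCompactPtr(region_beg, region_beg + 0x12340) == 0x1234
  //   CompactPtrToPointer(region_beg, 0x1234) == region_beg + 0x12340
  // This is also why kRegionSize is limited to 2^36 (see the COMPILER_CHECKs
  // below): 32 bits of compact pointer plus 4 bits regained from the scale.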
  void Init(s32 release_to_os_interval_ms) {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    if (kUsingConstantSpaceBeg) {
      CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
                              MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
    } else {
      NonConstSpaceBeg =
          reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
    }
    SetReleaseToOSIntervalMs(release_to_os_interval_ms);
    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
  }
  s32 ReleaseToOSIntervalMs() const {
    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
                 memory_order_relaxed);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }
  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    // Failure to allocate free array space while releasing memory is
    // non-recoverable.
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
                                       new_num_freed_chunks)))
      DieOnFailure::OnOOM();
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->stats.n_freed += n_chunks;

    MaybeReleaseToOS(class_id);
  }
  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
                                      n_chunks - region->num_freed_chunks)))
        return false;
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->stats.n_allocated += n_chunks;
    return true;
  }
  bool PointerIsMine(const void *p) {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }
  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
           space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) const {
    return SpaceBeg() + kRegionSize * class_id;
  }
  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }
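  // Worked example (illustrative comment only, assumed values): if a pointer p
  // lies 7 * kRegionSize + 1000 bytes past SpaceBeg(), then GetSizeClass(p)
  // == 7, GetRegionBegin(p) == GetRegionBeginBySizeClass(7), and the chunk
  // index within the region is 1000 / ClassIdToSize(7) (see GetChunkIdx
  // below).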
  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return nullptr;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }
  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }
  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
  }
  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
                                uptr stats_size) {
    for (uptr class_id = 0; class_id < stats_size; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }
  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
        "last released: %6zdK region: 0x%zx\n",
        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
        region->mapped_user >> 10, region->stats.n_allocated,
        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
        rss >> 10, region->rtoi.num_releases,
        region->rtoi.last_released_bytes >> 10,
        SpaceBeg() + kRegionSize * class_id);
  }
  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->stats.n_allocated;
      n_freed += region->stats.n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }
  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }
  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }
  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
  // A packed array of counters. Each counter occupies 2^n bits, enough to store
  // counter's max_value. Ctor will try to allocate the required buffer via
  // mapper->MapPackedCounterArrayBuffer and the caller is expected to check
  // whether the initialization was successful by checking IsAllocated() result.
  // For performance reasons none of the accessors check the validity of the
  // arguments; it is assumed that the index is always in the [0, n) range and
  // the value is not incremented past max_value.
  template<class MemoryMapperT>
  class PackedCounterArray {
   public:
    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
        : n(num_counters), memory_mapper(mapper) {
      CHECK_GT(num_counters, 0);
      CHECK_GT(max_value, 0);
      constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      // Rounding the counter storage size up to a power of two allows using
      // bit shifts to calculate a particular counter's index and offset.
      uptr counter_size_bits =
          RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
      CHECK_LE(counter_size_bits, kMaxCounterBits);
      counter_size_bits_log = Log2(counter_size_bits);
      counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);

      uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
      CHECK_GT(packing_ratio, 0);
      packing_ratio_log = Log2(packing_ratio);
      bit_offset_mask = packing_ratio - 1;

      buffer_size =
          (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
          sizeof(*buffer);
      buffer = reinterpret_cast<u64*>(
          memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
    }
    ~PackedCounterArray() {
      if (buffer) {
        memory_mapper->UnmapPackedCounterArrayBuffer(
            reinterpret_cast<uptr>(buffer), buffer_size);
      }
    }
    bool IsAllocated() const {
      return !!buffer;
    }

    u64 GetCount() const {
      return n;
    }

    uptr Get(uptr i) const {
      DCHECK_LT(i, n);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      return (buffer[index] >> bit_offset) & counter_mask;
    }

    void Inc(uptr i) const {
      DCHECK_LT(Get(i), counter_mask);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      buffer[index] += 1ULL << bit_offset;
    }

    void IncRange(uptr from, uptr to) const {
      DCHECK_LE(from, to);
      for (uptr i = from; i <= to; i++)
        Inc(i);
    }

   private:
    const u64 n;
    u64 counter_size_bits_log;
    u64 counter_mask;
    u64 packing_ratio_log;
    u64 bit_offset_mask;

    MemoryMapperT* const memory_mapper;
    u64 buffer_size;
    u64* buffer;
  };
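  // Worked example (illustrative comment only): for max_value == 14 the
  // constructor picks counter_size_bits == 4, so counter_mask == 0xf and
  // packing_ratio == 64 / 4 == 16 counters per u64 word. Counter i then lives
  // in buffer[i >> 4] at bit offset (i & 15) * 4, which is exactly what Get()
  // and Inc() compute with shifts and masks.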
  template<class MemoryMapperT>
  class FreePagesRangeTracker {
   public:
    explicit FreePagesRangeTracker(MemoryMapperT* mapper)
        : memory_mapper(mapper),
          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
          in_the_range(false), current_page(0), current_range_start_page(0) {}

    void NextPage(bool freed) {
      if (freed) {
        if (!in_the_range) {
          current_range_start_page = current_page;
          in_the_range = true;
        }
      } else {
        CloseOpenedRange();
      }
      current_page++;
    }

    void Done() {
      CloseOpenedRange();
    }

   private:
    void CloseOpenedRange() {
      if (in_the_range) {
        memory_mapper->ReleasePageRangeToOS(
            current_range_start_page << page_size_scaled_log,
            current_page << page_size_scaled_log);
        in_the_range = false;
      }
    }

    MemoryMapperT* const memory_mapper;
    const uptr page_size_scaled_log;
    bool in_the_range;
    uptr current_page;
    uptr current_range_start_page;
  };
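  // Usage sketch (illustrative comment only): feeding the tracker the
  // per-page "fully freed" flags 0, 1, 1, 0, 1 via NextPage() reports the page
  // range [1, 3) to the memory mapper as soon as the fourth (non-freed) page
  // closes it, and the trailing range [4, 5) only when Done() flushes the
  // still-open range. The page numbers are shifted by page_size_scaled_log
  // before being passed to ReleasePageRangeToOS().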
  // Iterates over the free_array to identify memory pages containing freed
  // chunks only and returns these pages back to OS.
  // allocated_pages_count is the total number of pages allocated for the
  // current bucket.
  template<class MemoryMapperT>
  static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                    uptr free_array_count, uptr chunk_size,
                                    uptr allocated_pages_count,
                                    MemoryMapperT *memory_mapper) {
    const uptr page_size = GetPageSizeCached();

    // Figure out the number of chunks per page and whether we can take a fast
    // path (the number of chunks per page is the same for all pages).
    uptr full_pages_chunk_count_max;
    bool same_chunk_count_per_page;
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Same number of chunks per page, no cross overs.
      full_pages_chunk_count_max = page_size / chunk_size;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
        chunk_size % (page_size % chunk_size) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 2;
      same_chunk_count_per_page = false;
    } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      // One chunk covers multiple pages, no cross overs.
      full_pages_chunk_count_max = 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size > page_size) {
      // One chunk covers multiple pages, some chunks are crossing page
      // boundaries. Some pages contain one chunk, some contain two.
      full_pages_chunk_count_max = 2;
      same_chunk_count_per_page = false;
    } else {
      UNREACHABLE("All chunk_size/page_size ratios must be handled.");
    }
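    // Worked example (illustrative comment only, assuming a 4096-byte page):
    //   chunk_size 64:   4096 % 64 == 0, 64 chunks per page, fast path.
    //   chunk_size 48:   4096 % 48 == 16 and 48 % 16 == 0, every page is
    //                    touched by 86 (possibly partial) chunks, fast path.
    //   chunk_size 112:  the remainders do not line up, pages are touched by
    //                    37 or 38 chunks, slow path.
    //   chunk_size 8192: each chunk covers exactly two pages, fast path.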
    PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
                                               full_pages_chunk_count_max,
                                               memory_mapper);
    if (!counters.IsAllocated())
      return;

    const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
    const uptr page_size_scaled = page_size >> kCompactPtrScale;
    const uptr page_size_scaled_log = Log2(page_size_scaled);
    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Each chunk affects one page only.
      for (uptr i = 0; i < free_array_count; i++)
        counters.Inc(free_array[i] >> page_size_scaled_log);
    } else {
      // In all other cases chunks might affect more than one page.
      for (uptr i = 0; i < free_array_count; i++) {
        counters.IncRange(
            free_array[i] >> page_size_scaled_log,
            (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
      }
    }
    // Iterate over pages detecting ranges of pages with chunk counters equal
    // to the expected number of chunks for the particular page.
    FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
    if (same_chunk_count_per_page) {
      // Fast path, every page has the same number of chunks affecting it.
      for (uptr i = 0; i < counters.GetCount(); i++)
        range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
    } else {
      // Slow path, go through the pages keeping count of how many chunks
      // affect each page.
      const uptr pn =
          chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
      const uptr pnc = pn * chunk_size_scaled;
      // The idea is to increment the current page pointer by the first chunk
      // size, middle portion size (the portion of the page covered by chunks
      // except the first and the last one) and then the last chunk size, adding
      // up the number of chunks on the current page and checking on every step
      // whether the page boundary was crossed.
      uptr prev_page_boundary = 0;
      uptr current_boundary = 0;
      for (uptr i = 0; i < counters.GetCount(); i++) {
        uptr page_boundary = prev_page_boundary + page_size_scaled;
        uptr chunks_per_page = pn;
        if (current_boundary < page_boundary) {
          if (current_boundary > prev_page_boundary)
            chunks_per_page++;
          current_boundary += pnc;
          if (current_boundary < page_boundary) {
            chunks_per_page++;
            current_boundary += chunk_size_scaled;
          }
        }
        prev_page_boundary = page_boundary;

        range_tracker.NextPage(counters.Get(i) == chunks_per_page);
      }
    }
    range_tracker.Done();
  }

 private:
  friend class MemoryMapper;
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;
  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 16;
  atomic_sint32_t release_to_os_interval_ms_;

  struct Stats {
    uptr n_allocated;
    uptr n_freed;
  };

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
    u64 last_release_at_ns;
    u64 last_released_bytes;
  };

  struct RegionInfo {
    BlockingMutex mutex;
    uptr num_freed_chunks;  // Number of elements in the freearray.
    uptr mapped_free_array;  // Bytes mapped for freearray.
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    bool exhausted;  // Whether region is out of space for new chunks.
    Stats stats;
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
  u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
    return (*state = *state * 1103515245 + 12345) >> 16;
  }

  u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)

  void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
    if (n <= 1) return;
    for (u32 i = n - 1; i > 0; i--)
      Swap(a[i], a[RandN(rand_state, i + 1)]);
  }
  RegionInfo *GetRegionInfo(uptr class_id) const {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions =
        reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) const {
    return region_beg + kRegionSize - kFreeArraySize;
  }
  uptr GetChunkIdx(uptr chunk, uptr size) const {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  CompactPtrT *GetFreeArray(uptr region_beg) const {
    return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
  }
  bool MapWithCallback(uptr beg, uptr size) {
    uptr mapped = reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(beg, size));
    if (UNLIKELY(!mapped))
      return false;
    CHECK_EQ(beg, mapped);
    MapUnmapCallback().OnMap(beg, size);
    return true;
  }

  void MapWithCallbackOrDie(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }
  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      CHECK_LE(new_mapped_free_array, kFreeArraySize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size)))
        return false;
      region->mapped_free_array = new_mapped_free_array;
    }
    return true;
  }
  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    const uptr size = ClassIdToSize(class_id);
    const uptr new_space_beg = region->allocated_user;
    const uptr new_space_end = new_space_beg + requested_count * size;
    const uptr region_beg = GetRegionBeginBySizeClass(class_id);

    // Map more space for chunks, if necessary.
    if (new_space_end > region->mapped_user) {
      if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
        region->rand_state = static_cast<u32>(region_beg >> 12);  // From ASLR.
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (new_space_end > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, new_space_end);
      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
                                    map_size)))
        return false;
      stat->Add(AllocatorStatMapped, map_size);
      region->mapped_user += map_size;
    }
    const uptr new_chunks_count = (region->mapped_user - new_space_beg) / size;

    // Calculate the required space for metadata.
    const uptr requested_allocated_meta =
        region->allocated_meta + new_chunks_count * kMetadataSize;
    uptr requested_mapped_meta = region->mapped_meta;
    while (requested_allocated_meta > requested_mapped_meta)
      requested_mapped_meta += kMetaMapSize;
    // Check whether this size class is exhausted.
    if (region->mapped_user + requested_mapped_meta >
        kRegionSize - kFreeArraySize) {
      if (!region->exhausted) {
        region->exhausted = true;
        Printf("%s: Out of memory. ", SanitizerToolName);
        Printf("The process has exhausted %zuMB for size class %zu.\n",
               kRegionSize >> 20, size);
      }
      return false;
    }
    // Map more space for metadata, if necessary.
    if (requested_mapped_meta > region->mapped_meta) {
      if (UNLIKELY(!MapWithCallback(
              GetMetadataEnd(region_beg) - requested_mapped_meta,
              requested_mapped_meta - region->mapped_meta)))
        return false;
      region->mapped_meta = requested_mapped_meta;
    }

    // If necessary, allocate more space for the free array and populate it
    // with newly allocated chunks.
    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
      return false;
    CompactPtrT *free_array = GetFreeArray(region_beg);
    for (uptr i = 0, chunk = new_space_beg; i < new_chunks_count;
         i++, chunk += size)
      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
                    &region->rand_state);

    // All necessary memory is mapped and now it is safe to advance all
    // 'allocated_*' counters.
    region->num_freed_chunks += new_chunks_count;
    region->allocated_user += new_chunks_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta = requested_allocated_meta;
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    region->exhausted = false;

    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
    // MaybeReleaseToOS from releasing just allocated pages or protect these
    // not yet used chunks some other way.

    return true;
  }
  class MemoryMapper {
   public:
    MemoryMapper(const ThisT& base_allocator, uptr class_id)
        : allocator(base_allocator),
          region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
          released_ranges_count(0),
          released_bytes(0) {
    }

    uptr GetReleasedRangesCount() const {
      return released_ranges_count;
    }

    uptr GetReleasedBytes() const {
      return released_bytes;
    }

    uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
      // TODO(alekseyshl): The idea to explore is to check if we have enough
      // space between num_freed_chunks*sizeof(CompactPtrT) and
      // mapped_free_array to fit buffer_size bytes and use that space instead
      // of mapping a temporary one.
      return reinterpret_cast<uptr>(
          MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
    }

    void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
      UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
    }

    // Releases [from, to) range of pages back to OS.
    void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
      const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
      const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
      ReleaseMemoryPagesToOS(from_page, to_page);
      released_ranges_count++;
      released_bytes += to_page - from_page;
    }

   private:
    const ThisT& allocator;
    const uptr region_base;
    uptr released_ranges_count;
    uptr released_bytes;
  };
  // Attempts to release RAM occupied by freed chunks back to OS. The region is
  // expected to be locked.
  void MaybeReleaseToOS(uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    const uptr chunk_size = ClassIdToSize(class_id);
    const uptr page_size = GetPageSizeCached();

    uptr n = region->num_freed_chunks;
    if (n * chunk_size < page_size)
      return;  // No chance to release anything.
    if ((region->stats.n_freed -
         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
      return;  // Nothing new to release.
    }

    s32 interval_ms = ReleaseToOSIntervalMs();
    if (interval_ms < 0)
      return;

    if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
      return;  // Memory was returned recently.

    MemoryMapper memory_mapper(*this, class_id);

    ReleaseFreeMemoryToOS<MemoryMapper>(
        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
        RoundUpTo(region->allocated_user, page_size) / page_size,
        &memory_mapper);

    if (memory_mapper.GetReleasedRangesCount() > 0) {
      region->rtoi.n_freed_at_last_release = region->stats.n_freed;
      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
    }
    region->rtoi.last_release_at_ns = NanoTime();
  }
};