//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class FailureHandlerT = ReturnNullOrDieOnFailure>
class LargeMmapAllocator {
 public:
  typedef FailureHandlerT FailureHandler;

  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size)
      return FailureHandler::OnBadRequest();
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
    if (!map_beg)
      return FailureHandler::OnOOM();
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h);
  }
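
  // Example of the search above (made-up addresses, for illustration only):
  // with chunk headers at 0x1000 and 0x5000 and p = 0x5234, both satisfy
  // p >= ch, and 0x5000 wins because p - 0x5000 < p - 0x1000; the final range
  // check then confirms p actually falls inside that chunk's mapping.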

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    SortArray(reinterpret_cast<uptr *>(chunks_), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      chunks_[i]->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
    auto max_mmap_ =
        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h);
  }
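
  // Worked example of the lower_bound above (made-up addresses, illustration
  // only): sorted chunk starts {0x1000, 0x3000, 0x7000, 0x9000}, p = 0x7500.
  //   beg = 0, end = 3; mid = 1: p >= 0x3000, so beg = 1;
  //   mid = 2: p >= 0x7000, so beg = 2; loop exits (end - beg < 2).
  // Since beg < end and p < chunks_[3] == 0x9000, beg stays 2, and chunks_[2]
  // is range-checked and returned.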
195 Printf("Stats: LargeMmapAllocator: allocated %zd times, "
196 "remains %zd (%zd K) max %zd M; by size logs: ",
197 stats
.n_allocs
, stats
.n_allocs
- stats
.n_frees
,
198 stats
.currently_allocated
>> 10, stats
.max_allocated
>> 20);
199 for (uptr i
= 0; i
< ARRAY_SIZE(stats
.by_size_log
); i
++) {
200 uptr c
= stats
.by_size_log
[i
];
202 Printf("%zd:%zd; ", i
, c
);

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    for (uptr i = 0; i < n_chunks_; i++) {
      auto t = chunks_[i];
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks_[i], t);
      CHECK_EQ(chunks_[i]->chunk_idx, i);
    }
  }
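
  // Typical call pattern (a sketch; the real call sites live in the sanitizer
  // runtimes, not in this header, and MyCallback/my_state are hypothetical):
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(MyCallback, &my_state);
  //   allocator.ForceUnlock();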

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header *>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  SpinMutex mutex_;
};
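
// Usage sketch (illustrative only; real clients are the sanitizer runtimes,
// and the surrounding types are assumed from the names used in this header):
//   LargeMmapAllocator<> a;
//   AllocatorStats stats;
//   a.Init();
//   stats.Init();
//   void *p = a.Allocate(&stats, 1 << 20, 8);  // 1 MiB, 8-byte alignment.
//   if (p) {
//     CHECK(a.PointerIsMine(p));
//     a.Deallocate(&stats, p);
//   }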