//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
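//
// Usage sketch (hypothetical parameters, for illustration only): a tool
// instantiates the allocator through a Params struct along these lines:
//
//   struct AP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     using AddressSpaceView = LocalAddressSpaceView;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<AP32> PrimaryAllocator;
//
// With kRegionSizeLog = 20 a Region is 1 MiB, so a 4 GiB space has
// kNumPossibleRegions = 4096 and possible_regions needs one byte per region.
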
struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 private:
  static const u64 kTwoLevelByteMapSize1 =
      (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
  static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;

 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;
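  // A small address space fits in a flat byte map; larger spaces use a
  // two-level map whose second level holds 1 << 12 entries, matching the
  // ">> 12" in kTwoLevelByteMapSize1 above.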
  using ByteMap = typename conditional<
      (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
      FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
                  AddressSpaceView>,
      TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
                 (kSpaceSize & (kSpaceSize - 1)) == 0);

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
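
  // A TransferBatch is a fixed-capacity array of pointers to free chunks of a
  // single size class. Batches are what moves between the per-thread caches
  // and the central per-class free lists.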
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };
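
  // next, count_ and the kMaxNumCached slots add up to exactly
  // SizeClassMap::kMaxNumCachedHint machine words; the COMPILER_CHECKs below
  // verify this and that the resulting size is a power of two.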
  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }
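
  // Metadata for the n-th user chunk lives in the n-th kMetadataSize-sized
  // slot counted down from the end of the region (see the region layout
  // comment at the top of this file).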
  void *GetMetaData(const void *p) {
    CHECK(kMetadataSize);
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }
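
  // Hands a whole TransferBatch of class_id chunks to the local cache,
  // refilling the central free list from a newly mapped region when it is
  // empty.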
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {}

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
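
  // Per-size-class state: a central free list of TransferBatches guarded by a
  // spin lock, padded to a cache line to avoid false sharing.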
  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

  uptr ComputeRegionId(uptr mem) const {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }
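
  // Maps one kRegionSize-aligned region for class_id and records its class in
  // possible_regions; returns 0 if the mapping failed.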
  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }
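
  // Packs the addresses in pointers_array into TransferBatches of up to
  // max_count chunks, appending full batches to sci->free_list and returning
  // the partially filled one (if any) via *current_batch.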
  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }
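
  // Maps a fresh region for class_id, carves it into chunks and feeds them to
  // PopulateBatches() in groups of at most kShuffleArraySize addresses.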
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};