//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
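//
// Illustration only (not part of this header): a hypothetical Params type
// wiring together the pieces described above. DefaultSizeClassMap, FlatByteMap
// and NoOpMapUnmapCallback are defined in the other sanitizer_allocator
// headers (exact template signatures may vary between versions); the concrete
// values for region size, metadata size and flags are arbitrary choices for
// this sketch, not what any particular tool uses.
//
//   struct ExampleAP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     typedef FlatByteMap<(kSpaceSize >> kRegionSizeLog)> ByteMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags =
//         SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
//   };
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimary;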

struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
                 (kSpaceSize & (kSpaceSize - 1)) == 0);

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
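  // Note on the two checks above: a TransferBatch holds one `next` pointer,
  // one `count_` word and kMaxNumCached == kMaxNumCachedHint - 2 chunk
  // pointers, i.e. exactly kMaxNumCachedHint pointer-sized words in total;
  // the first check additionally requires the total to be a power of two.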

  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }
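
  // Worked example with made-up numbers: for kRegionSizeLog == 20 (1 MiB
  // regions), kMetadataSize == 16 and a 1024-byte size class, a pointer
  // 5 KiB past the start of its region gets n == 5, so its metadata sits at
  // region_end - 6 * 16, i.e. the sixth 16-byte slot counted back from the
  // end of the region, matching the
  // "UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1" layout above.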

  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {}

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
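  // For scale (made-up configuration): with a 4 GiB address space and
  // kRegionSizeLog == 20, kRegionSize is 1 MiB and kNumPossibleRegions is
  // 4096, so a flat one-byte-per-region ByteMap covers the whole space with
  // 4096 bytes (0 meaning "region not used by the allocator").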

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

  uptr ComputeRegionId(uptr mem) {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
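
// Illustration only (not part of this header): the primary is normally paired
// with its per-thread cache, roughly as sketched below. ExampleAP32 is the
// hypothetical Params type from the comment near the top of this file; real
// tools define their own, and the local cache API lives in
// sanitizer_allocator_local_cache.h.
//
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimary;
//   typedef SizeClassAllocator32LocalCache<ExamplePrimary> ExampleCache;
//
//   ExamplePrimary primary;
//   primary.Init(kReleaseToOSIntervalNever);
//   ExampleCache cache;
//   cache.Init(/*stats=*/nullptr);
//   // The cache serves requests from per-thread, per-class free lists and
//   // calls primary.AllocateBatch() only when a list runs dry; full batches
//   // go back through primary.DeallocateBatch().
//   void *p = cache.Allocate(&primary, primary.ClassID(100));
//   cache.Deallocate(&primary, primary.ClassID(100), p);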