//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// Size class 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
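//
// Illustrative numbers (assumed for exposition only; the real constants come
// from the Params template argument below): with kRegionSizeLog == 20 each
// Region spans 1 MiB, and a 4 GiB address space gives
//   kNumPossibleRegions == (1ULL << 32) >> 20 == 4096,
// so possible_regions needs just 4096 bytes, and mapping a pointer to its
// size class is a single shift plus a byte load:
//   region_id = (uptr)p >> kRegionSizeLog;   // see ComputeRegionId()
//   class_id  = possible_regions[region_id]; // see GetSizeClass()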
struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements?
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
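  // Why the two checks above hold: TransferBatch stores one 'next' pointer,
  // one 'count_' word and kMaxNumCached == kMaxNumCachedHint - 2 cached
  // pointers, i.e. exactly kMaxNumCachedHint machine words in total. As an
  // illustration (the concrete numbers are an assumption, not fixed by this
  // header): with kMaxNumCachedHint == 64 on a 32-bit target,
  //   kBatchSize == 64 * sizeof(uptr) == 256 bytes,
  // which is a power of two, as the first check demands.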
  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in the 64-bit allocator.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }
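  // Worked example of the metadata addressing above (all concrete numbers are
  // assumptions for illustration; the real values come from Params and the
  // size class map): with kRegionSize == 1 MiB, kMetadataSize == 16 and a
  // chunk size of 64, a pointer at region offset 640 is chunk n == 10, and
  // its metadata lives at
  //   (beg + kRegionSize) - (10 + 1) * 16 == beg + 1048576 - 176,
  // i.e. metadata slots grow downward from the region end toward the <gap>
  // shown in the layout comment at the top of this file.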
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty() &&
        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
      return nullptr;
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }
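  // GetBlockBegin rounds an interior pointer down to the start of its chunk:
  // for example (chunk size of 64 assumed purely for illustration), a pointer
  // at region offset 650 gives n == 650 / 64 == 10, so the chunk begins at
  // beg + 10 * 64 == beg + 640. The u32 casts force a 32-bit division,
  // presumably because an offset within a region always fits in 32 bits.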
  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  static uptr AdditionalSize() {
    return 0;
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
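  // Each size class gets its own cache-line-sized SizeClassInfo so that
  // threads working on different size classes do not false-share the same
  // line; the COMPILER_CHECK above enforces the exact fit. A rough sizing
  // sketch (assuming a 64-byte cache line, 4-byte words, and a three-word
  // intrusive list, none of which is guaranteed by this header):
  //   4 (mutex, counted as one uptr) + 12 (free_list) + 48 (padding) == 64.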
  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, "SizeClassAllocator32"));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    if (UNLIKELY(!reg))
      return false;
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    CHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)i);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }
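  // How PopulateFreeList budgets a fresh region (concrete numbers below are
  // assumptions for illustration only): with kRegionSize == 1 MiB,
  // size == 64 and kMetadataSize == 16,
  //   n_chunks == 1048576 / (64 + 16) == 13107,
  // so user chunks take 13107 * 64 bytes from the region start, metadata
  // takes 13107 * 16 bytes from the region end, and the 16 leftover bytes
  // form the <gap> in the layout comment at the top of this file. The chunks
  // are then threaded into TransferBatches of at most MaxCached(class_id)
  // pointers and pushed onto the per-class free list.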
  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
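// A minimal usage sketch, kept in a comment so it does not affect this
// header. Every name below other than the allocator types themselves
// (ExampleAP32, the constants it chooses, and the helper types it references)
// is an assumption modeled on how the sanitizers typically instantiate a
// 32-bit primary allocator, not something defined in this file:
//
//   struct ExampleAP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     typedef FlatByteMap<kSpaceSize / (1 << kRegionSizeLog)> ByteMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimary;
//   typedef SizeClassAllocator32LocalCache<ExamplePrimary> ExampleCache;
//
//   ExamplePrimary allocator;
//   allocator.Init(kReleaseToOSIntervalNever);
//   // Per-thread caches then pull TransferBatches via AllocateBatch() and
//   // return them via DeallocateBatch().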