//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
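// Each allocation gets its own mmap'ed region. Roughly (a sketch derived from
// GetHeader/GetUser/RoundUpMapSize below):
//
//   map_beg                        map_beg + page_size_ (user pointer)
//   | Header + metadata page      | user block, rounded up to page_size_ |
//
// When alignment > page_size_, extra space is mapped so the user pointer can
// be aligned up; the Header always sits in the page just before it.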
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void InitLinkerInitialized(bool may_return_null) {
    page_size_ = GetPageSizeCached();
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void Init(bool may_return_null) {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized(may_return_null);
  }

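  // Maps a dedicated region for each request: the page-rounded user size plus
  // one leading page for the Header (plus slack when alignment > page_size_).
  // Returns a page-aligned pointer that also satisfies `alignment`.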
  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) return ReturnNullOrDieOnBadRequest();
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
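    // Register the chunk and update stats under mutex_ in a scoped block so
    // the lock is not held across the final return.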
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  bool MayReturnNull() const {
    return atomic_load(&may_return_null_, memory_order_acquire);
  }

  void *ReturnNullOrDieOnBadRequest() {
    if (MayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull(false);
  }

  void *ReturnNullOrDieOnOOM() {
    if (MayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool may_return_null) {
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

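  // Unregisters the chunk under mutex_, then unmaps the entire region,
  // including the leading Header page.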
  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
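      // Fill the freed slot with the last chunk to keep chunks_ dense.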
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

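  // Sums the page-rounded size (plus one header page each) of all live chunks.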
  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

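  // Returns the beginning of the user block containing ptr, or nullptr if ptr
  // is not owned by this allocator. Uses a linear scan over chunks_.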
  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    if (!chunks_sorted_) {
      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
      SortArray(reinterpret_cast<uptr*>(chunks_), n);
      for (uptr i = 0; i < n; i++)
        chunks_[i]->chunk_idx = i;
      chunks_sorted_ = true;
      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
          chunks_[n - 1]->map_size;
    }
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h);
  }

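  // Prints cumulative statistics, including a histogram keyed by
  // log2(map_size) of each allocation.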
  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }

 private:
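  // Hard cap on simultaneously live chunks: 2^15 on 32-bit targets,
  // 2^18 on 64-bit targets.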
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

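  // The Header occupies the start of the page that immediately precedes the
  // user block; metadata returned by GetMetaData() follows it in that page.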
  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

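  // Rounds the user size up to a whole number of pages and adds one extra
  // page for the Header.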
  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  uptr min_mmap_, max_mmap_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  atomic_uint8_t may_return_null_;
  SpinMutex mutex_;
};
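
// Illustrative usage sketch (not part of this header). In the sanitizer
// runtimes this class typically serves as the secondary allocator inside
// CombinedAllocator; standalone use would look roughly like this, assuming
// AllocatorStats provides Init() in addition to the Add()/Sub() used above:
//
//   LargeMmapAllocator<> secondary;
//   secondary.Init(/*may_return_null=*/false);
//   AllocatorStats stats;
//   stats.Init();
//   void *p = secondary.Allocate(&stats, /*size=*/1 << 20, /*alignment=*/8);
//   CHECK(secondary.PointerIsMine(p));
//   secondary.Deallocate(&stats, p);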