libsanitizer/sanitizer_common/sanitizer_quarantine.h

//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
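//
// A typical use looks roughly like this (illustrative sketch only; MyCallback
// and MyNode are hypothetical names standing in for a tool's own types, not
// part of this header):
//
//   Quarantine<MyCallback, MyNode> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(1 << 26 /* 64Mb global */, 1 << 20 /* 1Mb per thread */);
//   ...
//   // On deallocation, from a thread owning a per-thread Cache:
//   quarantine.Put(&cache, MyCallback(), node, node_size);
//   ...
//   // At thread exit, flush the per-thread cache into the global queue:
//   quarantine.Drain(&cache, MyCallback());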

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
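//
// A minimal callback might look like this (illustrative sketch only;
// ExampleCallback and ExampleNode are hypothetical names, and the bodies are
// placeholders for a tool's own allocator hooks):
//
//   struct ExampleCallback {
//     void Recycle(ExampleNode *ptr) { /* return the chunk to the allocator */ }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };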

template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // It assumes that the world is stopped, just as the allocator's PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory is counted
      // against the quarantine limit) can outweigh the user's actual
      // quarantined chunks, which defeats the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). This saves a merge attempt when the
      // batch list is unlikely to contain batches suitable for merging.
      const uptr kOverheadThresholdPercents = 100;
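      // For example, with kOverheadThresholdPercents == 100 the condition
      // below reduces to overhead_size * 2 > cache_size, i.e. batches are
      // merged once their own footprint exceeds the size of the user chunks
      // they hold.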
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H