//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
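  // Note: together with the three header fields below, a batch occupies
  // exactly 1024 words, i.e. 8Kb on 64-bit platforms (see the COMPILER_CHECK
  // below).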
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
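//
// A minimal sketch of a conforming callback (hypothetical, for illustration
// only; real tools supply their own, and InternalAlloc/InternalFree are the
// sanitizer-internal allocation helpers):
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr) { /* return the chunk to the allocator */ }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };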
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this allows us to perform just one atomic
    // read per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store(&max_size_, size, memory_order_relaxed);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_relaxed);  // 90% of max size.
    atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
  }

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
  uptr GetCacheSize() const {
    return atomic_load(&max_cache_size_, memory_order_relaxed);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

  void PrintStats() const {
    // This assumes that the world is stopped, just like the allocator's
    // PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data. The cache-line paddings reduce false sharing between the
  // read-mostly limits, the mutexes, and the global cache.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (since the memory used by
      // them is counted against the quarantine limit) can outweigh the actual
      // user's quarantined chunks, which diminishes the purpose of the
      // quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). It saves us a merge attempt when
      // the batch list is unlikely to contain batches suitable for merging.
      // The condition below is equivalent to: the overhead exceeds
      // kOverheadThresholdPercents of the quarantined user bytes.
      const uptr kOverheadThresholdPercents = 100;
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
          cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      // Prefetch the first entries, then stay kPrefetch entries ahead of the
      // recycle loop to hide the memory latency.
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
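
// A hypothetical usage sketch (for illustration only; ExampleCallback refers
// to the sketch above, and the types and sizes here are assumptions rather
// than any tool's actual configuration):
//
//   typedef Quarantine<ExampleCallback, ChunkHeader> ChunkQuarantine;
//   static ChunkQuarantine quarantine(LINKER_INITIALIZED);
//   static ChunkQuarantine::Cache thread_cache(LINKER_INITIALIZED);
//
//   quarantine.Init(1 << 28, 1 << 20);  // 256Mb global, 1Mb per-thread cache.
//   quarantine.Put(&thread_cache, ExampleCallback(), chunk, chunk_size);
//   // On thread shutdown, flush the local cache into the global queue:
//   quarantine.Drain(&thread_cache, ExampleCallback());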

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      // The current batch is full (or missing); start a new one.
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store(&from_cache->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to the deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory "
           "overhead\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;
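
  // Note (an observation about the current callers, not a guarantee enforced
  // by this class): the load-add-store sequences below are not atomic
  // increments, which is fine here because each per-thread cache is only
  // mutated by its owning thread and the global cache only under cache_mutex_.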
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H