//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts to global FIFO queue. When the queue reaches specified threshold,
// oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
15 #ifndef SANITIZER_QUARANTINE_H
16 #define SANITIZER_QUARANTINE_H
18 #include "sanitizer_internal_defs.h"
19 #include "sanitizer_mutex.h"
20 #include "sanitizer_list.h"
22 namespace __sanitizer
{
24 template<typename Node
> class QuarantineCache
;
26 struct QuarantineBatch
{
27 static const uptr kSize
= 1021;
28 QuarantineBatch
*next
;
33 void init(void *ptr
, uptr size
) {
36 this->size
= size
+ sizeof(QuarantineBatch
); // Account for the batch size.
39 // The total size of quarantined nodes recorded in this batch.
40 uptr
quarantined_size() const {
41 return size
- sizeof(QuarantineBatch
);
44 void push_back(void *ptr
, uptr size
) {
45 CHECK_LT(count
, kSize
);
50 bool can_merge(const QuarantineBatch
* const from
) const {
51 return count
+ from
->count
<= kSize
;
54 void merge(QuarantineBatch
* const from
) {
55 CHECK_LE(count
+ from
->count
, kSize
);
56 CHECK_GE(size
, sizeof(QuarantineBatch
));
58 for (uptr i
= 0; i
< from
->count
; ++i
)
59 batch
[count
+ i
] = from
->batch
[i
];
61 size
+= from
->quarantined_size();
64 from
->size
= sizeof(QuarantineBatch
);
68 COMPILER_CHECK(sizeof(QuarantineBatch
) <= (1 << 13)); // 8Kb.
70 // The callback interface is:
71 // void Callback::Recycle(Node *ptr);
72 // void *cb.Allocate(uptr size);
73 // void cb.Deallocate(void *ptr);
74 template<typename Callback
, typename Node
>
77 typedef QuarantineCache
<Callback
> Cache
;
79 explicit Quarantine(LinkerInitialized
)
80 : cache_(LINKER_INITIALIZED
) {
83 void Init(uptr size
, uptr cache_size
) {
84 // Thread local quarantine size can be zero only when global quarantine size
85 // is zero (it allows us to perform just one atomic read per Put() call).
86 CHECK((size
== 0 && cache_size
== 0) || cache_size
!= 0);
88 atomic_store_relaxed(&max_size_
, size
);
89 atomic_store_relaxed(&min_size_
, size
/ 10 * 9); // 90% of max size.
90 atomic_store_relaxed(&max_cache_size_
, cache_size
);
93 recycle_mutex_
.Init();
96 uptr
GetSize() const { return atomic_load_relaxed(&max_size_
); }
97 uptr
GetCacheSize() const {
98 return atomic_load_relaxed(&max_cache_size_
);
101 void Put(Cache
*c
, Callback cb
, Node
*ptr
, uptr size
) {
102 uptr cache_size
= GetCacheSize();
104 c
->Enqueue(cb
, ptr
, size
);
106 // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
109 // Check cache size anyway to accommodate for runtime cache_size change.
110 if (c
->Size() > cache_size
)
114 void NOINLINE
Drain(Cache
*c
, Callback cb
) {
116 SpinMutexLock
l(&cache_mutex_
);
119 if (cache_
.Size() > GetSize() && recycle_mutex_
.TryLock())
120 Recycle(atomic_load_relaxed(&min_size_
), cb
);
123 void NOINLINE
DrainAndRecycle(Cache
*c
, Callback cb
) {
125 SpinMutexLock
l(&cache_mutex_
);
128 recycle_mutex_
.Lock();
132 void PrintStats() const {
133 // It assumes that the world is stopped, just as the allocator's PrintStats.
134 Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
135 GetSize() >> 20, GetCacheSize() >> 10);
141 char pad0_
[kCacheLineSize
];
142 atomic_uintptr_t max_size_
;
143 atomic_uintptr_t min_size_
;
144 atomic_uintptr_t max_cache_size_
;
145 char pad1_
[kCacheLineSize
];
146 StaticSpinMutex cache_mutex_
;
147 StaticSpinMutex recycle_mutex_
;
149 char pad2_
[kCacheLineSize
];
151 void NOINLINE
Recycle(uptr min_size
, Callback cb
) {
154 SpinMutexLock
l(&cache_mutex_
);
155 // Go over the batches and merge partially filled ones to
156 // save some memory, otherwise batches themselves (since the memory used
157 // by them is counted against quarantine limit) can overcome the actual
158 // user's quarantined chunks, which diminishes the purpose of the
160 uptr cache_size
= cache_
.Size();
161 uptr overhead_size
= cache_
.OverheadSize();
162 CHECK_GE(cache_size
, overhead_size
);
163 // Do the merge only when overhead exceeds this predefined limit (might
164 // require some tuning). It saves us merge attempt when the batch list
165 // quarantine is unlikely to contain batches suitable for merge.
166 const uptr kOverheadThresholdPercents
= 100;
167 if (cache_size
> overhead_size
&&
168 overhead_size
* (100 + kOverheadThresholdPercents
) >
169 cache_size
* kOverheadThresholdPercents
) {
170 cache_
.MergeBatches(&tmp
);
172 // Extract enough chunks from the quarantine to get below the max
173 // quarantine size and leave some leeway for the newly quarantined chunks.
174 while (cache_
.Size() > min_size
) {
175 tmp
.EnqueueBatch(cache_
.DequeueBatch());
178 recycle_mutex_
.Unlock();
182 void NOINLINE
DoRecycle(Cache
*c
, Callback cb
) {
183 while (QuarantineBatch
*b
= c
->DequeueBatch()) {
184 const uptr kPrefetch
= 16;
185 CHECK(kPrefetch
<= ARRAY_SIZE(b
->batch
));
186 for (uptr i
= 0; i
< kPrefetch
; i
++)
187 PREFETCH(b
->batch
[i
]);
188 for (uptr i
= 0, count
= b
->count
; i
< count
; i
++) {
189 if (i
+ kPrefetch
< count
)
190 PREFETCH(b
->batch
[i
+ kPrefetch
]);
191 cb
.Recycle((Node
*)b
->batch
[i
]);
198 // Per-thread cache of memory blocks.
199 template<typename Callback
>
200 class QuarantineCache
{
202 explicit QuarantineCache(LinkerInitialized
) {
210 // Total memory used, including internal accounting.
212 return atomic_load_relaxed(&size_
);
215 // Memory used for internal accounting.
216 uptr
OverheadSize() const {
217 return list_
.size() * sizeof(QuarantineBatch
);
220 void Enqueue(Callback cb
, void *ptr
, uptr size
) {
221 if (list_
.empty() || list_
.back()->count
== QuarantineBatch::kSize
) {
222 QuarantineBatch
*b
= (QuarantineBatch
*)cb
.Allocate(sizeof(*b
));
227 list_
.back()->push_back(ptr
, size
);
232 void Transfer(QuarantineCache
*from_cache
) {
233 list_
.append_back(&from_cache
->list_
);
234 SizeAdd(from_cache
->Size());
236 atomic_store_relaxed(&from_cache
->size_
, 0);
239 void EnqueueBatch(QuarantineBatch
*b
) {
244 QuarantineBatch
*DequeueBatch() {
247 QuarantineBatch
*b
= list_
.front();
253 void MergeBatches(QuarantineCache
*to_deallocate
) {
254 uptr extracted_size
= 0;
255 QuarantineBatch
*current
= list_
.front();
256 while (current
&& current
->next
) {
257 if (current
->can_merge(current
->next
)) {
258 QuarantineBatch
*extracted
= current
->next
;
259 // Move all the chunks into the current batch.
260 current
->merge(extracted
);
261 CHECK_EQ(extracted
->count
, 0);
262 CHECK_EQ(extracted
->size
, sizeof(QuarantineBatch
));
263 // Remove the next batch from the list and account for its size.
264 list_
.extract(current
, extracted
);
265 extracted_size
+= extracted
->size
;
266 // Add it to deallocation list.
267 to_deallocate
->EnqueueBatch(extracted
);
269 current
= current
->next
;
272 SizeSub(extracted_size
);
275 void PrintStats() const {
276 uptr batch_count
= 0;
277 uptr total_overhead_bytes
= 0;
278 uptr total_bytes
= 0;
279 uptr total_quarantine_chunks
= 0;
280 for (List::ConstIterator it
= list_
.begin(); it
!= list_
.end(); ++it
) {
282 total_bytes
+= (*it
).size
;
283 total_overhead_bytes
+= (*it
).size
- (*it
).quarantined_size();
284 total_quarantine_chunks
+= (*it
).count
;
286 uptr quarantine_chunks_capacity
= batch_count
* QuarantineBatch::kSize
;
287 int chunks_usage_percent
= quarantine_chunks_capacity
== 0 ?
288 0 : total_quarantine_chunks
* 100 / quarantine_chunks_capacity
;
289 uptr total_quarantined_bytes
= total_bytes
- total_overhead_bytes
;
290 int memory_overhead_percent
= total_quarantined_bytes
== 0 ?
291 0 : total_overhead_bytes
* 100 / total_quarantined_bytes
;
292 Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
293 "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
295 batch_count
, total_bytes
, total_quarantined_bytes
,
296 total_quarantine_chunks
, quarantine_chunks_capacity
,
297 chunks_usage_percent
, memory_overhead_percent
);
301 typedef IntrusiveList
<QuarantineBatch
> List
;
304 atomic_uintptr_t size_
;
306 void SizeAdd(uptr add
) {
307 atomic_store_relaxed(&size_
, Size() + add
);
309 void SizeSub(uptr sub
) {
310 atomic_store_relaxed(&size_
, Size() - sub
);
314 } // namespace __sanitizer
316 #endif // SANITIZER_QUARANTINE_H