//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches the specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
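
// A minimal usage sketch (illustrative only; ToolCallback, ToolChunk, and the
// limits below are hypothetical stand-ins for whatever the embedding tool
// defines):
//
//   typedef Quarantine<ToolCallback, ToolChunk> ToolQuarantine;
//   static ToolQuarantine quarantine(LINKER_INITIALIZED);
//   static ToolQuarantine::Cache cache(LINKER_INITIALIZED);
//
//   quarantine.Init(64 << 20, 256 << 10);  // 64Mb global, 256Kb per thread.
//   quarantine.Put(&cache, ToolCallback(), chunk, chunk_size);  // On free().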

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }
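
  // Worked example of the accounting above (values are illustrative): after
  // init(p, 100), size == 100 + sizeof(QuarantineBatch), so
  // quarantined_size() == 100 -- the batch header overhead is excluded.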

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
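//
// A sketch of a conforming callback (illustrative; InternalAlloc/InternalFree
// and ToolChunk are stand-ins for whatever the embedding tool provides):
//
//   struct ExampleCallback {
//     void Recycle(ToolChunk *ptr) { /* actually release the chunk */ }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };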
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this allows us to perform just one atomic read
    // per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }
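
  // For instance (hypothetical values), Init(64 << 20, 256 << 10) caps the
  // global quarantine at 64Mb with 256Kb per-thread caches, and Init(0, 0)
  // disables quarantining entirely, while Init(64 << 20, 0) would violate
  // the CHECK above.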

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // It assumes that the world is stopped, just as the allocator's PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (since the memory they use
      // is counted against the quarantine limit) can outweigh the user's
      // actual quarantined chunks, which defeats the purpose of the
      // quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us a merge attempt when the quarantine
      // is unlikely to contain batches suitable for merging. With the 100%
      // threshold below, the merge fires once overhead exceeds half of the
      // total cache size, i.e. once batch headers outweigh user bytes.
      const uptr kOverheadThresholdPercents = 100;
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to the deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory "
           "overhead\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H