//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches the specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
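
// A rough usage sketch (illustrative only: "MyCallback", "MyChunk",
// "thread_cache", and the call sites below are hypothetical, not part of
// this header):
//
//   Quarantine<MyCallback, MyChunk> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(/*size=*/1 << 20, /*cache_size=*/1 << 16);
//   // On deallocation: stash the block in the current thread's cache.
//   quarantine.Put(&thread_cache, MyCallback(), chunk, chunk_size);
//   // On thread destruction: flush whatever is still cached.
//   quarantine.Drain(&thread_cache, MyCallback());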

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
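//
// An illustrative (hypothetical) callback satisfying that interface; real
// tools supply their own type with these three members:
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr);     // Finally release a quarantined block.
//     void *Allocate(uptr size);   // Allocate storage for a QuarantineBatch.
//     void Deallocate(void *ptr);  // Free a drained QuarantineBatch.
//   };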
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  uptr GetSize() const {
    return atomic_load(&max_size_, memory_order_acquire);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      // Move the thread-local cache into the global one under the lock.
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  // Expects recycle_mutex_ to be held; releases it before doing the
  // (potentially slow) recycling work.
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      // Prefetch the first entries to hide the memory latency of Recycle().
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch *AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H