//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches a specified amount of memory in per-thread caches and
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.
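
// A back-of-the-envelope check of that bound (a sketch assuming 8-byte
// pointers and no struct padding): the three header words plus the batch
// array give 3 * 8 + 1021 * 8 = 8192 bytes, i.e. exactly 1 << 13, which is
// presumably why kSize is 1021 rather than a round 1024.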

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
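//
// A minimal sketch of a type satisfying this interface (illustrative only;
// "ExampleCallback" is hypothetical and not defined anywhere in this file):
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr);     // hand ptr's memory back to the allocator
//     void *Allocate(uptr size);   // allocate internal storage for a batch
//     void Deallocate(void *ptr);  // free storage obtained from Allocate()
//   };
//
// Allocate()/Deallocate() are used only for QuarantineBatch objects; user
// memory leaves the quarantine exclusively through Recycle().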
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    max_size_ = size;
    min_size_ = size / 10 * 9;  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  uptr max_size_;
  uptr min_size_;  // 90% of max_size_.
  uptr max_cache_size_;
  // Cache-line padding to reduce false sharing between the fields above/below.
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size_) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      // Prefetch batch contents a few elements ahead of the recycle loop.
      const uptr kPrefetch = 16;
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
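
// Illustrative use from an allocator (a sketch under assumed names; the
// quarantine instance, callback and chunk variables below are hypothetical
// and not part of this header):
//
//   static Quarantine<ExampleCallback, Chunk> quarantine(LINKER_INITIALIZED);
//   static THREADLOCAL Quarantine<ExampleCallback, Chunk>::Cache
//       cache(LINKER_INITIALIZED);
//
//   void InitQuarantine() {
//     quarantine.Init(/*size=*/ 1 << 24, /*cache_size=*/ 1 << 18);
//   }
//
//   void QuarantineChunk(Chunk *chunk, uptr chunk_size) {
//     // Instead of freeing immediately, park the chunk in the quarantine.
//     quarantine.Put(&cache, ExampleCallback(), chunk, chunk_size);
//   }
//
// Put() fills the per-thread cache; once it exceeds cache_size it is
// Drain()ed into the global queue, and once the global queue exceeds size
// the oldest batches are recycled through the callback.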

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return 0;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
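
// Worked example of the size accounting above (hypothetical numbers):
// Enqueue()ing a 100-byte chunk normally adds 100 to Size(); when the call
// also has to allocate a fresh batch, 100 + sizeof(QuarantineBatch) is added
// instead, so batch overhead is charged against the quarantine limits rather
// than accumulating unaccounted.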

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_QUARANTINE_H