//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts to global FIFO queue. When the queue reaches specified threshold,
// oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"
namespace __sanitizer {
// Forward declaration; the full definition (a per-thread cache of
// quarantined blocks, parameterized on the allocator callback type) is at
// the bottom of this file.  Parameter renamed to Callback to match it.
template<typename Callback> class QuarantineCache;
26 struct QuarantineBatch
{
27 static const uptr kSize
= 1024;
28 QuarantineBatch
*next
;
34 // The callback interface is:
35 // void Callback::Recycle(Node *ptr);
36 // void *cb.Allocate(uptr size);
37 // void cb.Deallocate(void *ptr);
38 template<typename Callback
, typename Node
>
41 typedef QuarantineCache
<Callback
> Cache
;
43 explicit Quarantine(LinkerInitialized
)
44 : cache_(LINKER_INITIALIZED
) {
47 void Init(uptr size
, uptr cache_size
) {
49 min_size_
= size
/ 10 * 9; // 90% of max size.
50 max_cache_size_
= cache_size
;
53 void Put(Cache
*c
, Callback cb
, Node
*ptr
, uptr size
) {
54 c
->Enqueue(cb
, ptr
, size
);
55 if (c
->Size() > max_cache_size_
)
59 void NOINLINE
Drain(Cache
*c
, Callback cb
) {
61 SpinMutexLock
l(&cache_mutex_
);
64 if (cache_
.Size() > max_size_
&& recycle_mutex_
.TryLock())
70 char pad0_
[kCacheLineSize
];
74 char pad1_
[kCacheLineSize
];
75 SpinMutex cache_mutex_
;
76 SpinMutex recycle_mutex_
;
78 char pad2_
[kCacheLineSize
];
80 void NOINLINE
Recycle(Callback cb
) {
83 SpinMutexLock
l(&cache_mutex_
);
84 while (cache_
.Size() > min_size_
) {
85 QuarantineBatch
*b
= cache_
.DequeueBatch();
89 recycle_mutex_
.Unlock();
93 void NOINLINE
DoRecycle(Cache
*c
, Callback cb
) {
94 while (QuarantineBatch
*b
= c
->DequeueBatch()) {
95 const uptr kPrefetch
= 16;
96 for (uptr i
= 0; i
< kPrefetch
; i
++)
97 PREFETCH(b
->batch
[i
]);
98 for (uptr i
= 0; i
< b
->count
; i
++) {
99 PREFETCH(b
->batch
[i
+ kPrefetch
]);
100 cb
.Recycle((Node
*)b
->batch
[i
]);
107 // Per-thread cache of memory blocks.
108 template<typename Callback
>
109 class QuarantineCache
{
111 explicit QuarantineCache(LinkerInitialized
) {
120 return atomic_load(&size_
, memory_order_relaxed
);
123 void Enqueue(Callback cb
, void *ptr
, uptr size
) {
124 if (list_
.empty() || list_
.back()->count
== QuarantineBatch::kSize
)
126 QuarantineBatch
*b
= list_
.back();
127 b
->batch
[b
->count
++] = ptr
;
132 void Transfer(QuarantineCache
*c
) {
133 list_
.append_back(&c
->list_
);
135 atomic_store(&c
->size_
, 0, memory_order_relaxed
);
138 void EnqueueBatch(QuarantineBatch
*b
) {
143 QuarantineBatch
*DequeueBatch() {
146 QuarantineBatch
*b
= list_
.front();
153 IntrusiveList
<QuarantineBatch
> list_
;
154 atomic_uintptr_t size_
;
156 void SizeAdd(uptr add
) {
157 atomic_store(&size_
, Size() + add
, memory_order_relaxed
);
160 QuarantineBatch
*NOINLINE
AllocBatch(Callback cb
) {
161 QuarantineBatch
*b
= (QuarantineBatch
*)cb
.Allocate(sizeof(*b
));
#endif  // #ifndef SANITIZER_QUARANTINE_H