[official-gcc.git] / libsanitizer/sanitizer_common/sanitizer_quarantine.h
//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts to global FIFO queue. When the queue reaches specified threshold,
// oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
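//
// Illustrative usage sketch (added commentary, not part of the upstream
// interface; MyCallback and chunk are hypothetical stand-ins for a tool's
// callback type and a quarantined allocation):
//
//   static Quarantine<MyCallback, void> q(LINKER_INITIALIZED);
//   static QuarantineCache<MyCallback> cache(LINKER_INITIALIZED);  // Per thread.
//   q.Init(1 << 20, 1 << 18);  // 1Mb global limit, 256Kb per-thread cache.
//   q.Put(&cache, MyCallback(), chunk, chunk_size);  // Recycled later, FIFO.
//
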
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;
struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.
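// On a typical 64-bit target (8-byte uptr and pointers) the batch is exactly
// 8Kb: a 24-byte header (next, size, count) plus 1021 * 8 = 8168 bytes of
// payload gives 24 + 8168 = 8192 = 1 << 13, which is why kSize is 1021.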

// The callback interface is:
//   void cb.Recycle(Node *ptr);
//   void *cb.Allocate(uptr size);
//   void cb.Deallocate(void *ptr);
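//
// A minimal conforming callback might look like this (illustrative sketch
// only; the real callbacks live in the tools, and ReturnToAllocator /
// InternalAlloc / InternalFree stand in for tool-specific helpers):
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr) { ReturnToAllocator(ptr); }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };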
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      // Scoped so cache_mutex_ is released before Recycle runs.
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  // Called with recycle_mutex_ held (acquired via TryLock in Drain);
  // releases it before handing batches to the callback.
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      // Prefetch the first entries, then stay kPrefetch slots ahead of the
      // recycle loop so the pointed-to memory is in cache when needed.
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
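
// Worked example of the thresholds (added commentary; the numbers are
// illustrative): after Init(1 << 20, 1 << 18), max_size_ is 1Mb and min_size_
// is 90% of that. A Put that pushes a per-thread cache past 256Kb triggers
// Drain into the global cache; once the global cache exceeds 1Mb, Recycle
// dequeues the oldest batches until roughly 900Kb remain, then recycles them
// outside the lock.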

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }
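
  // Accounting sketch (added commentary): enqueueing a 64-byte chunk into an
  // empty cache first allocates a fresh batch, so that chunk is charged
  // 64 + sizeof(QuarantineBatch) bytes; the next 1020 chunks enqueued into
  // the same batch are charged only their own size.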

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // Plain load+store rather than an atomic RMW: size_ is only modified by
  // the owning thread (or under Quarantine's cache_mutex_ for the global
  // cache), so no concurrent writers race here.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H