//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};

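// Illustrative usage sketch (not part of this header; the real wiring lives in
// the sanitizer runtimes, and the names below are made up for the example):
//   static THREADLOCAL AllocatorCache cache;      // one cache per thread, in TLS
//   cache.Init(&allocator_stats);                 // once, when the thread starts
//   void *p = cache.Allocate(&allocator, class_id);  // hot path, no locking
//   cache.Deallocate(&allocator, class_id, p);       // hot path, no locking
//   cache.Destroy(&allocator, &allocator_stats);     // on thread exit
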
// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

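  // Initializes the per-thread stats and registers them with the global stats
  // list, if one is provided.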
  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

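  // Returns all cached chunks to the shared allocator and unregisters the
  // per-thread stats.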
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

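  // Pops a chunk of the given size class from the local cache, refilling the
  // cache from the shared allocator when it is empty. Returns nullptr if the
  // refill fails.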
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

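  // Pushes a chunk back into the local cache. When the cache for this size
  // class is full, half of it is first returned to the shared allocator.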
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

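  // Returns all cached chunks of every size class to the shared allocator.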
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

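  // Per-size-class cache: a small LIFO array of compact chunk pointers. The
  // array holds 2 * kMaxNumCachedHint entries, an upper bound on the max_count
  // computed in InitCache().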
  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

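  // Lazily initializes the per-class limits. per_class_[1].max_count doubles
  // as the "already initialized" flag (class 0 is never used for caching).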
  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
      c->class_size = Allocator::ClassIdToSize(i);
    }
  }

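  // Fetches max_count / 2 chunks of the given size class from the shared
  // allocator into the local cache. Returns false if the allocator fails.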
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

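  // Returns the topmost `count` cached chunks of the given size class to the
  // shared allocator.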
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

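  // Pops a chunk of the given size class from the local cache, refilling the
  // cache with a TransferBatch from the shared allocator when it is empty.
  // Returns nullptr if the refill fails.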
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

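  // Pushes a chunk back into the local cache, first draining half of the cache
  // for this size class if it is full.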
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

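  // Returns every cached chunk, for all size classes, to the shared allocator.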
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

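  // Lazily initializes the per-class limits and precomputes, for each size
  // class, which class (if any) its TransferBatch headers are allocated from.
  // per_class_[1].max_count doubles as the "already initialized" flag.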
  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      uptr max_cached = TransferBatch::MaxCached(i);
      c->max_count = 2 * max_cached;
      c->class_size = Allocator::ClassIdToSize(i);
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (c->class_size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
            batch_class_id : 0;
      }
    }
  }

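  // Fetches one TransferBatch from the shared allocator, copies its chunks
  // into the local cache, and destroys the batch header.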
  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

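  // Packs up to max_count / 2 cached chunks of the given size class into a
  // TransferBatch and returns them to the shared allocator.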
  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b))
      DieOnFailure::OnOOM();
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};