//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {};
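
// A minimal usage sketch; names such as Alloc64, allocator, global_stats and
// size are illustrative, not defined here. Each thread owns one cache object,
// typically in TLS, and routes its requests through it:
//
//   typedef SizeClassAllocatorLocalCache<Alloc64> Cache64;
//   static THREADLOCAL Cache64 cache;                 // zero-initialized POD
//   cache.Init(&global_stats);                        // once per thread
//   uptr class_id = Alloc64::SizeClassMapT::ClassID(size);
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &global_stats);         // drain and unregister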

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

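  // Registers this cache's stats with the global stats list, if one is given.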
  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

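  // Flushes all cached chunks back to the allocator and unregisters the stats.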
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

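  // Pops one chunk of the given size class from the local cache, refilling it
  // from the shared allocator if it is empty. Returns nullptr on failure.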
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }

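  // Pushes a chunk back onto the local cache, draining half of the cached
  // chunks to the shared allocator when the cache is full.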
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

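  // Returns every cached chunk of every size class to the shared allocator.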
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

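  // Per size class state: a bounded LIFO array of compact pointers plus the
  // limits that control refilling and draining.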
  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

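  // Lazily initializes max_count and class_size for all classes on first use;
  // a zero max_count marks a cache that has not been initialized yet.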
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }

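  // Requests max_count / 2 chunks of class_id from the shared allocator;
  // returns false if the allocator cannot provide them.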
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

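  // Returns the `count` most recently cached chunks of class_id to the shared
  // allocator.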
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
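  // When batch_class_id is 0, the batch is stored inside one of the cached
  // chunks, so the caller-provided b is returned; otherwise a fresh batch is
  // allocated from the dedicated batch size class.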
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

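  // Pops one chunk of the given size class from the local cache, refilling it
  // from the allocator via a TransferBatch when empty; prefetches the chunk
  // that the next Allocate call would return.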
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from the kBatchClassID size class (except for those that are
  // needed for kBatchClassID itself). The goal is to have TransferBatches in a
  // totally different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

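  // Lazily initializes max_count, class_size and batch_class_id for all
  // classes on first use; a zero max_count marks an uninitialized cache.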
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }

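  // Fetches one TransferBatch from the allocator, copies its chunks into the
  // local array and destroys the (now empty) batch.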
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

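  // Packs up to max_count / 2 cached chunks into a TransferBatch and hands it
  // back to the allocator.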
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};