//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
11 #ifndef SANITIZER_ALLOCATOR_H
12 #error This file must be included inside sanitizer_allocator.h
15 // Objects of this type should be used as local caches for SizeClassAllocator64
16 // or SizeClassAllocator32. Since the typical use of this class is to have one
17 // object per thread in TLS, is has to be POD.
18 template<class SizeClassAllocator
>
19 struct SizeClassAllocatorLocalCache
20 : SizeClassAllocator::AllocatorCache
{};
22 // Cache used by SizeClassAllocator64.
23 template <class SizeClassAllocator
>
24 struct SizeClassAllocator64LocalCache
{
25 typedef SizeClassAllocator Allocator
;
27 void Init(AllocatorGlobalStats
*s
) {
33 void Destroy(SizeClassAllocator
*allocator
, AllocatorGlobalStats
*s
) {
36 s
->Unregister(&stats_
);
39 void *Allocate(SizeClassAllocator
*allocator
, uptr class_id
) {
40 CHECK_NE(class_id
, 0UL);
41 CHECK_LT(class_id
, kNumClasses
);
42 PerClass
*c
= &per_class_
[class_id
];
43 if (UNLIKELY(c
->count
== 0)) {
44 if (UNLIKELY(!Refill(c
, allocator
, class_id
)))
46 DCHECK_GT(c
->count
, 0);
48 CompactPtrT chunk
= c
->chunks
[--c
->count
];
49 stats_
.Add(AllocatorStatAllocated
, c
->class_size
);
50 return reinterpret_cast<void *>(allocator
->CompactPtrToPointer(
51 allocator
->GetRegionBeginBySizeClass(class_id
), chunk
));
54 void Deallocate(SizeClassAllocator
*allocator
, uptr class_id
, void *p
) {
55 CHECK_NE(class_id
, 0UL);
56 CHECK_LT(class_id
, kNumClasses
);
57 // If the first allocator call on a new thread is a deallocation, then
58 // max_count will be zero, leading to check failure.
59 PerClass
*c
= &per_class_
[class_id
];
61 if (UNLIKELY(c
->count
== c
->max_count
))
62 Drain(c
, allocator
, class_id
, c
->max_count
/ 2);
63 CompactPtrT chunk
= allocator
->PointerToCompactPtr(
64 allocator
->GetRegionBeginBySizeClass(class_id
),
65 reinterpret_cast<uptr
>(p
));
66 c
->chunks
[c
->count
++] = chunk
;
67 stats_
.Sub(AllocatorStatAllocated
, c
->class_size
);
70 void Drain(SizeClassAllocator
*allocator
) {
71 for (uptr i
= 1; i
< kNumClasses
; i
++) {
72 PerClass
*c
= &per_class_
[i
];
74 Drain(c
, allocator
, i
, c
->count
);
79 typedef typename
Allocator::SizeClassMapT SizeClassMap
;
80 static const uptr kNumClasses
= SizeClassMap::kNumClasses
;
81 typedef typename
Allocator::CompactPtrT CompactPtrT
;
87 CompactPtrT chunks
[2 * SizeClassMap::kMaxNumCachedHint
];
89 PerClass per_class_
[kNumClasses
];
90 AllocatorStats stats_
;
92 void InitCache(PerClass
*c
) {
93 if (LIKELY(c
->max_count
))
95 for (uptr i
= 1; i
< kNumClasses
; i
++) {
96 PerClass
*c
= &per_class_
[i
];
97 const uptr size
= Allocator::ClassIdToSize(i
);
98 c
->max_count
= 2 * SizeClassMap::MaxCachedHint(size
);
101 DCHECK_NE(c
->max_count
, 0UL);
104 NOINLINE
bool Refill(PerClass
*c
, SizeClassAllocator
*allocator
,
107 const uptr num_requested_chunks
= c
->max_count
/ 2;
108 if (UNLIKELY(!allocator
->GetFromAllocator(&stats_
, class_id
, c
->chunks
,
109 num_requested_chunks
)))
111 c
->count
= num_requested_chunks
;
115 NOINLINE
void Drain(PerClass
*c
, SizeClassAllocator
*allocator
, uptr class_id
,
117 CHECK_GE(c
->count
, count
);
118 const uptr first_idx_to_drain
= c
->count
- count
;
120 allocator
->ReturnToAllocator(&stats_
, class_id
,
121 &c
->chunks
[first_idx_to_drain
], count
);
125 // Cache used by SizeClassAllocator32.
126 template <class SizeClassAllocator
>
127 struct SizeClassAllocator32LocalCache
{
128 typedef SizeClassAllocator Allocator
;
129 typedef typename
Allocator::TransferBatch TransferBatch
;
131 void Init(AllocatorGlobalStats
*s
) {
134 s
->Register(&stats_
);
137 // Returns a TransferBatch suitable for class_id.
138 TransferBatch
*CreateBatch(uptr class_id
, SizeClassAllocator
*allocator
,
140 if (uptr batch_class_id
= per_class_
[class_id
].batch_class_id
)
141 return (TransferBatch
*)Allocate(allocator
, batch_class_id
);
145 // Destroys TransferBatch b.
146 void DestroyBatch(uptr class_id
, SizeClassAllocator
*allocator
,
148 if (uptr batch_class_id
= per_class_
[class_id
].batch_class_id
)
149 Deallocate(allocator
, batch_class_id
, b
);
152 void Destroy(SizeClassAllocator
*allocator
, AllocatorGlobalStats
*s
) {
155 s
->Unregister(&stats_
);
158 void *Allocate(SizeClassAllocator
*allocator
, uptr class_id
) {
159 CHECK_NE(class_id
, 0UL);
160 CHECK_LT(class_id
, kNumClasses
);
161 PerClass
*c
= &per_class_
[class_id
];
162 if (UNLIKELY(c
->count
== 0)) {
163 if (UNLIKELY(!Refill(c
, allocator
, class_id
)))
165 DCHECK_GT(c
->count
, 0);
167 void *res
= c
->batch
[--c
->count
];
168 PREFETCH(c
->batch
[c
->count
- 1]);
169 stats_
.Add(AllocatorStatAllocated
, c
->class_size
);
173 void Deallocate(SizeClassAllocator
*allocator
, uptr class_id
, void *p
) {
174 CHECK_NE(class_id
, 0UL);
175 CHECK_LT(class_id
, kNumClasses
);
176 // If the first allocator call on a new thread is a deallocation, then
177 // max_count will be zero, leading to check failure.
178 PerClass
*c
= &per_class_
[class_id
];
180 if (UNLIKELY(c
->count
== c
->max_count
))
181 Drain(c
, allocator
, class_id
);
182 c
->batch
[c
->count
++] = p
;
183 stats_
.Sub(AllocatorStatAllocated
, c
->class_size
);
186 void Drain(SizeClassAllocator
*allocator
) {
187 for (uptr i
= 1; i
< kNumClasses
; i
++) {
188 PerClass
*c
= &per_class_
[i
];
190 Drain(c
, allocator
, i
);
195 typedef typename
Allocator::SizeClassMapT SizeClassMap
;
196 static const uptr kBatchClassID
= SizeClassMap::kBatchClassID
;
197 static const uptr kNumClasses
= SizeClassMap::kNumClasses
;
198 // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
199 // allocated from kBatchClassID size class (except for those that are needed
200 // for kBatchClassID itself). The goal is to have TransferBatches in a totally
201 // different region of RAM to improve security.
202 static const bool kUseSeparateSizeClassForBatch
=
203 Allocator::kUseSeparateSizeClassForBatch
;
210 void *batch
[2 * TransferBatch::kMaxNumCached
];
212 PerClass per_class_
[kNumClasses
];
213 AllocatorStats stats_
;
215 void InitCache(PerClass
*c
) {
216 if (LIKELY(c
->max_count
))
218 const uptr batch_class_id
= SizeClassMap::ClassID(sizeof(TransferBatch
));
219 for (uptr i
= 1; i
< kNumClasses
; i
++) {
220 PerClass
*c
= &per_class_
[i
];
221 const uptr size
= Allocator::ClassIdToSize(i
);
222 const uptr max_cached
= TransferBatch::MaxCached(size
);
223 c
->max_count
= 2 * max_cached
;
224 c
->class_size
= size
;
225 // Precompute the class id to use to store batches for the current class
226 // id. 0 means the class size is large enough to store a batch within one
227 // of the chunks. If using a separate size class, it will always be
228 // kBatchClassID, except for kBatchClassID itself.
229 if (kUseSeparateSizeClassForBatch
) {
230 c
->batch_class_id
= (i
== kBatchClassID
) ? 0 : kBatchClassID
;
232 c
->batch_class_id
= (size
<
233 TransferBatch::AllocationSizeRequiredForNElements(max_cached
)) ?
237 DCHECK_NE(c
->max_count
, 0UL);
240 NOINLINE
bool Refill(PerClass
*c
, SizeClassAllocator
*allocator
,
243 TransferBatch
*b
= allocator
->AllocateBatch(&stats_
, this, class_id
);
246 CHECK_GT(b
->Count(), 0);
247 b
->CopyToArray(c
->batch
);
248 c
->count
= b
->Count();
249 DestroyBatch(class_id
, allocator
, b
);
253 NOINLINE
void Drain(PerClass
*c
, SizeClassAllocator
*allocator
,
255 const uptr count
= Min(c
->max_count
/ 2, c
->count
);
256 const uptr first_idx_to_drain
= c
->count
- count
;
257 TransferBatch
*b
= CreateBatch(
258 class_id
, allocator
, (TransferBatch
*)c
->batch
[first_idx_to_drain
]);
259 // Failure to allocate a batch while releasing memory is non recoverable.
260 // TODO(alekseys): Figure out how to do it without allocating a new batch.
262 Report("FATAL: Internal error: %s's allocator failed to allocate a "
263 "transfer batch.\n", SanitizerToolName
);
266 b
->SetFromArray(&c
->batch
[first_idx_to_drain
], count
);
268 allocator
->DeallocateBatch(&stats_
, class_id
, b
);