//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};
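
// A minimal usage sketch (illustrative only: the PrimaryAllocator typedef, the
// AP64 params struct, and the allocator/allocator_stats objects below are
// assumptions about the embedding tool, not definitions from this header):
//
//   typedef SizeClassAllocator64<AP64> PrimaryAllocator;
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
//   static THREADLOCAL AllocatorCache cache;      // POD, one instance per thread
//
//   cache.Init(&allocator_stats);                 // register per-thread stats
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &allocator_stats);  // drains cached chunks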

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

  // private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
      c->class_size = Allocator::ClassIdToSize(i);
    }
  }

  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};
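
// Caching behavior sketch for SizeClassAllocator64LocalCache (derived from the
// code above; the concrete numbers are only an illustration, not constants from
// this header): if SizeClassMap::MaxCachedHint(i) is 64 for some class i, then
// max_count is 128; an empty cache refills max_count / 2 == 64 chunks from the
// shared allocator, and a full cache drains 64 chunks back, so in steady state
// at least 64 cache-only operations separate two consecutive trips to the
// shared allocator for that class.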

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(allocator, i);
    }
  }

  // private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      uptr max_cached = TransferBatch::MaxCached(i);
      c->max_count = 2 * max_cached;
      c->class_size = Allocator::ClassIdToSize(i);
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (c->class_size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
  }

  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b))
      DieOnFailure::OnOOM();
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};