//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
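
  // Attach this cache's stats to the global stats object, if one is given.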
  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (UNLIKELY(s))
      s->Register(&stats_);
  }
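
  // Flushes all cached chunks back to the shared allocator and unregisters
  // this cache from the global stats.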
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (UNLIKELY(s))
      s->Unregister(&stats_);
  }
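
  // Pops a cached chunk for class_id, refilling from the shared allocator
  // when the local cache is empty. Returns nullptr on failure.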
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }
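
  // Pushes p into the local cache, draining half of the cached chunks back
  // to the shared allocator when the cache is full.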
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
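
  // Returns all cached chunks in every size class to the shared allocator.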
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  // Per-size-class stack of cached compact chunk pointers.
  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
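
  // Lazily computes max_count and class_size for every size class; a no-op
  // once the cache has been initialized.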
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }
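
  // Fetches up to half of max_count chunks for class_id from the shared
  // allocator into the local cache.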
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }
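
  // Returns the oldest `count` cached chunks for class_id to the shared
  // allocator.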
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;
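
  // Attach this cache's stats to the global stats object, if one is given.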
  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (UNLIKELY(s))
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id: either a batch allocated
  // from a dedicated batch size class, or b itself when the batch fits
  // inside one of the chunks (batch_class_id == 0).
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch *)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b, freeing it only if it was allocated from a
  // separate batch size class.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }
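
  // Flushes all cached chunks back to the shared allocator and unregisters
  // this cache from the global stats.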
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (UNLIKELY(s))
      s->Unregister(&stats_);
  }
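
  // Pops a cached chunk for class_id, refilling from the shared allocator
  // when the local cache is empty. Returns nullptr on failure.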
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }
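
  // Pushes p into the local cache, draining half of the cached chunks back
  // to the shared allocator when the cache is full.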
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
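
  // Returns all cached chunks in every size class to the shared allocator.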
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  // Per-size-class stack of cached chunk pointers, plus the class id used to
  // allocate TransferBatch objects for this size class.
  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
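
  // Lazily computes per-class limits and batch class ids; a no-op once the
  // cache has been initialized.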
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }
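
  // Grabs a whole TransferBatch from the shared allocator and copies its
  // chunks into the local cache.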
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }
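
  // Packs up to half of the cached chunks into a TransferBatch and returns
  // it to the shared allocator.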
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};