//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD (see the usage sketch below).
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};
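
// Usage sketch (illustrative only; PrimaryAllocator, primary_allocator,
// allocator_stats and class_id are assumptions about the embedding allocator,
// not part of this header):
//
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
//   static THREADLOCAL AllocatorCache cache;      // one POD cache per thread
//   cache.Init(&allocator_stats);                 // register per-thread stats
//   void *p = cache.Allocate(&primary_allocator, class_id);
//   cache.Deallocate(&primary_allocator, class_id, p);
//   cache.Destroy(&primary_allocator, &allocator_stats);  // drains the cache
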
// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(c, allocator, class_id);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(c, allocator, class_id, c->count);
    }
  }

  // private:
  struct PerClass {
    uptr count;
    uptr max_count;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
    }
  }

  NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
    allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                num_requested_chunks);
    c->count = num_requested_chunks;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * TransferBatch::MaxCached(i);
    }
  }

  // TransferBatch class is declared in SizeClassAllocator.
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we may use one of the chunks to store the batch.
  // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
  static uptr SizeClassForTransferBatch(uptr class_id) {
    if (Allocator::ClassIdToSize(class_id) <
        TransferBatch::AllocationSizeRequiredForNElements(
            TransferBatch::MaxCached(class_id)))
      return SizeClassMap::ClassID(sizeof(TransferBatch));
    return 0;
  }
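
  // Worked example (illustrative numbers only, not taken from any particular
  // SizeClassMap): suppose TransferBatch::MaxCached(class_id) is 64, so a
  // batch for 64 elements needs on the order of 64 * sizeof(void *) bytes.
  // If Allocator::ClassIdToSize(class_id) is only 96 bytes, a chunk of this
  // class cannot hold such a batch, and the batch is allocated from
  // SizeClassMap::ClassID(sizeof(TransferBatch)). If the chunk size were 2048
  // bytes instead, the function returns 0 and one of the cached chunks itself
  // is reused as the batch (see CreateBatch below).
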
  // Returns a TransferBatch suitable for class_id.
  // For small size classes allocates the batch from the allocator.
  // For large size classes simply returns b.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }
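
  // Illustrative call (mirrors how Drain() below uses CreateBatch; idx is a
  // hypothetical index into the per-class batch array): for a small size
  // class the result is a freshly allocated batch object, for a large size
  // class it is simply the chunk that was passed in:
  //
  //   TransferBatch *b =
  //       CreateBatch(class_id, allocator, (TransferBatch *)c->batch[idx]);
  //   // small class: b != c->batch[idx]  (separate batch object)
  //   // large class: b == c->batch[idx]  (chunk doubles as the batch)
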
  // Destroys TransferBatch b.
  // For small size classes deallocates b to the allocator.
  // Does nothing for large size classes.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      Deallocate(allocator, batch_class_id, b);
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};