//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region: a result of a single call to
// MmapAlignedOrDieOnFatalError(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
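//
// For illustration only: this class reads its entire configuration from the
// Params template argument. The sketch below shows a hypothetical Params
// struct (the name AP32 and the concrete values are illustrative, not part of
// this header); it only demonstrates which members Params is expected to
// provide.
//
//   struct AP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     typedef FlatByteMap<kSpaceSize / (1ULL << kRegionSizeLog)> ByteMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<AP32> PrimaryAllocator;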

struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

  struct TransferBatch {
    // The first two words (next and count_) act as the batch header, hence
    // the -2 relative to kMaxNumCachedHint.
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void*) * n;
    }
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    // Metadata is laid out backwards from the end of the region (see the
    // Region layout comment above).
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty() &&
        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
      return nullptr;
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  static uptr AdditionalSize() {
    return 0;
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, "SizeClassAllocator32"));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    if (UNLIKELY(!reg))
      return false;
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    CHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)i);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};