//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
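//
// Worked example (parameter values here are assumed, for illustration only):
// with kRegionSizeLog == 20 every Region spans 1 MiB. For a pointer p inside
// a Region serving a 64-byte size class with kMetadataSize == 8:
//   beg  = p & ~(kRegionSize - 1);              // Region start
//   n    = (p - beg) / 64;                      // chunk index in the Region
//   meta = (beg + kRegionSize) - (n + 1) * 8;   // metadata grows from the end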
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
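  // A TransferBatch is a fixed-capacity array of free chunks of one size
  // class. Whole batches are moved between the per-thread cache and the
  // central per-class free list, so the central mutex is taken once per
  // batch rather than once per chunk.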
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };
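
  // Note: the payload (kMaxNumCached pointers) plus the `next` and `count_`
  // fields add up to exactly SizeClassMap::kMaxNumCachedHint machine words;
  // the checks below assert that, and additionally that the batch size is a
  // power of two.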
  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(sizeof(TransferBatch) ==
                 SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

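  // AllocateBatch() and DeallocateBatch() move whole TransferBatches between
  // a per-thread AllocatorCache and the central per-class free list,
  // refilling the free list from a newly mapped Region when it runs dry.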
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->Count(), 0);
    sci->free_list.push_front(b);
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }
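
  // A minimal usage sketch (illustrative only; `allocator` and CountChunk
  // are hypothetical):
  //   static void CountChunk(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   uptr n = 0;
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(CountChunk, &n);
  //   allocator.ForceUnlock();
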
  static uptr AdditionalSize() {
    return 0;
  }

  // This is empty here. Currently only implemented in the 64-bit allocator.
  void ReleaseToOS() { }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                                       "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

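  // Carves a freshly mapped Region for class_id into chunks and links them
  // into TransferBatches on the central free list. Called under sci->mutex.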
  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    TransferBatch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
        b->Clear();
      }
      b->Add((void*)i);
      if (b->Count() == max_count) {
        CHECK_GT(b->Count(), 0);
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
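
// A minimal instantiation sketch (illustrative only; the parameter values,
// the FlatByteMap choice and the PrimaryAllocator32 name are assumptions,
// not taken from any particular tool):
//   static const uptr kRegionSizeLog = 20;  // 1 MiB regions
//   typedef FlatByteMap<(SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog)> ByteMap;
//   typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
//       CompactSizeClassMap, kRegionSizeLog, ByteMap> PrimaryAllocator32;
//   typedef SizeClassAllocator32LocalCache<PrimaryAllocator32> AllocatorCache;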