//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
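//
// A minimal instantiation sketch (illustrative only; the 'AP64' allocator
// params and the cache placement are assumptions of this example, not part
// of this header):
//   using Allocator = CombinedAllocator<SizeClassAllocator64<AP64>>;
//   Allocator allocator;
//   Allocator::AllocatorCache cache;
//   allocator.Init(-1 /* release_to_os_interval_ms: never */);
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, 128, 8);
//   allocator.Deallocate(&cache, p);
//   allocator.DestroyCache(&cache);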
template <class PrimaryAllocator,
          class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
class CombinedAllocator {
 public:
  using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
  using SecondaryAllocator =
      LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
                         LargeMmapAllocatorPtrArray,
                         typename PrimaryAllocator::AddressSpaceView>;

  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
    stats_.InitLinkerInitialized();
    primary_.Init(release_to_os_interval_ms);
    secondary_.InitLinkerInitialized();
  }

  void Init(s32 release_to_os_interval_ms) {
    stats_.Init();
    primary_.Init(release_to_os_interval_ms);
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size) {
      Report("WARNING: %s: CombinedAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, size, alignment);
      return nullptr;
    }
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

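  // Worked example of the rounding above (assuming a size-class map that has
  // a 32-byte class): Allocate(cache, 24, 16) rounds the request up to 32,
  // and the 32-byte chunk the primary hands back is 32-aligned, hence also
  // 16-aligned. The secondary path honors 'alignment' itself, so it is given
  // the unrounded 'original_size' to avoid allocating the padding.
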
  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  void ForceReleaseToOS() {
    primary_.ForceReleaseToOS();
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

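  // Edge cases above mirror C realloc (illustrative calls):
  //   Reallocate(cache, nullptr, n, a)  // same as Allocate(cache, n, a)
  //   Reallocate(cache, p, 0, a)        // frees p and returns nullptr
  //   Reallocate(cache, p, n, a)        // copies Min(n, old_size) bytes into
  //                                     // a new chunk, then frees the old one
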
  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

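  // Typical per-thread cache lifecycle (a sketch; frontends usually keep one
  // AllocatorCache per thread, e.g. in TLS):
  //   allocator.InitCache(&cache);     // at thread start
  //   ... Allocate()/Deallocate() through &cache ...
  //   allocator.SwallowCache(&cache);  // return cached chunks to the primary
  //   allocator.DestroyCache(&cache);  // at thread teardown
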
  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

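  // Note that ForceUnlock() releases the two locks in the reverse of the
  // order in which ForceLock() acquires them, keeping the lock order
  // consistent.
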
  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

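  // Illustrative iteration (the callback and counter below are hypothetical;
  // the allocator must be locked, e.g. via ForceLock(), around the call):
  //   void CountChunk(uptr chunk, void *arg) { ++*reinterpret_cast<uptr *>(arg); }
  //   ...
  //   uptr n = 0;
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(CountChunk, &n);
  //   allocator.ForceUnlock();
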
 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};