//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
11 #ifndef SANITIZER_ALLOCATOR_H
12 #error This file must be included inside sanitizer_allocator.h
15 // This class implements a complete memory allocator by using two
16 // internal allocators:
17 // PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
18 // When allocating 2^x bytes it should return 2^x aligned chunk.
19 // PrimaryAllocator is used via a local AllocatorCache.
20 // SecondaryAllocator can allocate anything, but is not efficient.
21 template <class PrimaryAllocator
, class AllocatorCache
,
22 class SecondaryAllocator
> // NOLINT
23 class CombinedAllocator
{
25 void InitCommon(bool may_return_null
) {
27 atomic_store(&may_return_null_
, may_return_null
, memory_order_relaxed
);
30 void InitLinkerInitialized(bool may_return_null
) {
31 secondary_
.InitLinkerInitialized(may_return_null
);
32 stats_
.InitLinkerInitialized();
33 InitCommon(may_return_null
);
36 void Init(bool may_return_null
) {
37 secondary_
.Init(may_return_null
);
39 InitCommon(may_return_null
);
42 void *Allocate(AllocatorCache
*cache
, uptr size
, uptr alignment
,
43 bool cleared
= false, bool check_rss_limit
= false) {
44 // Returning 0 on malloc(0) may break a lot of code.
47 if (size
+ alignment
< size
) return ReturnNullOrDieOnBadRequest();
48 if (check_rss_limit
&& RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
50 size
= RoundUpTo(size
, alignment
);
52 bool from_primary
= primary_
.CanAllocate(size
, alignment
);
54 res
= cache
->Allocate(&primary_
, primary_
.ClassID(size
));
56 res
= secondary_
.Allocate(&stats_
, size
, alignment
);
58 CHECK_EQ(reinterpret_cast<uptr
>(res
) & (alignment
- 1), 0);
59 if (cleared
&& res
&& from_primary
)
60 internal_bzero_aligned16(res
, RoundUpTo(size
, 16));
64 bool MayReturnNull() const {
65 return atomic_load(&may_return_null_
, memory_order_acquire
);
68 void *ReturnNullOrDieOnBadRequest() {
71 ReportAllocatorCannotReturnNull(false);
74 void *ReturnNullOrDieOnOOM() {
75 if (MayReturnNull()) return nullptr;
76 ReportAllocatorCannotReturnNull(true);
79 void SetMayReturnNull(bool may_return_null
) {
80 secondary_
.SetMayReturnNull(may_return_null
);
81 atomic_store(&may_return_null_
, may_return_null
, memory_order_release
);
84 bool RssLimitIsExceeded() {
85 return atomic_load(&rss_limit_is_exceeded_
, memory_order_acquire
);
88 void SetRssLimitIsExceeded(bool rss_limit_is_exceeded
) {
89 atomic_store(&rss_limit_is_exceeded_
, rss_limit_is_exceeded
,
90 memory_order_release
);
93 void Deallocate(AllocatorCache
*cache
, void *p
) {
95 if (primary_
.PointerIsMine(p
))
96 cache
->Deallocate(&primary_
, primary_
.GetSizeClass(p
), p
);
98 secondary_
.Deallocate(&stats_
, p
);
101 void *Reallocate(AllocatorCache
*cache
, void *p
, uptr new_size
,
104 return Allocate(cache
, new_size
, alignment
);
106 Deallocate(cache
, p
);
109 CHECK(PointerIsMine(p
));
110 uptr old_size
= GetActuallyAllocatedSize(p
);
111 uptr memcpy_size
= Min(new_size
, old_size
);
112 void *new_p
= Allocate(cache
, new_size
, alignment
);
114 internal_memcpy(new_p
, p
, memcpy_size
);
115 Deallocate(cache
, p
);
119 bool PointerIsMine(void *p
) {
120 if (primary_
.PointerIsMine(p
))
122 return secondary_
.PointerIsMine(p
);
125 bool FromPrimary(void *p
) {
126 return primary_
.PointerIsMine(p
);
129 void *GetMetaData(const void *p
) {
130 if (primary_
.PointerIsMine(p
))
131 return primary_
.GetMetaData(p
);
132 return secondary_
.GetMetaData(p
);
135 void *GetBlockBegin(const void *p
) {
136 if (primary_
.PointerIsMine(p
))
137 return primary_
.GetBlockBegin(p
);
138 return secondary_
.GetBlockBegin(p
);
141 // This function does the same as GetBlockBegin, but is much faster.
142 // Must be called with the allocator locked.
143 void *GetBlockBeginFastLocked(void *p
) {
144 if (primary_
.PointerIsMine(p
))
145 return primary_
.GetBlockBegin(p
);
146 return secondary_
.GetBlockBeginFastLocked(p
);
149 uptr
GetActuallyAllocatedSize(void *p
) {
150 if (primary_
.PointerIsMine(p
))
151 return primary_
.GetActuallyAllocatedSize(p
);
152 return secondary_
.GetActuallyAllocatedSize(p
);
155 uptr
TotalMemoryUsed() {
156 return primary_
.TotalMemoryUsed() + secondary_
.TotalMemoryUsed();
159 void TestOnlyUnmap() { primary_
.TestOnlyUnmap(); }
161 void InitCache(AllocatorCache
*cache
) {
162 cache
->Init(&stats_
);
165 void DestroyCache(AllocatorCache
*cache
) {
166 cache
->Destroy(&primary_
, &stats_
);
169 void SwallowCache(AllocatorCache
*cache
) {
170 cache
->Drain(&primary_
);
173 void GetStats(AllocatorStatCounters s
) const {
178 primary_
.PrintStats();
179 secondary_
.PrintStats();
182 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
183 // introspection API.
185 primary_
.ForceLock();
186 secondary_
.ForceLock();
190 secondary_
.ForceUnlock();
191 primary_
.ForceUnlock();
194 void ReleaseToOS() { primary_
.ReleaseToOS(); }
196 // Iterate over all existing chunks.
197 // The allocator must be locked when calling this function.
198 void ForEachChunk(ForEachChunkCallback callback
, void *arg
) {
199 primary_
.ForEachChunk(callback
, arg
);
200 secondary_
.ForEachChunk(callback
, arg
);
204 PrimaryAllocator primary_
;
205 SecondaryAllocator secondary_
;
206 AllocatorGlobalStats stats_
;
207 atomic_uint8_t may_return_null_
;
208 atomic_uint8_t rss_limit_is_exceeded_
;