//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // __aligned_malloc, but the allocated blocks can't be passed to free;
  // they need to be passed to __aligned_free. The InternalAlloc interface
  // does not account for such a requirement. Alignment does not seem to be
  // used anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

// In this configuration all internal allocations go straight to libc, so
// there is no InternalAllocator instance.
InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

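// The Raw* helpers below perform the actual allocator calls. Callers that do
// not own an InternalAllocatorCache pass null and fall back to the shared
// internal_allocator_cache, which is guarded by internal_allocator_cache_mu.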
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

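// Every block returned by InternalAlloc/InternalRealloc is prefixed with an
// 8-byte header holding kBlockMagic. InternalRealloc and InternalFree verify
// the header before un-shifting the pointer, which catches frees of pointers
// that did not come from InternalAlloc.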
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (!p)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (!p)
    return nullptr;
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size)))
    return InternalAllocator::FailureHandler::OnBadRequest();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

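// Illustrative sketch only (hypothetical caller, not code from this file):
//   void *buf = InternalAlloc(n, /*cache=*/nullptr);      // header written
//   buf = InternalRealloc(buf, 2 * n, /*cache=*/nullptr); // header checked
//   InternalFree(buf, /*cache=*/nullptr);                 // header checked
// Passing a per-thread InternalAllocatorCache avoids the shared-cache lock.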
// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

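// LowLevelAllocator is a simple bump allocator: it rounds requests up to 8
// bytes, carves them out of regions obtained with MmapOrDie, and never frees
// anything. Each newly mapped region is reported through
// low_level_alloc_callback if one was registered.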
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

// Prints error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void *ReturnNullOrDieOnFailure::OnBadRequest() {
  if (AllocatorMayReturnNull())
    return nullptr;
  ReportAllocatorCannotReturnNull();
}

void *ReturnNullOrDieOnFailure::OnOOM() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
  if (AllocatorMayReturnNull())
    return nullptr;
  ReportAllocatorCannotReturnNull();
}

void NORETURN *DieOnFailure::OnBadRequest() {
  ReportAllocatorCannotReturnNull();
}

void NORETURN *DieOnFailure::OnOOM() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
  ReportAllocatorCannotReturnNull();
}

} // namespace __sanitizer