//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

12 #include "sanitizer_common/sanitizer_allocator_checks.h"
13 #include "sanitizer_common/sanitizer_allocator_interface.h"
14 #include "sanitizer_common/sanitizer_allocator_report.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_placement_new.h"
18 #include "tsan_mman.h"
20 #include "tsan_report.h"
21 #include "tsan_flags.h"
// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
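    // Only whole pages of meta shadow can be returned to the OS, so trim the
    // range inward on both ends: kPageSize is the amount of app memory whose
    // meta shadow occupies exactly one OS page, and partially covered meta
    // pages at either end must be kept.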
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

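// The allocator lives in statically allocated storage and is initialized
// explicitly in InitializeAllocator(), avoiding a global C++ constructor.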
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

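// Fallback Processor shared by all threads that do not have their own proc
// (e.g. threads that are already being destroyed); see ScopedGlobalProcessor.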
struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

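// Both limits are in bytes: the hard cap below is 1ull << 40 (1 TiB), and
// max_user_defined_malloc_size is derived from the max_allocation_size_mb
// flag (converted from megabytes to bytes) in InitializeAllocator().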
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

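// Reports a malloc/free performed inside a signal handler: heap allocation
// is not async-signal-safe, so doing it there is a bug in the application.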
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

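// CheckForCallocOverflow rejects n * size products that overflow uptr before
// the multiplication below is performed.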
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

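// Marks the freed range in shadow memory so that later accesses can be
// reported as races with the free (heap-use-after-free); sz is recovered from
// the metamap since free() does not carry a size.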
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently;
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

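// Unlike the other allocation wrappers, posix_memalign above reports failure
// through its return value rather than errno; aligned_alloc below follows
// the usual errno convention.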
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

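// internal_alloc/internal_free serve the runtime's own data structures; they
// bypass the user allocator and therefore produce no hooks or shadow updates.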
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

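// Can be called when a thread becomes idle: it returns the thread's clock
// and allocator caches to their shared pools and lets the metamap release
// per-proc resources.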
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"