//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
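    // The two adjustments below keep only whole kPageSize-aligned pages,
    // since only meta shadow pages fully covered by the unmapped block can be
    // released. Illustrative example (assuming kPageSize == 0x2000): for
    // p == 0x7001000 and size == 0x6000, the first step advances p to
    // 0x7002000 and shrinks size to 0x5000, the second trims size to 0x4000,
    // and size / kMetaRatio bytes of meta shadow starting at MemToMeta(p)
    // are released.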
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex represents the internal allocator combined for
  // the purposes of deadlock detection. The internal allocator
  // uses multiple mutexes; moreover, they are locked only occasionally
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex to serve as a substitute for these mutexes.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
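
// Note: InternalAllocAccess() below acquires and immediately releases
// internal_alloc_mtx. As far as we can tell, this exists purely to feed the
// deadlock (lock-order) detector: it records that internal-allocator accesses
// nest inside whatever mutexes the caller already holds, without actually
// serializing those accesses.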
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  // a late free() coming from __nptl_deallocate_tsd during thread teardown,
  // and a munmap() of a thread stack going through __interceptor_munmap.
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}
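
// Note: ScopedGlobalProcessor is what lets user_free() below run even when the
// current thread has already lost its Processor (very early or very late in
// its lifetime): the constructor temporarily wires the shared global proc to
// the thread and the destructor unwires it, so thr->proc() is always valid
// inside the scope.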

void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}
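
// Note: max_allocation_size_mb is given in megabytes, hence the << 20 above.
// For example, max_allocation_size_mb=2048 caps a single allocation at
// 2048 << 20 == 2 GiB; with the flag unset, the cap falls back to
// kMaxAllowedMallocSize (1ull << 40, i.e. 1 TiB).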

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
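
// Illustrative note: this report fires when an allocation or deallocation
// interceptor runs while the thread is inside a signal handler, e.g.
//   void handler(int) { free(buf); }  // malloc/free are not async-signal-safe
// and is gated by the report_signal_unsafe flag via ShouldReport() above.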

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}
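
// Note: CheckForCallocOverflow(size, n) reports whether size * n would
// overflow, so the n * size products above and in user_reallocarray() below
// are only computed once the multiplication is known to be safe.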

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have trace initialized, we can't imitate writes.
  // In such a case just reset the shadow range; it is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}
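
// Note: as implemented above, realloc(nullptr, sz) degenerates to a plain
// allocation, realloc(p, 0) frees the block and returns nullptr, and a
// successful resize copies min(old_size, new_size) bytes into the new block
// before freeing the old one.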

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}
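
// Note: per the POSIX contract, posix_memalign() returns an error number
// (errno_EINVAL/errno_ENOMEM here) instead of setting errno, and only stores
// into *memptr on success, which is why this wrapper does not go through
// SetErrnoOnNull() like the others.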

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;
  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.
  return (const void *)beg;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}
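
// Illustrative note: RunMallocHooks/RunFreeHooks dispatch to hooks registered
// through the public sanitizer interface, so a client could observe
// tsan-intercepted allocations roughly like this (sketch, not part of tsan):
//   static void OnAlloc(const volatile void *p, size_t sz) { /* ... */ }
//   static void OnFree(const volatile void *p) { /* ... */ }
//   __sanitizer_install_malloc_and_free_hooks(OnAlloc, OnFree);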

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
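
// Illustrative note: applications can use these entry points (declared in
// <sanitizer/allocator_interface.h>) to introspect a heap pointer, e.g.:
//   if (__sanitizer_get_ownership(p))
//     printf("block at %p, %zu usable bytes\n",
//            __sanitizer_get_allocated_begin(p),
//            __sanitizer_get_allocated_size(p));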

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"