//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
12 #include "sanitizer_common/sanitizer_allocator_checks.h"
13 #include "sanitizer_common/sanitizer_allocator_interface.h"
14 #include "sanitizer_common/sanitizer_allocator_report.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_placement_new.h"
18 #include "tsan_interface.h"
19 #include "tsan_mman.h"
21 #include "tsan_report.h"
22 #include "tsan_flags.h"
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    // Trim the range to kPageSize boundaries so that the released meta shadow
    // range covers whole OS pages only.
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator *>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex represents the internal allocator combined for
  // the purposes of deadlock detection. The internal allocator
  // uses multiple mutexes, moreover they are locked only occasionally
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex to serve as a substitute for these mutexes.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc *>(&global_proc_placeholder);
}
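
// Touches internal_alloc_mtx so that the deadlock detector observes an
// acquire/release of the (fake) internal allocator mutex around every
// internal allocation, without actually holding it across the allocation.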
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;
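
// Allocation size is capped by kMaxAllowedMallocSize and, if the
// max_allocation_size_mb flag is set, by that value converted to bytes.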
void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new (global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have trace initialized, we can't imitate writes.
  // In such case just reset the shadow range, it is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void *)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
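
// realloc semantics: a null pointer behaves like malloc, a zero size frees
// the block, otherwise allocate a new block, copy min(old, new) bytes, and
// free the old block.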
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;
  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.
  return (const void *)beg;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

uptr user_alloc_usable_size_fast(const void *p) {
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  // Static objects may have malloc'd before tsan completes
  // initialization, and may believe returned ptrs to be valid.
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}
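
// Run user-registered malloc/free hooks (installed via
// __sanitizer_install_malloc_and_free_hooks). Skipped before the runtime is
// initialized and while interceptors are ignored for this thread.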
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}
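
// Allocation/deallocation for the runtime's own needs: routed through the
// internal allocator's per-proc cache, with InternalAllocAccess() recording
// the fake internal-allocator mutex for deadlock detection. nomalloc is
// cleared before CHECK because CHECK itself may call internal_malloc().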
void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
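// Sanitizer allocator introspection interface (declared in
// <sanitizer/allocator_interface.h>). A minimal usage sketch from user code,
// assuming the program runs under TSan:
//
//   void *p = malloc(40);
//   if (__sanitizer_get_ownership(p))
//     printf("allocated size: %zu\n", __sanitizer_get_allocated_size(p));
//   free(p);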
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = user_alloc_usable_size_fast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() {
  allocator()->ForceReleaseToOS();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"