//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    // Trim the range to kPageSize-aligned boundaries on both ends, so that
    // the corresponding meta shadow range covers whole OS pages.
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};
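
// Worked example of the arithmetic above (illustrative values; the actual
// constants live in tsan_platform.h and vary per platform): with
// kMetaShadowCell == 8 and kMetaShadowSize == 4, kMetaRatio == 2, so with
// 4096-byte OS pages kPageSize == 8192. Unmapping a 1 MB block then trims
// both ends to 8192-byte boundaries and releases size / kMetaRatio, i.e.
// up to 512 KB, of meta shadow back to the OS.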
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc() : mtx(MutexTypeGlobalProc), proc(ProcCreate()) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
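
// Note: both singletons above live in raw, cache-line-aligned byte arrays:
// allocator() is initialized in place via Init() and global_proc() via
// placement new (see InitializeAllocator/InitializeAllocatorLate below).
// This is the usual sanitizer pattern for avoiding C++ static constructors
// and heap allocation during early startup.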
ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}
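
// In short: ScopedGlobalProcessor is an RAII guard for paths (e.g. user_free
// below) that may run on a thread whose own Processor is already gone. The
// constructor wires the shared global Processor to such a thread and takes
// gp->mtx; the destructor unwires it and releases the mutex, so at most one
// proc-less thread uses the global Processor at a time.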
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}
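
// Example of the cap computation (arithmetic only, values illustrative):
// max_allocation_size_mb=16 caps allocations at 16 << 20 == 16 MB; when the
// flag is unset, the cap falls back to kMaxAllowedMallocSize == 1ull << 40,
// i.e. 1 TB.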
void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
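
// All user-facing allocators below (malloc, calloc, realloc, memalign and
// friends) funnel into user_alloc_internal, so size and alignment limits,
// OOM handling and allocation bookkeeping are enforced in a single place.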
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}
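
// Error-handling convention used above and in the functions below: when the
// allocator may return null (allocator_may_return_null=1), an oversized or
// failed request returns nullptr (with errno set by the SetErrnoOnNull
// wrappers); otherwise a fatal report with the current stack is produced via
// the Report* helpers and the process dies.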
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}
void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}
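
// CheckForCallocOverflow rejects requests where n * size overflows uptr.
// For instance, on a 64-bit target calloc(1ull << 33, 1ull << 32) would
// otherwise wrap to 2^65 mod 2^64 == 0 bytes and "succeed" with a tiny block.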
void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}
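
// Rationale: imitating a write over the fresh block attributes that memory
// to the allocating thread, so a later unsynchronized access from another
// thread races with the allocation itself and gets reported. If the thread
// is currently ignoring reads and writes, the shadow for the range is
// simply reset instead.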
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently:
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}
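
// Note the semantics implemented above: realloc(nullptr, sz) behaves as
// malloc(sz); realloc(p, 0) frees p and returns nullptr; otherwise the data
// is copied with internal_memcpy up to min(old size, new size), and the old
// block is freed only if the new allocation succeeded.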
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}
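
// Unlike memalign/aligned_alloc, posix_memalign reports failure through its
// return value (errno_EINVAL/errno_ENOMEM) rather than via errno, which is
// why this function never goes through the SetErrnoOnNull wrapper.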
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}
void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
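
// Example: with 4096-byte pages, pvalloc(1) rounds the size up to 4096 and
// pvalloc(0) still allocates one full page; both results are page-aligned
// because the request is forwarded with align == PageSize.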
uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}
void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}
void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}
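
// The nomalloc flag marks regions where the runtime must not allocate (e.g.
// under internal locks). Alloc/FreeImpl clear it before CHECK-failing
// because CHECK itself allocates through internal_malloc; without the reset
// the failure path would recurse into the same assertion.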
}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}
int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
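
// __tsan_on_thread_idle below is a hint that long-lived threads (e.g. pool
// workers) can call when they go idle: it returns the thread's cached clock
// slots, allocator caches and metadata to their shared pools so the memory
// can be reused by other threads.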
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"