//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

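// Callbacks invoked by the primary allocator when it maps or unmaps memory.
// On unmap, the corresponding shadow and meta shadow ranges are released
// back to the OS.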
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

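// Fallback processor shared by all threads that have already released their
// own Processor (e.g. during late thread teardown); access is serialized
// by mtx.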
struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  // a free() reached via __nptl_deallocate_tsd during thread exit, and an
  // munmap() of the thread stack reached via __interceptor_munmap.
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

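// Reports malloc/free calls made from inside a signal handler when
// report_signal_unsafe is enabled and the report is not suppressed.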
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

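// Common allocation path: allocates from the per-processor cache, registers
// the new block via OnUserAlloc, and optionally reports signal-unsafe calls.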
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return Allocator::FailureHandler::OnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(p == 0))
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n)))
    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

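// Called for every user allocation: records the block in the meta map and
// imitates a write to the whole range on behalf of the allocating thread
// (unless reads/writes are currently ignored).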
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

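// Called for every user free: removes the block from the meta map and marks
// the range as freed so that later accesses are reported as use-after-free.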
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    Allocator::FailureHandler::OnBadRequest();
    return errno_EINVAL;
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    return Allocator::FailureHandler::OnBadRequest();
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

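// Returns the usable size of a heap block, 0 for pointers the allocator does
// not own, and 1 for zero-sized allocations (they occupy one byte internally).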
uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

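// Malloc/free hooks: forwarded to the weak __sanitizer_* hooks and to the
// registered sanitizer hooks, except while interceptors are being ignored.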
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

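// Internal allocator used for TSan's own data structures. thr->nomalloc marks
// contexts where allocation is forbidden; it is cleared before CHECK(0)
// because CHECK itself may allocate.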
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

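// Flushes per-thread and per-processor caches (vector clocks, allocator
// caches) and notifies the meta map that the processor is idle, reducing
// memory consumption for idle threads.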
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"