//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
11 #include "sanitizer_common/sanitizer_allocator_interface.h"
12 #include "sanitizer_common/sanitizer_common.h"
13 #include "sanitizer_common/sanitizer_placement_new.h"
14 #include "tsan_mman.h"
16 #include "tsan_report.h"
17 #include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {
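
// Callback invoked by the allocator when it maps or unmaps memory for user
// allocations. On unmap we release the matching shadow and meta-shadow ranges
// so that unmapped user memory does not keep shadow pages resident; the range
// is trimmed to whole pages before being returned to the OS.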
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    ReleaseMemoryToOS((uptr)MemToMeta(p), size / kMetaRatio);
  }
};
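
// The allocator object lives in statically allocated, suitably aligned storage
// and is initialized explicitly from InitializeAllocator(), so no global
// constructor has to run for it.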
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
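
// RAII helper: if the current thread has no Processor of its own (which can
// happen late in thread destruction, see the comment below), wire it to the
// shared global Processor for the duration of the scope; the GlobalProc mutex
// serializes such users.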
ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __nptl_deallocate_tsd
  //   __interceptor_munmap
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}
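
// Allocator setup is split in two phases: InitializeAllocator() prepares the
// user allocator itself, while InitializeAllocatorLate() constructs the
// GlobalProc (which needs ProcCreate()) later in startup.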
void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}
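
// Reports malloc/free performed from inside a signal handler (such calls are
// not async-signal-safe); the report is gated by the report_signal_unsafe
// flag and by fired suppressions.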
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDieOnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}
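
// calloc(): checks n * size for overflow before delegating to user_alloc() and
// zero-initializing the result.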
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDieOnBadRequest();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}
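
// OnUserAlloc/OnUserFree keep the meta map in sync with the heap and update
// the regular shadow: a fresh allocation is imitated as a write so that
// unsynchronized accesses to the new block are reported, and a freed range is
// marked so that later accesses race with the free.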
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
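
// realloc(): allocate-copy-free; the copy length is the smaller of the old
// usable size and the requested size.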
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  void *p2 = user_alloc(thr, pc, sz);
  if (p2 == 0)
    return 0;
  if (p) {
    uptr oldsz = user_alloc_usable_size(p);
    internal_memcpy(p2, p, min(oldsz, sz));
    user_free(thr, pc, p);
  }
  return p2;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}
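
// The hooks below forward allocator events to user code: the weak
// __sanitizer_*_hook functions above and any hooks registered through
// sanitizer_common. They are skipped until the runtime is initialized and
// while interceptors are ignored.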
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}
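
// internal_alloc/internal_free serve the runtime's own data structures through
// the separate internal allocator; thr->nomalloc flags contexts where
// allocation is forbidden.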
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
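// Implementation of the common __sanitizer_* allocator introspection interface
// on top of the TSan allocator.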
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
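
// Called when a thread goes idle: returns the cached memory from the
// per-Processor allocator caches and lets the meta map release unused
// resources.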
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"