//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);
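
// The low bit of MBlock's first word doubles as a spin-lock flag;
// Lock/Unlock below operate on the object itself through atomics.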
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      // The lock bit is already set: back off and re-read the word.
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  atomic_store(a, v & ~1, memory_order_relaxed);
}

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}
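
// Report a malloc/free call made from within a signal handler when the
// report_signal_unsafe flag is set.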
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}
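
// Allocates user memory, places MBlock metadata next to the chunk and
// updates the shadow state for the returned range.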
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return AllocatorReturnNull();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (ctx && ctx->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
    else
      MemoryResetRange(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}
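
// Frees user memory: destroys sync objects attached to the block, marks the
// range as freed in shadow memory and returns the chunk to the allocator.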
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (ctx && ctx->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      CHECK_NE(b, 0);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __tsan_free_hook(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;
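
// Allocator statistics interface (__tsan_get_*), exported with C linkage.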
extern "C" {
uptr __tsan_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __tsan_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __tsan_get_free_bytes() {
  return 1;
}

uptr __tsan_get_unmapped_bytes() {
  return 1;
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
}
}  // extern "C"