//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
// May be overridden by the front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}
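// A front-end can provide strong definitions of the hooks above to observe
// allocations. Hypothetical override (the Printf-based body is illustrative
// only, not part of this file):
//
//   extern "C" void __tsan_malloc_hook(void *ptr, uptr size) {
//     Printf("tool: allocated %zu bytes at %p\n", size, ptr);
//   }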
namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      // The lock bit is already set: yield, reload and retry.
      internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    // Try to set the lock bit; acquire ordering pairs with Unlock().
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}
void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  atomic_store(a, v & ~1, memory_order_relaxed);
}
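// Bit 0 of the first word of MBlock doubles as a spin-lock bit: Lock() spins
// until it can set the bit with acquire semantics, Unlock() clears it.
// Callers in this file take the lock through the RAII helper, e.g.:
//
//   MBlock::ScopedLock l(b);  // holds the block lock for the scope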
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};
// The allocator lives in a static buffer and is constructed lazily,
// which avoids C++ static initialization order issues.
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}
void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}
void AllocatorPrintStats() {
  allocator()->PrintStats();
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  Context *ctx = CTX();
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return AllocatorReturnNull();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  // Construct the block descriptor in the allocator metadata area.
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (CTX() && CTX()->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
    else
      MemoryResetRange(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}
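// Sketch of how an intercepted allocation is expected to reach user_alloc()
// (hypothetical; the real interceptors live elsewhere in the runtime):
//
//   void *wrapped_malloc(ThreadState *thr, uptr pc, uptr size) {
//     void *p = user_alloc(thr, pc, size);
//     invoke_malloc_hook(p, size);
//     return p;
//   }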
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    // Destroy all sync objects associated with the block.
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}
uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}
MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}
void invoke_malloc_hook(void *ptr, uptr size) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  // Do not call the user hook from inside the runtime or before init.
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_malloc_hook(ptr, size);
}
void invoke_free_hook(void *ptr) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_free_hook(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}
void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}
}  // namespace __tsan

using namespace __tsan;  // NOLINT

extern "C" {
uptr __tsan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMalloced];
  u64 f = stats[AllocatorStatFreed];
  return m >= f ? m - f : 1;
}
uptr __tsan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMmapped];
  u64 f = stats[AllocatorStatUnmapped];
  return m >= f ? m - f : 1;
}
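// Sketch of a (hypothetical) caller polling the two counters above:
//
//   uptr live = __tsan_get_current_allocated_bytes();
//   uptr heap = __tsan_get_heap_size();
//   Printf("live: %zu bytes out of %zu mapped\n", live, heap);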
uptr __tsan_get_free_bytes() {
  return 1;
}

uptr __tsan_get_unmapped_bytes() {
  return 1;
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}
bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}
uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  // Return per-thread allocator caches to the central allocator.
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
}
}  // extern "C"