//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}

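// Illustrative sketch (assumption, not part of the runtime logic): a
// front-end overrides the weak hooks above by supplying strong definitions
// with matching signatures, e.g. in its own source:
//
//   #include <stdio.h>
//   // uptr is a pointer-sized unsigned integer (unsigned long on LP64).
//   extern "C" void __tsan_malloc_hook(void *ptr, unsigned long size) {
//     fprintf(stderr, "allocated %lu bytes at %p\n", size, ptr);
//   }
//   extern "C" void __tsan_free_hook(void *ptr) {
//     fprintf(stderr, "about to free %p\n", ptr);
//   }
//
// The hooks are fired via invoke_malloc_hook()/invoke_free_hook() below,
// and only after the runtime has been initialized.
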
namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);

void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  atomic_store(a, v & ~1, memory_order_relaxed);
}

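// Usage sketch: the low bit of the MBlock's first word acts as a spin lock
// (acquired by the CAS in Lock(), cleared by the store in Unlock()). Callers
// take it through the RAII wrapper, as user_free() does below when draining
// the block's sync-variable list:
//
//   MBlock::ScopedLock l(b);                 // spins in MBlock::Lock()
//   for (SyncVar *s = b->ListHead(); s; s = s->next) {
//     ...                                    // list is protected here
//   }
//   // released by ~ScopedLock() -> MBlock::Unlock()
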
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return AllocatorReturnNull();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (ctx && ctx->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
    else
      MemoryResetRange(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}

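// Call-path sketch (assumption about the interceptor layer, which lives
// outside this file): the malloc-family interceptors funnel into user_alloc()
// and then fire the user hook, roughly:
//
//   // inside a malloc interceptor
//   void *p;
//   {
//     SCOPED_INTERCEPTOR_RAW(malloc, size);
//     p = user_alloc(thr, pc, size);   // align takes its declared default
//   }
//   invoke_malloc_hook(p, size);
//   return p;
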
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (ctx && ctx->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      CHECK_NE(b, 0);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __tsan_free_hook(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

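// Usage sketch (illustrative; the tag enumerator is assumed from other
// runtime files, not defined here): runtime-internal objects come from the
// internal allocator, tagged with an MBlockType for accounting, and are
// returned with internal_free():
//
//   void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
//   ReportStack *rs = new(mem) ReportStack();
//   ...
//   internal_free(rs);
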
}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __tsan_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __tsan_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __tsan_get_free_bytes() {
  return 1;
}

uptr __tsan_get_unmapped_bytes() {
  return 1;
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}

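// Illustrative use from application code (hypothetical snippet; the extern
// declarations below stand in for a proper interface header, and uptr is
// assumed to map to unsigned long):
//
//   #include <cstdio>
//   #include <cstdlib>
//   extern "C" bool __tsan_get_ownership(void *p);
//   extern "C" unsigned long __tsan_get_allocated_size(void *p);
//
//   void *q = malloc(100);
//   if (__tsan_get_ownership(q))
//     printf("%p is owned by the TSan allocator, block size %lu\n",
//            q, __tsan_get_allocated_size(q));
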
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
}

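// Sketch of intended use (assumption): a thread pool may call this entry
// point when a worker goes idle, so the thread-local allocator caches are
// returned to the shared pool instead of being pinned by sleeping threads:
//
//   extern "C" void __tsan_on_thread_idle();
//
//   void WorkerIdleLoop() {            // hypothetical thread-pool helper
//     while (!HasPendingWork()) {      // hypothetical predicate
//       __tsan_on_thread_idle();       // flush per-thread caches
//       WaitForWork();                 // hypothetical blocking wait
//     }
//   }
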
}  // extern "C"