//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}
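
// Illustrative sketch (not part of the runtime): a front-end or client can
// observe allocations by providing strong definitions of the weak hooks
// above in its own translation unit, e.g.:
//
//   extern "C" void __tsan_malloc_hook(void *ptr, uptr size) {
//     // record the allocation of `size` bytes at `ptr`
//   }
//   extern "C" void __tsan_free_hook(void *ptr) {
//     // record the deallocation of `ptr`
//   }
//
// The strong definitions take precedence over the WEAK ones defined here.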

namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);
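
// The low bit of MBlock's first word doubles as a spin lock: Lock() sets it
// with an acquire compare-and-swap (yielding while it is already held), and
// Unlock() clears it again with a relaxed store.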
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  atomic_store(a, v & ~1, memory_order_relaxed);
}

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}
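
// Reports a signal-unsafe call if the current thread is executing a signal
// handler and report_signal_unsafe is enabled, unless the report is
// suppressed.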
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  Context *ctx = CTX();
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}
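
// Allocation path for user requests: memory comes from the thread-local
// allocator cache, an MBlock header is constructed in the allocator
// metadata, and the shadow for the returned range is updated.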
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return AllocatorReturnNull();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (CTX() && CTX()->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
    else
      MemoryResetRange(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}
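
// Frees a user block: destroys any SyncVar objects attached to the block,
// marks the range as freed in the shadow (when the runtime is initialized),
// and returns the memory to the thread-local allocator cache.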
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      CHECK_NE(b, 0);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}
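
// The user hooks are invoked only once the runtime is fully initialized and
// only for calls that originate outside the runtime itself (thr->in_rtl == 0).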
void invoke_malloc_hook(void *ptr, uptr size) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_free_hook(ptr);
}
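
// Allocation for the runtime's own data structures. These paths must not be
// reached while thr->nomalloc is set; nomalloc is reset before CHECK(0)
// because CHECK itself allocates internally.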
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
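// Allocator statistics and introspection entry points exported to users of
// the runtime.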

uptr __tsan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMalloced];
  u64 f = stats[AllocatorStatFreed];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMmapped];
  u64 f = stats[AllocatorStatUnmapped];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_free_bytes() {
  return 1;
}

uptr __tsan_get_unmapped_bytes() {
  return 1;
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
}
}  // extern "C"