//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

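// A minimal example of user code that reaches this path (hypothetical,
// not part of this file): unlocking a mutex from a thread that never
// locked it is funneled through MutexUnlock() below and ends up here
// with ReportTypeMutexBadUnlock.
//
//   pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
//
//   void *wrong_thread(void *arg) {
//     pthread_mutex_unlock(&m);  // this thread does not hold m
//     return nullptr;
//   }
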
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

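// MutexCreate is called from the interceptors and the dynamic annotations.
// A rough sketch of how the pthread interceptor maps onto the parameters
// (hypothetical user code; the real interceptor lives elsewhere in tsan):
//
//   pthread_mutexattr_t a;
//   pthread_mutexattr_init(&a);
//   pthread_mutexattr_settype(&a, PTHREAD_MUTEX_RECURSIVE);
//   pthread_mutex_t m;
//   pthread_mutex_init(&m, &a);
//   // -> MutexCreate(thr, pc, (uptr)&m, /*rw=*/false,
//   //                /*recursive=*/true, /*linker_init=*/false)
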
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if (s->is_linker_init) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

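// The imitated write above turns the classic unlock-destroy race into an
// ordinary data race on the mutex memory. A hypothetical example that it
// catches:
//
//   // Thread 1:                      // Thread 2:
//   pthread_mutex_lock(&mu);          pthread_mutex_lock(&mu);
//   pthread_mutex_unlock(&mu);        pthread_mutex_unlock(&mu);
//                                     pthread_mutex_destroy(&mu);
//   // T2's destroy can race with the tail of T1's unlock, which may
//   // still be touching the pthread_mutex_t storage.
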
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

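// The report_double_lock branch above fires when the mutex is already
// write-held by a *different* thread, which is impossible for a correctly
// functioning mutex. One way it can be provoked is through the dynamic
// annotations (hypothetical user code):
//
//   // Thread 1:                        // Thread 2:
//   ANNOTATE_RWLOCK_ACQUIRED(&l, 1);    ANNOTATE_RWLOCK_ACQUIRED(&l, 1);
//   // The second acquisition sees owner_tid != thr->tid and yields a
//   // ReportTypeMutexDoubleLock report.
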
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

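// The returned recursion count lets a caller release the mutex completely
// (all=true) and later restore the same recursion level through MutexLock's
// rec parameter. A sketch of the save/restore pattern (assumed usage, not
// the actual interceptor code):
//
//   int rec = MutexUnlock(thr, pc, (uptr)m, /*all=*/true);
//   // ... wait without holding the mutex (e.g. a condition variable) ...
//   MutexLock(thr, pc, (uptr)m, rec, /*try_lock=*/false);
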
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

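// This entry point exists because an unlock such as pthread_rwlock_unlock()
// does not say which kind of lock it releases, so the kind is inferred from
// owner_tid above. Hypothetical user code exercising both paths:
//
//   pthread_rwlock_t l = PTHREAD_RWLOCK_INITIALIZER;
//   pthread_rwlock_rdlock(&l);
//   pthread_rwlock_unlock(&l);  // owner_tid unset -> read-unlock path
//   pthread_rwlock_wrlock(&l);
//   pthread_rwlock_unlock(&l);  // owner_tid == thr->tid -> write-unlock path
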
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

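// These helpers implement happens-before edges with vector clocks:
// release() merges the releasing thread's clock into the SyncClock,
// acquire() merges the SyncClock back into the acquiring thread's clock,
// and ReleaseStore() overwrites the SyncClock instead of merging into it.
// A worked example with two threads, clocks written as [T0, T1]:
//
//   T0 at epoch 5 releases:  c          = [5, 0]
//   T1 at epoch 3 acquires:  thr->clock = max([0, 3], [5, 0]) = [5, 3]
//
// After the acquire, T1 is ordered after everything T0 did up to epoch 5.
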
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

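// A minimal lock-order inversion that the deadlock detector turns into
// such a report (hypothetical user code):
//
//   // Thread 1:                 // Thread 2:
//   pthread_mutex_lock(&a);      pthread_mutex_lock(&b);
//   pthread_mutex_lock(&b);      pthread_mutex_lock(&a);  // cycle a->b->a
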
}  // namespace __tsan