//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"
namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
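// Adapter that lets the common deadlock detector call back into TSan:
// it captures the current thread state and PC, and exposes the thread's
// per-thread deadlock-detector state (dd_pt/dd_lt) through DDCallback.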
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};
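// Registers the sync object with the deadlock detector and stores the
// mutex id in dd.ctx so deadlock reports can name this mutex.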
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}
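// Produces a mutex-misuse report of the given type, attaching the current
// stack, the mutex (by id) and the mutex address.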
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (kGoMode)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
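// Called when the application creates a mutex. Touching the memory first
// (for non-linker-initialized mutexes) models the initialization as a
// write, so races between mutex creation and other accesses are detected.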
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (kCppMode && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}
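// Called when the application destroys a mutex. If the mutex is still
// locked and report_destroy_locked is set, a MutexDestroyLocked report is
// produced before the sync object is reset.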
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef SANITIZER_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  if (s == 0)
    return;
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr);  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);
  }
  if (unlock_locked) {
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
    if (s != 0) {
      s->Reset(thr);
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
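// Write-lock acquisition. rec is the number of recursion levels acquired
// at once; try_lock suppresses the deadlock detector's before-lock edge,
// since a try-lock does not block and therefore cannot deadlock.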
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
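// Write-unlock. Releases one recursion level, or all of them if 'all' is
// set, and returns the number of levels released so a caller can re-acquire
// the same number later (e.g. around a condition-variable wait).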
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
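// Read-lock acquisition. Read-locking a mutex that is write-held by any
// thread is reported as a bad read lock.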
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
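// Read-unlock. Releases into the mutex's read clock; read-unlocking a
// write-held mutex is reported as a bad read unlock.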
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
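// Unlock when the caller does not know whether the lock was held for
// reading or writing (e.g. pthread_rwlock_unlock): the mode is inferred
// from the recorded owner_tid.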
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
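// Forcibly puts the mutex back into the unlocked state, discarding owner
// and recursion information; used to recover when the runtime's model of
// the mutex no longer matches reality (e.g. for robust mutexes whose
// owner died).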
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}
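// Acquire on an arbitrary address: pulls the vector clock stored at addr
// into the current thread (a no-op under ignore_sync).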
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}
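// Merges every thread's current epoch (or final epoch, for finished
// threads) into the calling thread's clock; AcquireGlobal below uses this
// to synchronize with all threads at once.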
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}
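// Release and release-store on an arbitrary address: push the current
// thread's clock into the sync object stored at addr.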
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
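// After a thread returns from a sleep-like call, snapshot all thread
// clocks into last_sleep_clock; reports use this to flag synchronization
// that apparently relies on the sleep. Not used in Go mode.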
#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif
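// The *Impl helpers below perform the raw vector-clock operations that the
// functions above are built on; all of them respect ignore_sync.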
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}
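// Converts a cycle found by the deadlock detector (DDReport) into a TSan
// deadlock report, attaching the mutexes, threads and, where available,
// the stacks recorded at each lock site.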
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan