//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
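
// Bridges TSan and the common deadlock detector: DDCallback is the
// detector's callback interface; this adapter hands it the per-thread
// detector state and lets it capture a stack id and a unique thread id.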
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  virtual u32 Unwind() {
    return CurrentStackId(thr, pc);
  }
  virtual int UniqueTid() {
    return thr->unique_id;
  }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}
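
// Emits a report for a mutex API misuse (double lock, bad unlock, ...)
// of the mutex identified by mid, located at addr.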
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (kGoMode)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
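
// Called on mutex creation. Unless the mutex is linker-initialized, the
// address is touched with a write so that creation is race-checked
// against concurrent accesses; the mutex flags are then recorded in the
// sync object.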
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (kCppMode && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}
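
// Called on mutex destruction. If the mutex is still held (and
// report_destroy_locked is set), reports destruction of a locked mutex;
// otherwise the sync object is reset for reuse.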
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  if (s == 0)
    return;
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr);  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);
  }
  if (unlock_locked) {
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
    if (s != 0) {
      s->Reset(thr);
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
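
// Write lock of the mutex at addr, taken rec times (rec > 1 only for
// recursive mutexes). On first acquisition the thread acquires both the
// write and read clocks; an unexpected existing owner is reported as a
// double lock.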
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
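
// Write unlock of the mutex at addr; releases all recursion levels if
// all is set. Returns the number of levels released, and reports a bad
// unlock if the calling thread is not the owner.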
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
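
// Read lock of the mutex at addr. Readers acquire only the write clock
// (they need not synchronize with each other); a read lock of a
// write-locked mutex is reported as a misuse.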
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
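
// Read unlock of the mutex at addr. Releases into the read clock; a
// read unlock of a write-locked mutex is reported as a misuse.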
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
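
// Unlock when the lock kind is unknown (e.g. pthread_rwlock_unlock does
// not say whether the lock was held for reading or writing): the owner
// field tells read and write unlocks apart.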
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
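
// Forcibly resets the mutex to the unlocked state with no clock
// operations; presumably for recovery from inconsistent lock state
// (e.g. a robust mutex acquired with EOWNERDEAD).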
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}
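
// Standalone acquire on an arbitrary address, without lock bookkeeping;
// used for synchronization that is not a mutex operation.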
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}
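
// AcquireGlobal makes the current thread synchronize with everything
// that has happened so far in all threads: the callback copies each
// thread's current epoch (or its final epoch, if it has finished) into
// the acquiring thread's clock.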
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}
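
// Standalone release on an arbitrary address; ReleaseStore below is the
// store-release variant, which overwrites the target clock rather than
// joining with it.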
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
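
// Not compiled for Go. AfterSleep records the current stack and a
// snapshot of all threads' clocks in last_sleep_clock; reports use this
// to point out races that a sleep merely appeared to order.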
#ifndef TSAN_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif
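
// The *Impl helpers below perform the underlying vector-clock work:
// each refreshes the thread's own slot from fast_state and then
// acquires from / releases into the given SyncClock.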
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}
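
// Turns a deadlock detector report (a cycle of threads and mutexes)
// into a TSan report, attaching the recorded lock acquisition stacks
// where available.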
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan