//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

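// Adapts TSan's per-thread state to the DDCallback interface used by the
// sanitizer_common deadlock detector: Unwind() captures the current stack
// and UniqueTid() identifies the calling thread.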
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

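// Reports a mutex API misuse (double lock, bad unlock, etc.) at addr;
// mid identifies the mutex in the report.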
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (kGoMode)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

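// Registers a mutex at addr in the metamap and records its flags and
// creation stack for later reports. The MemoryWrite below lets the
// creation itself participate in race detection against concurrent
// accesses to the mutex storage.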
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (kCppMode && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef SANITIZER_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of not interesting reports, so just ignore it.
  if (IsGlobalVar(addr))
    return;
#endif
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  if (s == 0)
    return;
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr);  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);
  }
  if (unlock_locked) {
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
    if (s != 0) {
      s->Reset(thr);
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

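// On the first acquisition (recursion was 0) the locking thread acquires
// both the write and read clocks of the mutex, establishing happens-before
// with all preceding write and read unlocks.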
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

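// Returns the number of recursion levels released (all of them when 'all'
// is set); the result can be fed back into MutexLock's rec parameter to
// restore the lock, e.g. around condition variable waits.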
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

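// A read lock acquires only the mutex's write clock: readers synchronize
// with preceding write unlocks, but not with each other.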
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

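// A read unlock releases into read_clock; a later write lock acquires
// read_clock, so readers synchronize with subsequent writers.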
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

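// Used when the caller does not know whether the lock is held in read or
// write mode (e.g. annotations); the mode is inferred from owner_tid.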
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

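// Forcibly resets ownership so the mutex can be used again after its state
// became inconsistent, e.g. when locking a robust mutex returns EOWNERDEAD.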
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

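// Merges one thread's epoch into the caller's clock: running threads
// contribute their current epoch, finished threads their final one (epoch1).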
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

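// Establishes happens-before from every existing thread to the caller.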
void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

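// The *Impl helpers below operate on a SyncClock whose owning SyncVar is
// already locked by the caller; they only manipulate vector clocks and do
// not write trace events.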
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

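// Converts a lock cycle found by the deadlock detector (r->loop) into a
// TSan report: one mutex, thread, and lock-site stack per edge.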
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan