1 //===-- tsan_rtl_mutex.cc -------------------------------------------------===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"
24 void ReportDeadlock(ThreadState
*thr
, uptr pc
, DDReport
*r
);
26 struct Callback
: DDCallback
{
30 Callback(ThreadState
*thr
, uptr pc
)
33 DDCallback::pt
= thr
->dd_pt
;
34 DDCallback::lt
= thr
->dd_lt
;
37 virtual u32
Unwind() {
38 return CurrentStackId(thr
, pc
);
40 virtual int UniqueTid() {
41 return thr
->unique_id
;
45 void DDMutexInit(ThreadState
*thr
, uptr pc
, SyncVar
*s
) {
47 ctx
->dd
->MutexInit(&cb
, &s
->dd
);
48 s
->dd
.ctx
= s
->GetId();
51 static void ReportMutexMisuse(ThreadState
*thr
, uptr pc
, ReportType typ
,
53 // In Go, these misuses are either impossible, or detected by std lib,
54 // or false positives (e.g. unlock in a different thread).
57 ThreadRegistryLock
l(ctx
->thread_registry
);
58 ScopedReport
rep(typ
);
61 trace
.ObtainCurrent(thr
, pc
);
62 rep
.AddStack(&trace
, true);
63 rep
.AddLocation(addr
, 1);
64 OutputReport(thr
, rep
);
67 void MutexCreate(ThreadState
*thr
, uptr pc
, uptr addr
,
68 bool rw
, bool recursive
, bool linker_init
) {
69 DPrintf("#%d: MutexCreate %zx\n", thr
->tid
, addr
);
70 StatInc(thr
, StatMutexCreate
);
71 if (!linker_init
&& IsAppMem(addr
)) {
72 CHECK(!thr
->is_freeing
);
73 thr
->is_freeing
= true;
74 MemoryWrite(thr
, pc
, addr
, kSizeLog1
);
75 thr
->is_freeing
= false;
77 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
79 s
->is_recursive
= recursive
;
80 s
->is_linker_init
= linker_init
;
81 if (kCppMode
&& s
->creation_stack_id
== 0)
82 s
->creation_stack_id
= CurrentStackId(thr
, pc
);
86 void MutexDestroy(ThreadState
*thr
, uptr pc
, uptr addr
) {
87 DPrintf("#%d: MutexDestroy %zx\n", thr
->tid
, addr
);
88 StatInc(thr
, StatMutexDestroy
);
90 // Global mutexes not marked as LINKER_INITIALIZED
91 // cause tons of not interesting reports, so just ignore it.
92 if (IsGlobalVar(addr
))
96 CHECK(!thr
->is_freeing
);
97 thr
->is_freeing
= true;
98 MemoryWrite(thr
, pc
, addr
, kSizeLog1
);
99 thr
->is_freeing
= false;
101 SyncVar
*s
= ctx
->metamap
.GetIfExistsAndLock(addr
);
104 if (common_flags()->detect_deadlocks
) {
105 Callback
cb(thr
, pc
);
106 ctx
->dd
->MutexDestroy(&cb
, &s
->dd
);
107 ctx
->dd
->MutexInit(&cb
, &s
->dd
);
109 bool unlock_locked
= false;
110 if (flags()->report_destroy_locked
111 && s
->owner_tid
!= SyncVar::kInvalidTid
114 unlock_locked
= true;
116 u64 mid
= s
->GetId();
117 u32 last_lock
= s
->last_lock
;
119 s
->Reset(thr
); // must not reset it before the report is printed
122 ThreadRegistryLock
l(ctx
->thread_registry
);
123 ScopedReport
rep(ReportTypeMutexDestroyLocked
);
126 trace
.ObtainCurrent(thr
, pc
);
127 rep
.AddStack(&trace
);
128 FastState
last(last_lock
);
129 RestoreStack(last
.tid(), last
.epoch(), &trace
, 0);
130 rep
.AddStack(&trace
, true);
131 rep
.AddLocation(addr
, 1);
132 OutputReport(thr
, rep
);
135 SyncVar
*s
= ctx
->metamap
.GetIfExistsAndLock(addr
);
141 thr
->mset
.Remove(mid
);
142 // s will be destroyed and freed in MetaMap::FreeBlock.
145 void MutexLock(ThreadState
*thr
, uptr pc
, uptr addr
, int rec
, bool try_lock
) {
146 DPrintf("#%d: MutexLock %zx rec=%d\n", thr
->tid
, addr
, rec
);
149 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
150 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
151 thr
->fast_state
.IncrementEpoch();
152 TraceAddEvent(thr
, thr
->fast_state
, EventTypeLock
, s
->GetId());
153 bool report_double_lock
= false;
154 if (s
->owner_tid
== SyncVar::kInvalidTid
) {
155 CHECK_EQ(s
->recursion
, 0);
156 s
->owner_tid
= thr
->tid
;
157 s
->last_lock
= thr
->fast_state
.raw();
158 } else if (s
->owner_tid
== thr
->tid
) {
159 CHECK_GT(s
->recursion
, 0);
160 } else if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
162 report_double_lock
= true;
164 if (s
->recursion
== 0) {
165 StatInc(thr
, StatMutexLock
);
166 AcquireImpl(thr
, pc
, &s
->clock
);
167 AcquireImpl(thr
, pc
, &s
->read_clock
);
168 } else if (!s
->is_recursive
) {
169 StatInc(thr
, StatMutexRecLock
);
172 thr
->mset
.Add(s
->GetId(), true, thr
->fast_state
.epoch());
173 if (common_flags()->detect_deadlocks
&& (s
->recursion
- rec
) == 0) {
174 Callback
cb(thr
, pc
);
176 ctx
->dd
->MutexBeforeLock(&cb
, &s
->dd
, true);
177 ctx
->dd
->MutexAfterLock(&cb
, &s
->dd
, true, try_lock
);
179 u64 mid
= s
->GetId();
181 // Can't touch s after this point.
182 if (report_double_lock
)
183 ReportMutexMisuse(thr
, pc
, ReportTypeMutexDoubleLock
, addr
, mid
);
184 if (common_flags()->detect_deadlocks
) {
185 Callback
cb(thr
, pc
);
186 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
190 int MutexUnlock(ThreadState
*thr
, uptr pc
, uptr addr
, bool all
) {
191 DPrintf("#%d: MutexUnlock %zx all=%d\n", thr
->tid
, addr
, all
);
193 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
194 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
195 thr
->fast_state
.IncrementEpoch();
196 TraceAddEvent(thr
, thr
->fast_state
, EventTypeUnlock
, s
->GetId());
198 bool report_bad_unlock
= false;
199 if (kCppMode
&& (s
->recursion
== 0 || s
->owner_tid
!= thr
->tid
)) {
200 if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
202 report_bad_unlock
= true;
205 rec
= all
? s
->recursion
: 1;
207 if (s
->recursion
== 0) {
208 StatInc(thr
, StatMutexUnlock
);
209 s
->owner_tid
= SyncVar::kInvalidTid
;
210 ReleaseStoreImpl(thr
, pc
, &s
->clock
);
212 StatInc(thr
, StatMutexRecUnlock
);
215 thr
->mset
.Del(s
->GetId(), true);
216 if (common_flags()->detect_deadlocks
&& s
->recursion
== 0 &&
217 !report_bad_unlock
) {
218 Callback
cb(thr
, pc
);
219 ctx
->dd
->MutexBeforeUnlock(&cb
, &s
->dd
, true);
221 u64 mid
= s
->GetId();
223 // Can't touch s after this point.
224 if (report_bad_unlock
)
225 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadUnlock
, addr
, mid
);
226 if (common_flags()->detect_deadlocks
&& !report_bad_unlock
) {
227 Callback
cb(thr
, pc
);
228 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
233 void MutexReadLock(ThreadState
*thr
, uptr pc
, uptr addr
, bool trylock
) {
234 DPrintf("#%d: MutexReadLock %zx\n", thr
->tid
, addr
);
235 StatInc(thr
, StatMutexReadLock
);
237 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
238 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, false);
239 thr
->fast_state
.IncrementEpoch();
240 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRLock
, s
->GetId());
241 bool report_bad_lock
= false;
242 if (s
->owner_tid
!= SyncVar::kInvalidTid
) {
243 if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
245 report_bad_lock
= true;
248 AcquireImpl(thr
, pc
, &s
->clock
);
249 s
->last_lock
= thr
->fast_state
.raw();
250 thr
->mset
.Add(s
->GetId(), false, thr
->fast_state
.epoch());
251 if (common_flags()->detect_deadlocks
&& s
->recursion
== 0) {
252 Callback
cb(thr
, pc
);
254 ctx
->dd
->MutexBeforeLock(&cb
, &s
->dd
, false);
255 ctx
->dd
->MutexAfterLock(&cb
, &s
->dd
, false, trylock
);
257 u64 mid
= s
->GetId();
259 // Can't touch s after this point.
261 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadReadLock
, addr
, mid
);
262 if (common_flags()->detect_deadlocks
) {
263 Callback
cb(thr
, pc
);
264 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
268 void MutexReadUnlock(ThreadState
*thr
, uptr pc
, uptr addr
) {
269 DPrintf("#%d: MutexReadUnlock %zx\n", thr
->tid
, addr
);
270 StatInc(thr
, StatMutexReadUnlock
);
272 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
273 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
274 thr
->fast_state
.IncrementEpoch();
275 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRUnlock
, s
->GetId());
276 bool report_bad_unlock
= false;
277 if (s
->owner_tid
!= SyncVar::kInvalidTid
) {
278 if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
280 report_bad_unlock
= true;
283 ReleaseImpl(thr
, pc
, &s
->read_clock
);
284 if (common_flags()->detect_deadlocks
&& s
->recursion
== 0) {
285 Callback
cb(thr
, pc
);
286 ctx
->dd
->MutexBeforeUnlock(&cb
, &s
->dd
, false);
288 u64 mid
= s
->GetId();
290 // Can't touch s after this point.
291 thr
->mset
.Del(mid
, false);
292 if (report_bad_unlock
)
293 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadReadUnlock
, addr
, mid
);
294 if (common_flags()->detect_deadlocks
) {
295 Callback
cb(thr
, pc
);
296 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
300 void MutexReadOrWriteUnlock(ThreadState
*thr
, uptr pc
, uptr addr
) {
301 DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr
->tid
, addr
);
303 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
304 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
306 bool report_bad_unlock
= false;
307 if (s
->owner_tid
== SyncVar::kInvalidTid
) {
308 // Seems to be read unlock.
310 StatInc(thr
, StatMutexReadUnlock
);
311 thr
->fast_state
.IncrementEpoch();
312 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRUnlock
, s
->GetId());
313 ReleaseImpl(thr
, pc
, &s
->read_clock
);
314 } else if (s
->owner_tid
== thr
->tid
) {
315 // Seems to be write unlock.
316 thr
->fast_state
.IncrementEpoch();
317 TraceAddEvent(thr
, thr
->fast_state
, EventTypeUnlock
, s
->GetId());
318 CHECK_GT(s
->recursion
, 0);
320 if (s
->recursion
== 0) {
321 StatInc(thr
, StatMutexUnlock
);
322 s
->owner_tid
= SyncVar::kInvalidTid
;
323 ReleaseImpl(thr
, pc
, &s
->clock
);
325 StatInc(thr
, StatMutexRecUnlock
);
327 } else if (!s
->is_broken
) {
329 report_bad_unlock
= true;
331 thr
->mset
.Del(s
->GetId(), write
);
332 if (common_flags()->detect_deadlocks
&& s
->recursion
== 0) {
333 Callback
cb(thr
, pc
);
334 ctx
->dd
->MutexBeforeUnlock(&cb
, &s
->dd
, write
);
336 u64 mid
= s
->GetId();
338 // Can't touch s after this point.
339 if (report_bad_unlock
)
340 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadUnlock
, addr
, mid
);
341 if (common_flags()->detect_deadlocks
) {
342 Callback
cb(thr
, pc
);
343 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
347 void MutexRepair(ThreadState
*thr
, uptr pc
, uptr addr
) {
348 DPrintf("#%d: MutexRepair %zx\n", thr
->tid
, addr
);
349 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
350 s
->owner_tid
= SyncVar::kInvalidTid
;
355 void Acquire(ThreadState
*thr
, uptr pc
, uptr addr
) {
356 DPrintf("#%d: Acquire %zx\n", thr
->tid
, addr
);
357 if (thr
->ignore_sync
)
359 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, false);
360 AcquireImpl(thr
, pc
, &s
->clock
);
364 static void UpdateClockCallback(ThreadContextBase
*tctx_base
, void *arg
) {
365 ThreadState
*thr
= reinterpret_cast<ThreadState
*>(arg
);
366 ThreadContext
*tctx
= static_cast<ThreadContext
*>(tctx_base
);
367 if (tctx
->status
== ThreadStatusRunning
)
368 thr
->clock
.set(tctx
->tid
, tctx
->thr
->fast_state
.epoch());
370 thr
->clock
.set(tctx
->tid
, tctx
->epoch1
);
373 void AcquireGlobal(ThreadState
*thr
, uptr pc
) {
374 DPrintf("#%d: AcquireGlobal\n", thr
->tid
);
375 if (thr
->ignore_sync
)
377 ThreadRegistryLock
l(ctx
->thread_registry
);
378 ctx
->thread_registry
->RunCallbackForEachThreadLocked(
379 UpdateClockCallback
, thr
);
382 void Release(ThreadState
*thr
, uptr pc
, uptr addr
) {
383 DPrintf("#%d: Release %zx\n", thr
->tid
, addr
);
384 if (thr
->ignore_sync
)
386 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
387 thr
->fast_state
.IncrementEpoch();
388 // Can't increment epoch w/o writing to the trace as well.
389 TraceAddEvent(thr
, thr
->fast_state
, EventTypeMop
, 0);
390 ReleaseImpl(thr
, pc
, &s
->clock
);
394 void ReleaseStore(ThreadState
*thr
, uptr pc
, uptr addr
) {
395 DPrintf("#%d: ReleaseStore %zx\n", thr
->tid
, addr
);
396 if (thr
->ignore_sync
)
398 SyncVar
*s
= ctx
->metamap
.GetOrCreateAndLock(thr
, pc
, addr
, true);
399 thr
->fast_state
.IncrementEpoch();
400 // Can't increment epoch w/o writing to the trace as well.
401 TraceAddEvent(thr
, thr
->fast_state
, EventTypeMop
, 0);
402 ReleaseStoreImpl(thr
, pc
, &s
->clock
);
407 static void UpdateSleepClockCallback(ThreadContextBase
*tctx_base
, void *arg
) {
408 ThreadState
*thr
= reinterpret_cast<ThreadState
*>(arg
);
409 ThreadContext
*tctx
= static_cast<ThreadContext
*>(tctx_base
);
410 if (tctx
->status
== ThreadStatusRunning
)
411 thr
->last_sleep_clock
.set(tctx
->tid
, tctx
->thr
->fast_state
.epoch());
413 thr
->last_sleep_clock
.set(tctx
->tid
, tctx
->epoch1
);
416 void AfterSleep(ThreadState
*thr
, uptr pc
) {
417 DPrintf("#%d: AfterSleep %zx\n", thr
->tid
);
418 if (thr
->ignore_sync
)
420 thr
->last_sleep_stack_id
= CurrentStackId(thr
, pc
);
421 ThreadRegistryLock
l(ctx
->thread_registry
);
422 ctx
->thread_registry
->RunCallbackForEachThreadLocked(
423 UpdateSleepClockCallback
, thr
);
427 void AcquireImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
428 if (thr
->ignore_sync
)
430 thr
->clock
.set(thr
->fast_state
.epoch());
431 thr
->clock
.acquire(&thr
->clock_cache
, c
);
432 StatInc(thr
, StatSyncAcquire
);
435 void ReleaseImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
436 if (thr
->ignore_sync
)
438 thr
->clock
.set(thr
->fast_state
.epoch());
439 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
440 thr
->clock
.release(&thr
->clock_cache
, c
);
441 StatInc(thr
, StatSyncRelease
);
444 void ReleaseStoreImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
445 if (thr
->ignore_sync
)
447 thr
->clock
.set(thr
->fast_state
.epoch());
448 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
449 thr
->clock
.ReleaseStore(&thr
->clock_cache
, c
);
450 StatInc(thr
, StatSyncRelease
);
453 void AcquireReleaseImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
454 if (thr
->ignore_sync
)
456 thr
->clock
.set(thr
->fast_state
.epoch());
457 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
458 thr
->clock
.acq_rel(&thr
->clock_cache
, c
);
459 StatInc(thr
, StatSyncAcquire
);
460 StatInc(thr
, StatSyncRelease
);
463 void ReportDeadlock(ThreadState
*thr
, uptr pc
, DDReport
*r
) {
466 ThreadRegistryLock
l(ctx
->thread_registry
);
467 ScopedReport
rep(ReportTypeDeadlock
);
468 for (int i
= 0; i
< r
->n
; i
++) {
469 rep
.AddMutex(r
->loop
[i
].mtx_ctx0
);
470 rep
.AddUniqueTid((int)r
->loop
[i
].thr_ctx
);
471 rep
.AddThread((int)r
->loop
[i
].thr_ctx
);
473 InternalScopedBuffer
<StackTrace
> stacks(2 * DDReport::kMaxLoopSize
);
474 uptr dummy_pc
= 0x42;
475 for (int i
= 0; i
< r
->n
; i
++) {
477 for (int j
= 0; j
< (flags()->second_deadlock_stack
? 2 : 1); j
++) {
478 u32 stk
= r
->loop
[i
].stk
[j
];
480 const uptr
*trace
= StackDepotGet(stk
, &size
);
481 stacks
[i
].Init(const_cast<uptr
*>(trace
), size
);
483 // Sometimes we fail to extract the stack trace (FIXME: investigate),
484 // but we should still produce some stack trace in the report.
485 stacks
[i
].Init(&dummy_pc
, 1);
487 rep
.AddStack(&stacks
[i
], true);
490 OutputReport(thr
, rep
);
493 } // namespace __tsan