//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
12 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
13 #include <sanitizer_common/sanitizer_stackdepot.h>
16 #include "tsan_flags.h"
17 #include "tsan_sync.h"
18 #include "tsan_report.h"
19 #include "tsan_symbolize.h"
20 #include "tsan_platform.h"
24 void ReportDeadlock(ThreadState
*thr
, uptr pc
, DDReport
*r
);
26 struct Callback
: DDCallback
{
30 Callback(ThreadState
*thr
, uptr pc
)
33 DDCallback::pt
= thr
->dd_pt
;
34 DDCallback::lt
= thr
->dd_lt
;
37 virtual u32
Unwind() {
38 return CurrentStackId(thr
, pc
);
40 virtual int UniqueTid() {
41 return thr
->unique_id
;
45 void DDMutexInit(ThreadState
*thr
, uptr pc
, SyncVar
*s
) {
47 ctx
->dd
->MutexInit(&cb
, &s
->dd
);
48 s
->dd
.ctx
= s
->GetId();
51 static void ReportMutexMisuse(ThreadState
*thr
, uptr pc
, ReportType typ
,
53 ThreadRegistryLock
l(ctx
->thread_registry
);
54 ScopedReport
rep(typ
);
57 trace
.ObtainCurrent(thr
, pc
);
59 rep
.AddLocation(addr
, 1);
60 OutputReport(ctx
, rep
, rep
.GetReport()->stacks
[0]);
63 void MutexCreate(ThreadState
*thr
, uptr pc
, uptr addr
,
64 bool rw
, bool recursive
, bool linker_init
) {
65 DPrintf("#%d: MutexCreate %zx\n", thr
->tid
, addr
);
66 StatInc(thr
, StatMutexCreate
);
67 if (!linker_init
&& IsAppMem(addr
)) {
68 CHECK(!thr
->is_freeing
);
69 thr
->is_freeing
= true;
70 MemoryWrite(thr
, pc
, addr
, kSizeLog1
);
71 thr
->is_freeing
= false;
73 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
75 s
->is_recursive
= recursive
;
76 s
->is_linker_init
= linker_init
;
80 void MutexDestroy(ThreadState
*thr
, uptr pc
, uptr addr
) {
81 DPrintf("#%d: MutexDestroy %zx\n", thr
->tid
, addr
);
82 StatInc(thr
, StatMutexDestroy
);
84 // Global mutexes not marked as LINKER_INITIALIZED
85 // cause tons of not interesting reports, so just ignore it.
86 if (IsGlobalVar(addr
))
89 SyncVar
*s
= ctx
->synctab
.GetAndRemove(thr
, pc
, addr
);
92 if (flags()->detect_deadlocks
) {
94 ctx
->dd
->MutexDestroy(&cb
, &s
->dd
);
97 CHECK(!thr
->is_freeing
);
98 thr
->is_freeing
= true;
99 MemoryWrite(thr
, pc
, addr
, kSizeLog1
);
100 thr
->is_freeing
= false;
102 if (flags()->report_destroy_locked
103 && s
->owner_tid
!= SyncVar::kInvalidTid
106 ThreadRegistryLock
l(ctx
->thread_registry
);
107 ScopedReport
rep(ReportTypeMutexDestroyLocked
);
110 trace
.ObtainCurrent(thr
, pc
);
111 rep
.AddStack(&trace
);
112 FastState
last(s
->last_lock
);
113 RestoreStack(last
.tid(), last
.epoch(), &trace
, 0);
114 rep
.AddStack(&trace
);
115 rep
.AddLocation(s
->addr
, 1);
116 OutputReport(ctx
, rep
, rep
.GetReport()->stacks
[0]);
118 thr
->mset
.Remove(s
->GetId());
122 void MutexLock(ThreadState
*thr
, uptr pc
, uptr addr
, int rec
, bool try_lock
) {
123 DPrintf("#%d: MutexLock %zx rec=%d\n", thr
->tid
, addr
, rec
);
126 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
127 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
128 thr
->fast_state
.IncrementEpoch();
129 TraceAddEvent(thr
, thr
->fast_state
, EventTypeLock
, s
->GetId());
130 bool report_double_lock
= false;
131 if (s
->owner_tid
== SyncVar::kInvalidTid
) {
132 CHECK_EQ(s
->recursion
, 0);
133 s
->owner_tid
= thr
->tid
;
134 s
->last_lock
= thr
->fast_state
.raw();
135 } else if (s
->owner_tid
== thr
->tid
) {
136 CHECK_GT(s
->recursion
, 0);
137 } else if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
139 report_double_lock
= true;
141 if (s
->recursion
== 0) {
142 StatInc(thr
, StatMutexLock
);
143 AcquireImpl(thr
, pc
, &s
->clock
);
144 AcquireImpl(thr
, pc
, &s
->read_clock
);
145 } else if (!s
->is_recursive
) {
146 StatInc(thr
, StatMutexRecLock
);
149 thr
->mset
.Add(s
->GetId(), true, thr
->fast_state
.epoch());
150 if (flags()->detect_deadlocks
&& s
->recursion
== 1) {
151 Callback
cb(thr
, pc
);
153 ctx
->dd
->MutexBeforeLock(&cb
, &s
->dd
, true);
154 ctx
->dd
->MutexAfterLock(&cb
, &s
->dd
, true, try_lock
);
156 u64 mid
= s
->GetId();
158 // Can't touch s after this point.
159 if (report_double_lock
)
160 ReportMutexMisuse(thr
, pc
, ReportTypeMutexDoubleLock
, addr
, mid
);
161 if (flags()->detect_deadlocks
) {
162 Callback
cb(thr
, pc
);
163 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
167 int MutexUnlock(ThreadState
*thr
, uptr pc
, uptr addr
, bool all
) {
168 DPrintf("#%d: MutexUnlock %zx all=%d\n", thr
->tid
, addr
, all
);
170 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
171 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
172 thr
->fast_state
.IncrementEpoch();
173 TraceAddEvent(thr
, thr
->fast_state
, EventTypeUnlock
, s
->GetId());
175 bool report_bad_unlock
= false;
176 if (s
->recursion
== 0 || s
->owner_tid
!= thr
->tid
) {
177 if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
179 report_bad_unlock
= true;
182 rec
= all
? s
->recursion
: 1;
184 if (s
->recursion
== 0) {
185 StatInc(thr
, StatMutexUnlock
);
186 s
->owner_tid
= SyncVar::kInvalidTid
;
187 ReleaseStoreImpl(thr
, pc
, &s
->clock
);
189 StatInc(thr
, StatMutexRecUnlock
);
192 thr
->mset
.Del(s
->GetId(), true);
193 if (flags()->detect_deadlocks
&& s
->recursion
== 0) {
194 Callback
cb(thr
, pc
);
195 ctx
->dd
->MutexBeforeUnlock(&cb
, &s
->dd
, true);
197 u64 mid
= s
->GetId();
199 // Can't touch s after this point.
200 if (report_bad_unlock
)
201 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadUnlock
, addr
, mid
);
202 if (flags()->detect_deadlocks
) {
203 Callback
cb(thr
, pc
);
204 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
209 void MutexReadLock(ThreadState
*thr
, uptr pc
, uptr addr
, bool trylock
) {
210 DPrintf("#%d: MutexReadLock %zx\n", thr
->tid
, addr
);
211 StatInc(thr
, StatMutexReadLock
);
213 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
214 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, false);
215 thr
->fast_state
.IncrementEpoch();
216 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRLock
, s
->GetId());
217 bool report_bad_lock
= false;
218 if (s
->owner_tid
!= SyncVar::kInvalidTid
) {
219 if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
221 report_bad_lock
= true;
224 AcquireImpl(thr
, pc
, &s
->clock
);
225 s
->last_lock
= thr
->fast_state
.raw();
226 thr
->mset
.Add(s
->GetId(), false, thr
->fast_state
.epoch());
227 if (flags()->detect_deadlocks
&& s
->recursion
== 0) {
228 Callback
cb(thr
, pc
);
230 ctx
->dd
->MutexBeforeLock(&cb
, &s
->dd
, false);
231 ctx
->dd
->MutexAfterLock(&cb
, &s
->dd
, false, trylock
);
233 u64 mid
= s
->GetId();
235 // Can't touch s after this point.
237 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadReadLock
, addr
, mid
);
238 if (flags()->detect_deadlocks
) {
239 Callback
cb(thr
, pc
);
240 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
244 void MutexReadUnlock(ThreadState
*thr
, uptr pc
, uptr addr
) {
245 DPrintf("#%d: MutexReadUnlock %zx\n", thr
->tid
, addr
);
246 StatInc(thr
, StatMutexReadUnlock
);
248 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
249 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
250 thr
->fast_state
.IncrementEpoch();
251 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRUnlock
, s
->GetId());
252 bool report_bad_unlock
= false;
253 if (s
->owner_tid
!= SyncVar::kInvalidTid
) {
254 if (flags()->report_mutex_bugs
&& !s
->is_broken
) {
256 report_bad_unlock
= true;
259 ReleaseImpl(thr
, pc
, &s
->read_clock
);
260 if (flags()->detect_deadlocks
&& s
->recursion
== 0) {
261 Callback
cb(thr
, pc
);
262 ctx
->dd
->MutexBeforeUnlock(&cb
, &s
->dd
, false);
264 u64 mid
= s
->GetId();
266 // Can't touch s after this point.
267 thr
->mset
.Del(mid
, false);
268 if (report_bad_unlock
)
269 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadReadUnlock
, addr
, mid
);
270 if (flags()->detect_deadlocks
) {
271 Callback
cb(thr
, pc
);
272 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
276 void MutexReadOrWriteUnlock(ThreadState
*thr
, uptr pc
, uptr addr
) {
277 DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr
->tid
, addr
);
279 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
280 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
282 bool report_bad_unlock
= false;
283 if (s
->owner_tid
== SyncVar::kInvalidTid
) {
284 // Seems to be read unlock.
286 StatInc(thr
, StatMutexReadUnlock
);
287 thr
->fast_state
.IncrementEpoch();
288 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRUnlock
, s
->GetId());
289 ReleaseImpl(thr
, pc
, &s
->read_clock
);
290 } else if (s
->owner_tid
== thr
->tid
) {
291 // Seems to be write unlock.
292 thr
->fast_state
.IncrementEpoch();
293 TraceAddEvent(thr
, thr
->fast_state
, EventTypeUnlock
, s
->GetId());
294 CHECK_GT(s
->recursion
, 0);
296 if (s
->recursion
== 0) {
297 StatInc(thr
, StatMutexUnlock
);
298 s
->owner_tid
= SyncVar::kInvalidTid
;
299 ReleaseImpl(thr
, pc
, &s
->clock
);
301 StatInc(thr
, StatMutexRecUnlock
);
303 } else if (!s
->is_broken
) {
305 report_bad_unlock
= true;
307 thr
->mset
.Del(s
->GetId(), write
);
308 if (flags()->detect_deadlocks
&& s
->recursion
== 0) {
309 Callback
cb(thr
, pc
);
310 ctx
->dd
->MutexBeforeUnlock(&cb
, &s
->dd
, write
);
312 u64 mid
= s
->GetId();
314 // Can't touch s after this point.
315 if (report_bad_unlock
)
316 ReportMutexMisuse(thr
, pc
, ReportTypeMutexBadUnlock
, addr
, mid
);
317 if (flags()->detect_deadlocks
) {
318 Callback
cb(thr
, pc
);
319 ReportDeadlock(thr
, pc
, ctx
->dd
->GetReport(&cb
));
323 void MutexRepair(ThreadState
*thr
, uptr pc
, uptr addr
) {
324 DPrintf("#%d: MutexRepair %zx\n", thr
->tid
, addr
);
325 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
326 s
->owner_tid
= SyncVar::kInvalidTid
;
331 void Acquire(ThreadState
*thr
, uptr pc
, uptr addr
) {
332 DPrintf("#%d: Acquire %zx\n", thr
->tid
, addr
);
333 if (thr
->ignore_sync
)
335 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, false);
336 AcquireImpl(thr
, pc
, &s
->clock
);
340 static void UpdateClockCallback(ThreadContextBase
*tctx_base
, void *arg
) {
341 ThreadState
*thr
= reinterpret_cast<ThreadState
*>(arg
);
342 ThreadContext
*tctx
= static_cast<ThreadContext
*>(tctx_base
);
343 if (tctx
->status
== ThreadStatusRunning
)
344 thr
->clock
.set(tctx
->tid
, tctx
->thr
->fast_state
.epoch());
346 thr
->clock
.set(tctx
->tid
, tctx
->epoch1
);
349 void AcquireGlobal(ThreadState
*thr
, uptr pc
) {
350 DPrintf("#%d: AcquireGlobal\n", thr
->tid
);
351 if (thr
->ignore_sync
)
353 ThreadRegistryLock
l(ctx
->thread_registry
);
354 ctx
->thread_registry
->RunCallbackForEachThreadLocked(
355 UpdateClockCallback
, thr
);
358 void Release(ThreadState
*thr
, uptr pc
, uptr addr
) {
359 DPrintf("#%d: Release %zx\n", thr
->tid
, addr
);
360 if (thr
->ignore_sync
)
362 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
363 thr
->fast_state
.IncrementEpoch();
364 // Can't increment epoch w/o writing to the trace as well.
365 TraceAddEvent(thr
, thr
->fast_state
, EventTypeMop
, 0);
366 ReleaseImpl(thr
, pc
, &s
->clock
);
370 void ReleaseStore(ThreadState
*thr
, uptr pc
, uptr addr
) {
371 DPrintf("#%d: ReleaseStore %zx\n", thr
->tid
, addr
);
372 if (thr
->ignore_sync
)
374 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
375 thr
->fast_state
.IncrementEpoch();
376 // Can't increment epoch w/o writing to the trace as well.
377 TraceAddEvent(thr
, thr
->fast_state
, EventTypeMop
, 0);
378 ReleaseStoreImpl(thr
, pc
, &s
->clock
);
383 static void UpdateSleepClockCallback(ThreadContextBase
*tctx_base
, void *arg
) {
384 ThreadState
*thr
= reinterpret_cast<ThreadState
*>(arg
);
385 ThreadContext
*tctx
= static_cast<ThreadContext
*>(tctx_base
);
386 if (tctx
->status
== ThreadStatusRunning
)
387 thr
->last_sleep_clock
.set(tctx
->tid
, tctx
->thr
->fast_state
.epoch());
389 thr
->last_sleep_clock
.set(tctx
->tid
, tctx
->epoch1
);
392 void AfterSleep(ThreadState
*thr
, uptr pc
) {
393 DPrintf("#%d: AfterSleep %zx\n", thr
->tid
);
394 if (thr
->ignore_sync
)
396 thr
->last_sleep_stack_id
= CurrentStackId(thr
, pc
);
397 ThreadRegistryLock
l(ctx
->thread_registry
);
398 ctx
->thread_registry
->RunCallbackForEachThreadLocked(
399 UpdateSleepClockCallback
, thr
);
403 void AcquireImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
404 if (thr
->ignore_sync
)
406 thr
->clock
.set(thr
->fast_state
.epoch());
407 thr
->clock
.acquire(c
);
408 StatInc(thr
, StatSyncAcquire
);
411 void ReleaseImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
412 if (thr
->ignore_sync
)
414 thr
->clock
.set(thr
->fast_state
.epoch());
415 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
416 thr
->clock
.release(c
);
417 StatInc(thr
, StatSyncRelease
);
420 void ReleaseStoreImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
421 if (thr
->ignore_sync
)
423 thr
->clock
.set(thr
->fast_state
.epoch());
424 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
425 thr
->clock
.ReleaseStore(c
);
426 StatInc(thr
, StatSyncRelease
);
429 void AcquireReleaseImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
430 if (thr
->ignore_sync
)
432 thr
->clock
.set(thr
->fast_state
.epoch());
433 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
434 thr
->clock
.acq_rel(c
);
435 StatInc(thr
, StatSyncAcquire
);
436 StatInc(thr
, StatSyncRelease
);
439 void ReportDeadlock(ThreadState
*thr
, uptr pc
, DDReport
*r
) {
442 ThreadRegistryLock
l(ctx
->thread_registry
);
443 ScopedReport
rep(ReportTypeDeadlock
);
444 for (int i
= 0; i
< r
->n
; i
++) {
445 rep
.AddMutex(r
->loop
[i
].mtx_ctx0
);
446 rep
.AddUniqueTid((int)r
->loop
[i
].thr_ctx
);
447 rep
.AddThread((int)r
->loop
[i
].thr_ctx
);
449 StackTrace stacks
[2 * DDReport::kMaxLoopSize
];
450 uptr dummy_pc
= 0x42;
451 for (int i
= 0; i
< r
->n
; i
++) {
453 for (int j
= 0; j
< (flags()->second_deadlock_stack
? 2 : 1); j
++) {
454 u32 stk
= r
->loop
[i
].stk
[j
];
456 const uptr
*trace
= StackDepotGet(stk
, &size
);
457 stacks
[i
].Init(const_cast<uptr
*>(trace
), size
);
459 // Sometimes we fail to extract the stack trace (FIXME: investigate),
460 // but we should still produce some stack trace in the report.
461 stacks
[i
].Init(&dummy_pc
, 1);
463 rep
.AddStack(&stacks
[i
]);
466 // FIXME: use all stacks for suppressions, not just the second stack of the
468 OutputReport(ctx
, rep
, rep
.GetReport()->stacks
[0]);
471 } // namespace __tsan