//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_stacktrace.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"
21 void MutexCreate(ThreadState
*thr
, uptr pc
, uptr addr
,
22 bool rw
, bool recursive
, bool linker_init
) {
24 CHECK_GT(thr
->in_rtl
, 0);
25 DPrintf("#%d: MutexCreate %zx\n", thr
->tid
, addr
);
26 StatInc(thr
, StatMutexCreate
);
27 if (!linker_init
&& IsAppMem(addr
)) {
28 CHECK(!thr
->is_freeing
);
29 thr
->is_freeing
= true;
30 MemoryWrite(thr
, pc
, addr
, kSizeLog1
);
31 thr
->is_freeing
= false;
33 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
35 s
->is_recursive
= recursive
;
36 s
->is_linker_init
= linker_init
;
40 void MutexDestroy(ThreadState
*thr
, uptr pc
, uptr addr
) {
42 CHECK_GT(thr
->in_rtl
, 0);
43 DPrintf("#%d: MutexDestroy %zx\n", thr
->tid
, addr
);
44 StatInc(thr
, StatMutexDestroy
);
46 // Global mutexes not marked as LINKER_INITIALIZED
47 // cause tons of not interesting reports, so just ignore it.
48 if (IsGlobalVar(addr
))
51 SyncVar
*s
= ctx
->synctab
.GetAndRemove(thr
, pc
, addr
);
55 CHECK(!thr
->is_freeing
);
56 thr
->is_freeing
= true;
57 MemoryWrite(thr
, pc
, addr
, kSizeLog1
);
58 thr
->is_freeing
= false;
60 if (flags()->report_destroy_locked
61 && s
->owner_tid
!= SyncVar::kInvalidTid
64 ThreadRegistryLock
l(ctx
->thread_registry
);
65 ScopedReport
rep(ReportTypeMutexDestroyLocked
);
68 trace
.ObtainCurrent(thr
, pc
);
70 FastState
last(s
->last_lock
);
71 RestoreStack(last
.tid(), last
.epoch(), &trace
, 0);
73 rep
.AddLocation(s
->addr
, 1);
74 OutputReport(ctx
, rep
);
76 thr
->mset
.Remove(s
->GetId());
80 void MutexLock(ThreadState
*thr
, uptr pc
, uptr addr
, int rec
) {
81 CHECK_GT(thr
->in_rtl
, 0);
82 DPrintf("#%d: MutexLock %zx rec=%d\n", thr
->tid
, addr
, rec
);
85 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
86 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
87 thr
->fast_state
.IncrementEpoch();
88 TraceAddEvent(thr
, thr
->fast_state
, EventTypeLock
, s
->GetId());
89 if (s
->owner_tid
== SyncVar::kInvalidTid
) {
90 CHECK_EQ(s
->recursion
, 0);
91 s
->owner_tid
= thr
->tid
;
92 s
->last_lock
= thr
->fast_state
.raw();
93 } else if (s
->owner_tid
== thr
->tid
) {
94 CHECK_GT(s
->recursion
, 0);
96 Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr
);
97 PrintCurrentStack(thr
, pc
);
99 if (s
->recursion
== 0) {
100 StatInc(thr
, StatMutexLock
);
101 AcquireImpl(thr
, pc
, &s
->clock
);
102 AcquireImpl(thr
, pc
, &s
->read_clock
);
103 } else if (!s
->is_recursive
) {
104 StatInc(thr
, StatMutexRecLock
);
107 thr
->mset
.Add(s
->GetId(), true, thr
->fast_state
.epoch());
111 int MutexUnlock(ThreadState
*thr
, uptr pc
, uptr addr
, bool all
) {
112 CHECK_GT(thr
->in_rtl
, 0);
113 DPrintf("#%d: MutexUnlock %zx all=%d\n", thr
->tid
, addr
, all
);
115 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
116 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
117 thr
->fast_state
.IncrementEpoch();
118 TraceAddEvent(thr
, thr
->fast_state
, EventTypeUnlock
, s
->GetId());
120 if (s
->recursion
== 0) {
123 Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr
);
124 PrintCurrentStack(thr
, pc
);
126 } else if (s
->owner_tid
!= thr
->tid
) {
129 Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
131 PrintCurrentStack(thr
, pc
);
134 rec
= all
? s
->recursion
: 1;
136 if (s
->recursion
== 0) {
137 StatInc(thr
, StatMutexUnlock
);
138 s
->owner_tid
= SyncVar::kInvalidTid
;
139 ReleaseStoreImpl(thr
, pc
, &s
->clock
);
141 StatInc(thr
, StatMutexRecUnlock
);
144 thr
->mset
.Del(s
->GetId(), true);
149 void MutexReadLock(ThreadState
*thr
, uptr pc
, uptr addr
) {
150 CHECK_GT(thr
->in_rtl
, 0);
151 DPrintf("#%d: MutexReadLock %zx\n", thr
->tid
, addr
);
152 StatInc(thr
, StatMutexReadLock
);
154 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
155 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, false);
156 thr
->fast_state
.IncrementEpoch();
157 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRLock
, s
->GetId());
158 if (s
->owner_tid
!= SyncVar::kInvalidTid
) {
159 Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
161 PrintCurrentStack(thr
, pc
);
163 AcquireImpl(thr
, pc
, &s
->clock
);
164 s
->last_lock
= thr
->fast_state
.raw();
165 thr
->mset
.Add(s
->GetId(), false, thr
->fast_state
.epoch());
169 void MutexReadUnlock(ThreadState
*thr
, uptr pc
, uptr addr
) {
170 CHECK_GT(thr
->in_rtl
, 0);
171 DPrintf("#%d: MutexReadUnlock %zx\n", thr
->tid
, addr
);
172 StatInc(thr
, StatMutexReadUnlock
);
174 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
175 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
176 thr
->fast_state
.IncrementEpoch();
177 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRUnlock
, s
->GetId());
178 if (s
->owner_tid
!= SyncVar::kInvalidTid
) {
179 Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
181 PrintCurrentStack(thr
, pc
);
183 ReleaseImpl(thr
, pc
, &s
->read_clock
);
185 thr
->mset
.Del(s
->GetId(), false);
188 void MutexReadOrWriteUnlock(ThreadState
*thr
, uptr pc
, uptr addr
) {
189 CHECK_GT(thr
->in_rtl
, 0);
190 DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr
->tid
, addr
);
192 MemoryReadAtomic(thr
, pc
, addr
, kSizeLog1
);
193 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
195 if (s
->owner_tid
== SyncVar::kInvalidTid
) {
196 // Seems to be read unlock.
198 StatInc(thr
, StatMutexReadUnlock
);
199 thr
->fast_state
.IncrementEpoch();
200 TraceAddEvent(thr
, thr
->fast_state
, EventTypeRUnlock
, s
->GetId());
201 ReleaseImpl(thr
, pc
, &s
->read_clock
);
202 } else if (s
->owner_tid
== thr
->tid
) {
203 // Seems to be write unlock.
204 thr
->fast_state
.IncrementEpoch();
205 TraceAddEvent(thr
, thr
->fast_state
, EventTypeUnlock
, s
->GetId());
206 CHECK_GT(s
->recursion
, 0);
208 if (s
->recursion
== 0) {
209 StatInc(thr
, StatMutexUnlock
);
210 s
->owner_tid
= SyncVar::kInvalidTid
;
211 ReleaseImpl(thr
, pc
, &s
->clock
);
213 StatInc(thr
, StatMutexRecUnlock
);
215 } else if (!s
->is_broken
) {
217 Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
219 PrintCurrentStack(thr
, pc
);
221 thr
->mset
.Del(s
->GetId(), write
);
225 void MutexRepair(ThreadState
*thr
, uptr pc
, uptr addr
) {
226 Context
*ctx
= CTX();
227 CHECK_GT(thr
->in_rtl
, 0);
228 DPrintf("#%d: MutexRepair %zx\n", thr
->tid
, addr
);
229 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
230 s
->owner_tid
= SyncVar::kInvalidTid
;
235 void Acquire(ThreadState
*thr
, uptr pc
, uptr addr
) {
236 CHECK_GT(thr
->in_rtl
, 0);
237 DPrintf("#%d: Acquire %zx\n", thr
->tid
, addr
);
238 if (thr
->ignore_sync
)
240 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, false);
241 AcquireImpl(thr
, pc
, &s
->clock
);
245 static void UpdateClockCallback(ThreadContextBase
*tctx_base
, void *arg
) {
246 ThreadState
*thr
= reinterpret_cast<ThreadState
*>(arg
);
247 ThreadContext
*tctx
= static_cast<ThreadContext
*>(tctx_base
);
248 if (tctx
->status
== ThreadStatusRunning
)
249 thr
->clock
.set(tctx
->tid
, tctx
->thr
->fast_state
.epoch());
251 thr
->clock
.set(tctx
->tid
, tctx
->epoch1
);
254 void AcquireGlobal(ThreadState
*thr
, uptr pc
) {
255 DPrintf("#%d: AcquireGlobal\n", thr
->tid
);
256 if (thr
->ignore_sync
)
258 ThreadRegistryLock
l(CTX()->thread_registry
);
259 CTX()->thread_registry
->RunCallbackForEachThreadLocked(
260 UpdateClockCallback
, thr
);
263 void Release(ThreadState
*thr
, uptr pc
, uptr addr
) {
264 CHECK_GT(thr
->in_rtl
, 0);
265 DPrintf("#%d: Release %zx\n", thr
->tid
, addr
);
266 if (thr
->ignore_sync
)
268 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
269 thr
->fast_state
.IncrementEpoch();
270 // Can't increment epoch w/o writing to the trace as well.
271 TraceAddEvent(thr
, thr
->fast_state
, EventTypeMop
, 0);
272 ReleaseImpl(thr
, pc
, &s
->clock
);
276 void ReleaseStore(ThreadState
*thr
, uptr pc
, uptr addr
) {
277 CHECK_GT(thr
->in_rtl
, 0);
278 DPrintf("#%d: ReleaseStore %zx\n", thr
->tid
, addr
);
279 if (thr
->ignore_sync
)
281 SyncVar
*s
= CTX()->synctab
.GetOrCreateAndLock(thr
, pc
, addr
, true);
282 thr
->fast_state
.IncrementEpoch();
283 // Can't increment epoch w/o writing to the trace as well.
284 TraceAddEvent(thr
, thr
->fast_state
, EventTypeMop
, 0);
285 ReleaseStoreImpl(thr
, pc
, &s
->clock
);
290 static void UpdateSleepClockCallback(ThreadContextBase
*tctx_base
, void *arg
) {
291 ThreadState
*thr
= reinterpret_cast<ThreadState
*>(arg
);
292 ThreadContext
*tctx
= static_cast<ThreadContext
*>(tctx_base
);
293 if (tctx
->status
== ThreadStatusRunning
)
294 thr
->last_sleep_clock
.set(tctx
->tid
, tctx
->thr
->fast_state
.epoch());
296 thr
->last_sleep_clock
.set(tctx
->tid
, tctx
->epoch1
);
299 void AfterSleep(ThreadState
*thr
, uptr pc
) {
300 DPrintf("#%d: AfterSleep %zx\n", thr
->tid
);
301 if (thr
->ignore_sync
)
303 thr
->last_sleep_stack_id
= CurrentStackId(thr
, pc
);
304 ThreadRegistryLock
l(CTX()->thread_registry
);
305 CTX()->thread_registry
->RunCallbackForEachThreadLocked(
306 UpdateSleepClockCallback
, thr
);
310 void AcquireImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
311 if (thr
->ignore_sync
)
313 thr
->clock
.set(thr
->tid
, thr
->fast_state
.epoch());
314 thr
->clock
.acquire(c
);
315 StatInc(thr
, StatSyncAcquire
);
318 void ReleaseImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
319 if (thr
->ignore_sync
)
321 thr
->clock
.set(thr
->tid
, thr
->fast_state
.epoch());
322 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
323 thr
->clock
.release(c
);
324 StatInc(thr
, StatSyncRelease
);
327 void ReleaseStoreImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
328 if (thr
->ignore_sync
)
330 thr
->clock
.set(thr
->tid
, thr
->fast_state
.epoch());
331 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
332 thr
->clock
.ReleaseStore(c
);
333 StatInc(thr
, StatSyncRelease
);
336 void AcquireReleaseImpl(ThreadState
*thr
, uptr pc
, SyncClock
*c
) {
337 if (thr
->ignore_sync
)
339 thr
->clock
.set(thr
->tid
, thr
->fast_state
.epoch());
340 thr
->fast_synch_epoch
= thr
->fast_state
.epoch();
341 thr
->clock
.acq_rel(c
);
342 StatInc(thr
, StatSyncAcquire
);
343 StatInc(thr
, StatSyncRelease
);
346 } // namespace __tsan