//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"
namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
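// Adapts the current TSan thread to the common deadlock detector interface
// (DDCallback): it supplies the detector's per-thread state and lets the
// detector capture stacks and unique thread ids on demand.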
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  virtual u32 Unwind() {
    return CurrentStackId(thr, pc);
  }
  virtual int UniqueTid() {
    return thr->unique_id;
  }
};
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}
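// Emits a mutex-misuse report (double lock, bad unlock, etc.) consisting of
// the current stack, the mutex involved and its address as the location.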
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  rep.AddStack(&trace);
  rep.AddLocation(addr, 1);
  OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
}
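// Registers a user mutex at addr. The MemoryWrite below models the
// constructor's write to the mutex memory so that races between mutex
// initialization and other accesses are detected; linker-initialized mutexes
// are exempt because they can legally be used before any constructor runs.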
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}
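// Removes the sync object for addr; optionally reports destruction of a
// still-locked mutex, attaching both the current stack and the stack of the
// last lock operation (restored from the trace).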
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause lots of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
  }
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
  thr->mset.Remove(s->GetId());
  DestroyAndFree(s);
}
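// Handles a write-lock acquisition: traces the event, flags double locking
// by another thread, performs a happens-before acquire of both the write and
// read clocks on first acquisition, and drives the deadlock detector.
// rec > 1 is used to re-acquire a recursive mutex that was fully released
// with MutexUnlock(all=true).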
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (flags()->detect_deadlocks && s->recursion == 1) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
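// Handles a write-unlock; with all=true every recursion level is dropped at
// once (the count is returned so the caller can re-acquire later, e.g.
// around condition-variable waits). The release on the mutex clock happens
// only when the recursion count reaches zero.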
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (s->recursion == 0 || s->owner_tid != thr->tid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
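// Handles a read (shared) lock: acquires only the write clock, so concurrent
// readers do not synchronize with each other, and reports read-locking of a
// mutex that is currently write-held.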
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
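// Handles a read-unlock: releases into the separate read clock, which the
// next write-locker acquires, and reports read-unlocking of a write-held
// mutex.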
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
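// Called when the interceptor cannot tell whether a read or a write lock is
// being released (e.g. pthread_rwlock_unlock); the recorded owner thread is
// used to disambiguate the two cases.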
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
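// Forcibly resets the modeled lock state without reporting or synchronizing;
// presumably a recovery path for when the runtime's view of the mutex has
// diverged from its real state.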
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}
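// Plain acquire on an arbitrary address; used for synchronization that is
// not mutex-shaped (e.g. happens-before annotations).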
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}
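// AcquireGlobal synchronizes the current thread with every other thread: the
// callback below copies each thread's current epoch (or its final epoch1, if
// the thread has finished) into the acquiring thread's clock.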
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}
void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}
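// Release and ReleaseStore publish the current thread's clock through the
// sync object at addr; ReleaseStore overwrites the destination clock rather
// than merging into it (store-release semantics for a fresh value).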
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
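// Snapshots every thread's epoch after a sleep-like call; reports use
// last_sleep_clock to mark memory accesses that are ordered only by the
// sleep ("as if synchronized via sleep").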
#ifndef TSAN_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif
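// The *Impl helpers below implement the raw vector-clock operations; callers
// are expected to hold the lock on the corresponding sync object.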
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(c);
  StatInc(thr, StatSyncAcquire);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(c);
  StatInc(thr, StatSyncRelease);
}
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(c);
  StatInc(thr, StatSyncRelease);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}
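// Converts a lock-order cycle found by the deadlock detector into a report:
// one mutex and one thread per edge, plus one or two stacks per edge
// depending on the second_deadlock_stack flag.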
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  StackTrace stacks[2 * DDReport::kMaxLoopSize];
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    uptr size;
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk) {
        const uptr *trace = StackDepotGet(stk, &size);
        stacks[i].Init(const_cast<uptr *>(trace), size);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        stacks[i].Init(&dummy_pc, 1);
      }
      rep.AddStack(&stacks[i]);
    }
  }
  // FIXME: use all stacks for suppressions, not just the second stack of the
  // first edge.
  OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
}

}  // namespace __tsan