//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

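// Adapts the current thread to the generic deadlock detector interface:
// exposes the detector's per-physical-thread (pt) and per-logical-thread (lt)
// state and lets the detector capture the current stack on demand.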
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

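// Called when a mutex is created (explicitly or on first use). Registers the
// mutex in the metamap and, outside of Go, records its creation stack for use
// in reports.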
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

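// Called when a mutex is destroyed. Destroy is a no-op for linker-initialized
// mutexes; otherwise this may report destruction of a locked mutex and
// imitates a memory write at the address to catch unlock-destroy races.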
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

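// Called before a potentially blocking write-lock attempt so that the
// deadlock detector sees the lock-order edge even if the thread then blocks.
// Try-locks are skipped: they cannot deadlock.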
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}

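// Called after a write lock is acquired: updates ownership and recursion,
// acquires the mutex's clocks into the thread on first acquisition, and
// reports double locks.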
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

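// Called before a write unlock. Returns the number of recursion levels
// released; callers (e.g. the cond-wait interceptors) can pass it back to
// MutexPostLock to restore the recursion count after re-locking.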
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

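// Read-lock counterpart of MutexPreLock: records the prospective lock-order
// edge for non-try read locks.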
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

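// Called after a read lock is acquired. Read locks never take ownership, so
// owner_tid != kInvalidTid here means the mutex is write-held and the read
// lock is reported as a misuse.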
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

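// Called before a read unlock: releases the thread's clock into the mutex's
// separate read clock (writers acquire it on lock).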
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

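// Used when the caller does not know whether it holds the mutex for reading
// or writing (e.g. a reader-writer unlock API with a single entry point);
// the mode is inferred from owner_tid.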
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

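// Forcibly resets the mutex to an unlocked, non-recursive state when the
// modelled state is known to be stale (e.g. a robust mutex whose owner died).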
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

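// Plain acquire on an arbitrary address: pulls the sync object's clock into
// the thread's clock, if the object exists at all.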
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

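// AcquireGlobal synchronizes the caller with everything that has happened in
// all threads so far: for each thread the callback merges that thread's
// current epoch into the caller's clock.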
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning) {
    epoch = tctx->thr->fast_state.epoch();
    tctx->thr->clock.NoteGlobalAcquire(epoch);
  }
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

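// The release family below differs only in the clock operation applied; see
// the corresponding *Impl functions at the end of the file.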
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreAcquireImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

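// The *Impl functions operate directly on vector clocks: acquire joins the
// sync clock into the thread's clock, release joins the thread's clock into
// the sync clock, and release-store overwrites the sync clock instead of
// joining into it.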
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncReleaseStoreAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

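// Converts a lock-order cycle found by the deadlock detector into a report:
// one mutex and thread per edge, plus up to two stacks per edge when
// second_deadlock_stack is set.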
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan