//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

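// Glue between the tsan runtime and the generic deadlock detector in
// sanitizer_common: provides stack unwinding and thread identification
// for deadlock detector reports.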
struct Callback final : public DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc) : thr(thr), pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  StackID Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

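// Registers a mutex with the deadlock detector and ties the detector's
// per-mutex state back to the SyncVar id.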
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  if (!ShouldReport(thr, typ))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  Lock l(&s->mtx);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Save stack in the case the sync object was created before as atomic.
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
}

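// Note: state needed for reporting is copied out of the SyncVar while its
// lock is held; the report itself (and the race-detecting write below) runs
// after the lock is released, because reporting acquires other locks.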
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  bool unlock_locked = false;
  u64 mid = 0;
  u64 last_lock = 0;
  {
    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
    if (s == 0)
      return;
    Lock l(&s->mtx);
    if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
        ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
      // Destroy is no-op for linker-initialized mutexes.
      return;
    }
    if (common_flags()->detect_deadlocks) {
      Callback cb(thr, pc);
      ctx->dd->MutexDestroy(&cb, &s->dd);
      ctx->dd->MutexInit(&cb, &s->dd);
    }
    if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
        !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      unlock_locked = true;
    }
    mid = s->GetId();
    last_lock = s->last_lock;
    if (!unlock_locked)
      s->Reset(thr->proc());  // must not reset it before the report is printed
  }
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
    ThreadRegistryLock l(&ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
    if (s != 0) {
      Lock l(&s->mtx);
      s->Reset(thr->proc());
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

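// Called before a potentially blocking lock acquisition so that lock-order
// inversions can be reported before the thread actually blocks. Try-locks
// are skipped: they cannot deadlock.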
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    {
      ReadLock l(&s->mtx);
      s->UpdateFlags(flagz);
      if (s->owner_tid != thr->tid) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      }
    }
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

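// Called after a write lock has been acquired. rec is the number of
// recursion levels to add: it must be positive for recursive-lock APIs
// and is forced to 1 otherwise.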
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  u64 mid = 0;
  bool pre_lock = false;
  bool first = false;
  bool report_double_lock = false;
  {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    Lock l(&s->mtx);
    s->UpdateFlags(flagz);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
    if (s->owner_tid == kInvalidTid) {
      CHECK_EQ(s->recursion, 0);
      s->owner_tid = thr->tid;
      s->last_lock = thr->fast_state.raw();
    } else if (s->owner_tid == thr->tid) {
      CHECK_GT(s->recursion, 0);
    } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_double_lock = true;
    }
    first = s->recursion == 0;
    s->recursion += rec;
    if (first) {
      AcquireImpl(thr, pc, &s->clock);
      AcquireImpl(thr, pc, &s->read_clock);
    } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
      // Recursive lock on a mutex not marked write-reentrant;
      // no clocks to acquire.
    }
    thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
    if (first && common_flags()->detect_deadlocks) {
      pre_lock =
          (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
      Callback cb(thr, pc);
      if (pre_lock)
        ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
    }
    mid = s->GetId();
  }
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

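// Returns the number of recursion levels released. Callers that fully
// release a recursive mutex (e.g. the Java mutex annotations) can pass the
// value back to MutexPostLock to restore the recursion count.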
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  u64 mid = 0;
  int rec = 0;
  bool report_bad_unlock = false;
  {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    Lock l(&s->mtx);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
    } else {
      rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
      s->recursion -= rec;
      if (s->recursion == 0) {
        s->owner_tid = kInvalidTid;
        ReleaseStoreImpl(thr, pc, &s->clock);
      }
    }
    thr->mset.Del(s->GetId(), true);
    if (common_flags()->detect_deadlocks && s->recursion == 0 &&
        !report_bad_unlock) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
    }
    mid = s->GetId();
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

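// Read-lock counterpart of MutexPreLock; the deadlock detector is told
// that this acquisition is a read lock (wlock=false).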
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    {
      SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
      ReadLock l(&s->mtx);
      s->UpdateFlags(flagz);
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    }
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

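// Readers acquire only s->clock; read unlocks release into the separate
// s->read_clock, and the next write lock acquires both. This orders readers
// with writers without creating synchronization between the readers.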
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  u64 mid = 0;
  bool report_bad_lock = false;
  bool pre_lock = false;
  {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock l(&s->mtx);
    s->UpdateFlags(flagz);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
    if (s->owner_tid != kInvalidTid) {
      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_lock = true;
      }
    }
    AcquireImpl(thr, pc, &s->clock);
    s->last_lock = thr->fast_state.raw();
    thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
    if (common_flags()->detect_deadlocks) {
      pre_lock =
          (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
      Callback cb(thr, pc);
      if (pre_lock)
        ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
      ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
    }
    mid = s->GetId();
  }
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  u64 mid = 0;
  bool report_bad_unlock = false;
  {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    Lock l(&s->mtx);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    if (s->owner_tid != kInvalidTid) {
      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
    }
    ReleaseImpl(thr, pc, &s->read_clock);
    if (common_flags()->detect_deadlocks && s->recursion == 0) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
    }
    mid = s->GetId();
  }
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

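// Used when the interceptor cannot tell a read unlock from a write unlock
// (e.g. pthread_rwlock_unlock); the owner tid recorded at lock time
// disambiguates the two cases.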
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  u64 mid = 0;
  bool report_bad_unlock = false;
  bool write = true;
  {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    Lock l(&s->mtx);
    if (s->owner_tid == kInvalidTid) {
      // Seems to be read unlock.
      write = false;
      thr->fast_state.IncrementEpoch();
      TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
      ReleaseImpl(thr, pc, &s->read_clock);
    } else if (s->owner_tid == thr->tid) {
      // Seems to be write unlock.
      thr->fast_state.IncrementEpoch();
      TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
      CHECK_GT(s->recursion, 0);
      s->recursion--;
      if (s->recursion == 0) {
        s->owner_tid = kInvalidTid;
        ReleaseStoreImpl(thr, pc, &s->clock);
      }
    } else if (!s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
    thr->mset.Del(s->GetId(), write);
    if (common_flags()->detect_deadlocks && s->recursion == 0) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
    }
    mid = s->GetId();
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

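// Forcibly clears ownership without releasing any clocks; interceptors use
// this to recover the mutex state when a robust mutex lock returns
// EOWNERDEAD.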
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  Lock l(&s->mtx);
  s->owner_tid = kInvalidTid;
  s->recursion = 0;
}

void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId());
}

void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
  if (!s)
    return;
  ReadLock l(&s->mtx);
  AcquireImpl(thr, pc, &s->clock);
}

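// AcquireGlobal synchronizes the current thread with the current epoch of
// every registered thread: the callback below copies each thread's epoch
// into the acquiring thread's vector clock.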
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState *>(arg);
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning) {
    epoch = tctx->thr->fast_state.epoch();
    tctx->thr->clock.NoteGlobalAcquire(epoch);
  }
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AcquireGlobal(ThreadState *thr) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateClockCallback, thr);
}

void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
  Lock l(&s->mtx);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreAcquireImpl(thr, pc, &s->clock);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
  Lock l(&s->mtx);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
  Lock l(&s->mtx);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
}

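// AfterSleep snapshots the clocks of all threads into last_sleep_clock;
// reports use it to flag races that are hidden only by a sleep
// ("as if synchronized via sleep").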
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState *>(arg);
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(&ctx->thread_registry);
  ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateSleepClockCallback,
                                                      thr);
}

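// The *Impl helpers below operate directly on vector clocks and expect the
// caller to hold the corresponding SyncVar mutex. The release variants also
// record the epoch of the last synchronization in fast_synch_epoch.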
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
}

void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
}

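// Converts a deadlock detector report (a cycle of mutexes and threads)
// into a user-visible tsan deadlock report.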
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan