//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
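// Adapts a TSan thread to the sanitizer_common deadlock detector interface:
// wires up the detector's physical/logical thread state and lets it capture
// the current stack and thread id on demand.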
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};
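// Registers the mutex with the deadlock detector and stores the sync object
// id so that detector reports can be mapped back to this mutex.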
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}
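// Emits a mutex misuse report (double lock, bad unlock, invalid access) for
// the mutex identified by mid, attaching the current stack and the address.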
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
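// Mutex construction hook: performs a shadow write on the mutex address to
// catch races with concurrent accesses, then creates/flags the sync object
// and records its creation stack.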
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}
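// Mutex destruction hook: optionally reports destruction of a still-locked
// mutex, resets the sync object, and imitates a memory write so that
// unlock-destroy races are caught by the regular race detection machinery.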
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
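// Called before a blocking write-lock attempt so the deadlock detector can
// observe the lock-order edge; try-locks are skipped since they do not block.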
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}
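// Called after a write lock is acquired: updates ownership and recursion,
// acquires the mutex's clocks into the thread on first acquisition, and
// reports double locks and any deadlock found by the detector.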
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
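// Write unlock. Returns the number of recursion levels released; with
// MutexFlagRecursiveUnlock all levels are released at once, otherwise one.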
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
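// Read-lock counterparts of MutexPreLock/MutexPostLock: notify the deadlock
// detector before a blocking read-lock attempt, then record the acquired
// read lock and report misuse (read lock of a write-held mutex).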
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
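// Read unlock: releases into the mutex's read clock and reports a bad
// unlock if the mutex is currently write-held by some thread.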
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
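// Unlock for callers that do not know whether the mutex is read- or
// write-held; the mode is inferred from owner_tid.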
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
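// Forcibly resets ownership and recursion of the mutex without reporting.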
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}
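// Standalone acquire: if a sync object exists at addr, pull its vector
// clock into the current thread, establishing a happens-before edge.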
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}
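// Standalone release: publishes the current thread's vector clock into the
// sync object at addr (ReleaseStore overwrites the clock rather than
// merging into it).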
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
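// The *Impl helpers below operate on a SyncClock whose sync object is
// already locked by the caller; they only manipulate vector clocks and are
// no-ops when the thread ignores synchronization.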
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}
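// Converts a DDReport from the deadlock detector into a TSan report: one
// mutex/thread pair per edge of the lock-order cycle, plus the stacks where
// the involved mutexes were acquired.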
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}
}  // namespace __tsan