//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

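// Proxy through which the deadlock detector unwinds stacks and identifies
// threads using TSan's per-thread state.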
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

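// Registers a sync object with the deadlock detector.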
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

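// Reports a mutex API misuse (double lock, bad unlock, etc.) with the
// current stack and the mutex location.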
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

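// Called when a mutex is initialized; the dummy MemoryWrite catches races
// between mutex creation and other accesses to the same address.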
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

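// Called when a mutex is destroyed. Optionally reports destruction of a
// locked mutex, then imitates a memory write to catch unlock-destroy races.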
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit)) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

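// Called before a potentially blocking write-lock acquisition so the
// deadlock detector records the lock-order edge before the thread blocks.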
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}

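// Called after a write lock is acquired: updates ownership and recursion,
// acquires the mutex clocks, and notifies the deadlock detector.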
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (pc && IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (!pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

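// Called on write unlock. Returns the released recursion count so a matching
// re-lock can restore it.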
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

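// Read-lock counterpart of MutexPreLock: notifies the deadlock detector
// before a potentially blocking read-lock acquisition.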
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

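// Called after a read lock is acquired: acquires the write clock, records
// the lock in the thread's mutex set, and notifies the deadlock detector.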
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (pc && IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (!pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

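// Called on read unlock: releases into the read clock; a read unlock of a
// write-held mutex is reported as misuse.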
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (pc && IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

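// Used when the caller does not know whether the lock was held in read or
// write mode; current ownership decides which unlock path runs.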
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

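// Forcibly resets ownership and recursion (used e.g. to recover a robust
// mutex whose owner died).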
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

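// Reports an operation on an invalid (e.g. uninitialized or destroyed) mutex.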
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

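// Annotation-style happens-before: acquire the clock of the sync object at
// addr, if one exists.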
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

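// Helper for AcquireGlobal: merges every thread's current epoch into thr's
// vector clock.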
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

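// Annotation-style happens-before: release the current thread's clock into
// the sync object at addr.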
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

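// The *Impl functions below are the clock-level primitives; they assume the
// caller already holds the lock protecting the target SyncClock.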
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

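// Builds and outputs a deadlock report from the lock-order cycle produced by
// the deadlock detector.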
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan