//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
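
// Overview (a summary of the code below): every user mutex is assigned a
// small integer id and a graph node (struct Mutex below). MutexBeforeLock
// records a "held -> about-to-acquire" edge from each mutex the thread
// already holds to the one being acquired, then walks the adjacency lists
// (CycleCheck) looking for a cycle through the new mutex; such a cycle is a
// lock-order inversion, i.e. a potential deadlock.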
12 #include "sanitizer_deadlock_detector_interface.h"
13 #include "sanitizer_common.h"
14 #include "sanitizer_allocator_internal.h"
15 #include "sanitizer_placement_new.h"
16 #include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;
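
// Mutex descriptors live in a two-level table: an L1 array of kL1Size
// pointers, each to a lazily mmap-ed L2 chunk of kL2Size descriptors, giving
// room for kMaxMutex (1024 * 1024) mutexes in total. See allocateId() and
// getMutex() below.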

struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};

struct Mutex {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};
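
// Mutex is the graph node for one user mutex: link[] holds up to kMaxLink
// outgoing lock-order edges, and seq is bumped when the mutex is destroyed so
// that stale edges pointing at a recycled id can be recognized and skipped.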

struct DD : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  Mutex *getMutex(u32 id);
  u32 getMutexId(Mutex *m);

  DDFlags flags;

  Mutex* mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}
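
// The detector is constructed with placement new in raw mmap-ed memory,
// presumably so that it does not depend on any allocator being initialized
// yet.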

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  // Mutex ids are handed out sequentially starting from 0.
  id_gen = 0;
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

Mutex *DD::getMutex(u32 id) {
  return &mutex[id / kL2Size][id % kL2Size];
}
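
// For example, id 2050 resolves to mutex[2][2]: 2050 / 1024 == 2 selects the
// L2 chunk and 2050 % 1024 == 2 indexes into it.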

u32 DD::getMutexId(Mutex *m) {
  for (int i = 0; i < kL1Size; i++) {
    Mutex *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}
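
// The reverse mapping scans the L1 table for the chunk that contains *m and
// recovers the id from the pointer offset; it is linear in kL1Size but is
// only used for verbose logging.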

u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    // Reuse an id released by a destroyed mutex.
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      // Crossing into a new L2 chunk: allocate it on demand.
      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
          "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}
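
// Recycling ids through free_id keeps the id space (and thus the visited[]
// bitmap used by CycleCheck) bounded by kMaxMutex even when mutexes are
// created and destroyed continuously.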

void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  Mutex *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    Mutex *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}
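
// An edge mtx1 -> m is recorded for every mutex mtx1 already held while m is
// being acquired. For example, if one thread locks A then B while another
// locks B then A, the edges A->B and B->A form a cycle that CycleCheck
// reports. The seq stamp stored in each link lets later traversals skip edges
// whose target descriptor has been destroyed and recycled since.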

void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}
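
// MutexBeforeLock is apparently not invoked on the trylock path (a failed
// trylock cannot block, so it cannot contribute to a deadlock), which is why
// a successful trylock pushes the mutex onto lt->locked here instead.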

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (cb->lt->locked[i].id == m->id) {
      // Remove the mutex by swapping it with the last held one.
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}
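
// Bumping mtx->seq under mtx->mtx invalidates all edges that still carry the
// old seq: both MutexBeforeLock and CycleCheck compare a link's seq against
// the descriptor's current one and treat mismatches as stale.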

void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      // Sentinel: the node whose successors were pushed above it is
      // exhausted, so pop it from the current path.
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    Mutex *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;  // The link is stale.
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;  // A node with no outgoing edges cannot lie on a cycle.
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // Mutex *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}
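
// The traversal is an iterative DFS with an explicit stack: a Link(kEndId)
// sentinel is pushed before each expanded node's successors, so popping the
// sentinel again means that node's subtree is done and npath-- removes it
// from the current path. If the walk ever returns to m->id, path[0..npath)
// spells out the deadlock cycle.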

void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}
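
// Each loop entry pairs consecutive path links: entry i says thread link->tid
// acquired mutex link->id (mtx_ctx1) while holding link0->id (mtx_ctx0); the
// i == 0 entry wraps around to path[npath - 1], closing the cycle.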

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2