//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"
20 #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
22 namespace __sanitizer
{
24 const int kMaxNesting
= 64;
26 const u32 kEndId
= -2;
27 const int kMaxLink
= 8;
28 const int kL1Size
= 1024;
29 const int kL2Size
= 1024;
30 const int kMaxMutex
= kL1Size
* kL2Size
;
36 explicit Id(u32 id
= 0, u32 seq
= 0)
49 explicit Link(u32 id
= 0, u32 seq
= 0, u32 tid
= 0, u32 s0
= 0, u32 s1
= 0)
58 struct DDPhysicalThread
{
61 bool visited
[kMaxMutex
];
62 Link pending
[kMaxMutex
];
71 struct DDLogicalThread
{
73 ThreadMutex locked
[kMaxNesting
];
84 struct DD
: public DDetector
{
85 explicit DD(const DDFlags
*flags
);
87 DDPhysicalThread
* CreatePhysicalThread();
88 void DestroyPhysicalThread(DDPhysicalThread
*pt
);
90 DDLogicalThread
* CreateLogicalThread(u64 ctx
);
91 void DestroyLogicalThread(DDLogicalThread
*lt
);
93 void MutexInit(DDCallback
*cb
, DDMutex
*m
);
94 void MutexBeforeLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
);
95 void MutexAfterLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
,
97 void MutexBeforeUnlock(DDCallback
*cb
, DDMutex
*m
, bool wlock
);
98 void MutexDestroy(DDCallback
*cb
, DDMutex
*m
);
100 DDReport
*GetReport(DDCallback
*cb
);
102 void CycleCheck(DDPhysicalThread
*pt
, DDLogicalThread
*lt
, DDMutex
*mtx
);
103 void Report(DDPhysicalThread
*pt
, DDLogicalThread
*lt
, int npath
);
104 u32
allocateId(DDCallback
*cb
);
105 Mutex
*getMutex(u32 id
);
106 u32
getMutexId(Mutex
*m
);
110 Mutex
* mutex
[kL1Size
];
113 InternalMmapVector
<u32
> free_id
;
117 DDetector
*DDetector::Create(const DDFlags
*flags
) {
119 void *mem
= MmapOrDie(sizeof(DD
), "deadlock detector");
120 return new(mem
) DD(flags
);
123 DD::DD(const DDFlags
*flags
)
129 DDPhysicalThread
* DD::CreatePhysicalThread() {
130 DDPhysicalThread
*pt
= (DDPhysicalThread
*)MmapOrDie(sizeof(DDPhysicalThread
),
131 "deadlock detector (physical thread)");
135 void DD::DestroyPhysicalThread(DDPhysicalThread
*pt
) {
136 pt
->~DDPhysicalThread();
137 UnmapOrDie(pt
, sizeof(DDPhysicalThread
));
140 DDLogicalThread
* DD::CreateLogicalThread(u64 ctx
) {
141 DDLogicalThread
*lt
= (DDLogicalThread
*)InternalAlloc(
142 sizeof(DDLogicalThread
));
148 void DD::DestroyLogicalThread(DDLogicalThread
*lt
) {
149 lt
->~DDLogicalThread();
153 void DD::MutexInit(DDCallback
*cb
, DDMutex
*m
) {
154 VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb
->lt
->ctx
, m
);
157 atomic_store(&m
->owner
, 0, memory_order_relaxed
);
160 Mutex
*DD::getMutex(u32 id
) {
161 return &mutex
[id
/ kL2Size
][id
% kL2Size
];
164 u32
DD::getMutexId(Mutex
*m
) {
165 for (int i
= 0; i
< kL1Size
; i
++) {
166 Mutex
*tab
= mutex
[i
];
169 if (m
>= tab
&& m
< tab
+ kL2Size
)
170 return i
* kL2Size
+ (m
- tab
);
175 u32
DD::allocateId(DDCallback
*cb
) {
177 SpinMutexLock
l(&mtx
);
178 if (free_id
.size() > 0) {
182 CHECK_LT(id_gen
, kMaxMutex
);
183 if ((id_gen
% kL2Size
) == 0) {
184 mutex
[id_gen
/ kL2Size
] = (Mutex
*)MmapOrDie(kL2Size
* sizeof(Mutex
),
185 "deadlock detector (mutex table)");
189 CHECK_LE(id
, kMaxMutex
);
190 VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb
->lt
->ctx
, id
);
194 void DD::MutexBeforeLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
) {
195 VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
196 cb
->lt
->ctx
, m
, wlock
, cb
->lt
->nlocked
);
197 DDPhysicalThread
*pt
= cb
->pt
;
198 DDLogicalThread
*lt
= cb
->lt
;
200 uptr owner
= atomic_load(&m
->owner
, memory_order_relaxed
);
201 if (owner
== (uptr
)cb
->lt
) {
202 VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
207 CHECK_LE(lt
->nlocked
, kMaxNesting
);
209 // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
211 m
->id
= allocateId(cb
);
213 ThreadMutex
*tm
= <
->locked
[lt
->nlocked
++];
215 if (flags
.second_deadlock_stack
)
216 tm
->stk
= cb
->Unwind();
217 if (lt
->nlocked
== 1) {
218 VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
224 Mutex
*mtx
= getMutex(m
->id
);
225 for (int i
= 0; i
< lt
->nlocked
- 1; i
++) {
226 u32 id1
= lt
->locked
[i
].id
;
227 u32 stk1
= lt
->locked
[i
].stk
;
228 Mutex
*mtx1
= getMutex(id1
);
229 SpinMutexLock
l(&mtx1
->mtx
);
230 if (mtx1
->nlink
== kMaxLink
) {
231 // FIXME(dvyukov): check stale links
235 for (; li
< mtx1
->nlink
; li
++) {
236 Link
*link
= &mtx1
->link
[li
];
237 if (link
->id
== m
->id
) {
238 if (link
->seq
!= mtx
->seq
) {
239 link
->seq
= mtx
->seq
;
242 link
->stk1
= cb
->Unwind();
244 VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
245 cb
->lt
->ctx
, getMutexId(mtx1
), m
->id
);
250 if (li
== mtx1
->nlink
) {
251 // FIXME(dvyukov): check stale links
252 Link
*link
= &mtx1
->link
[mtx1
->nlink
++];
254 link
->seq
= mtx
->seq
;
257 link
->stk1
= cb
->Unwind();
259 VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
260 cb
->lt
->ctx
, getMutexId(mtx1
), m
->id
);
264 if (!added
|| mtx
->nlink
== 0) {
265 VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
270 CycleCheck(pt
, lt
, m
);
273 void DD::MutexAfterLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
,
275 VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
276 cb
->lt
->ctx
, m
, wlock
, trylock
, cb
->lt
->nlocked
);
277 DDLogicalThread
*lt
= cb
->lt
;
279 uptr owner
= atomic_load(&m
->owner
, memory_order_relaxed
);
280 if (owner
== (uptr
)cb
->lt
) {
281 VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb
->lt
->ctx
);
288 VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb
->lt
->ctx
);
289 CHECK_EQ(m
->recursion
, 0);
291 atomic_store(&m
->owner
, (uptr
)cb
->lt
, memory_order_relaxed
);
297 CHECK_LE(lt
->nlocked
, kMaxNesting
);
299 m
->id
= allocateId(cb
);
300 ThreadMutex
*tm
= <
->locked
[lt
->nlocked
++];
302 if (flags
.second_deadlock_stack
)
303 tm
->stk
= cb
->Unwind();
306 void DD::MutexBeforeUnlock(DDCallback
*cb
, DDMutex
*m
, bool wlock
) {
307 VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
308 cb
->lt
->ctx
, m
, wlock
, cb
->lt
->nlocked
);
309 DDLogicalThread
*lt
= cb
->lt
;
311 uptr owner
= atomic_load(&m
->owner
, memory_order_relaxed
);
312 if (owner
== (uptr
)cb
->lt
) {
313 VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb
->lt
->ctx
);
314 if (--m
->recursion
> 0)
316 VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb
->lt
->ctx
);
317 atomic_store(&m
->owner
, 0, memory_order_relaxed
);
319 CHECK_NE(m
->id
, kNoId
);
320 int last
= lt
->nlocked
- 1;
321 for (int i
= last
; i
>= 0; i
--) {
322 if (cb
->lt
->locked
[i
].id
== m
->id
) {
323 lt
->locked
[i
] = lt
->locked
[last
];
330 void DD::MutexDestroy(DDCallback
*cb
, DDMutex
*m
) {
331 VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
333 DDLogicalThread
*lt
= cb
->lt
;
338 // Remove the mutex from lt->locked if there.
339 int last
= lt
->nlocked
- 1;
340 for (int i
= last
; i
>= 0; i
--) {
341 if (lt
->locked
[i
].id
== m
->id
) {
342 lt
->locked
[i
] = lt
->locked
[last
];
348 // Clear and invalidate the mutex descriptor.
350 Mutex
*mtx
= getMutex(m
->id
);
351 SpinMutexLock
l(&mtx
->mtx
);
356 // Return id to cache.
358 SpinMutexLock
l(&mtx
);
359 free_id
.push_back(m
->id
);
363 void DD::CycleCheck(DDPhysicalThread
*pt
, DDLogicalThread
*lt
,
365 internal_memset(pt
->visited
, 0, sizeof(pt
->visited
));
369 Mutex
*mtx
= getMutex(m
->id
);
370 SpinMutexLock
l(&mtx
->mtx
);
371 for (int li
= 0; li
< mtx
->nlink
; li
++)
372 pt
->pending
[npending
++] = mtx
->link
[li
];
374 while (npending
> 0) {
375 Link link
= pt
->pending
[--npending
];
376 if (link
.id
== kEndId
) {
380 if (pt
->visited
[link
.id
])
382 Mutex
*mtx1
= getMutex(link
.id
);
383 SpinMutexLock
l(&mtx1
->mtx
);
384 if (mtx1
->seq
!= link
.seq
)
386 pt
->visited
[link
.id
] = true;
387 if (mtx1
->nlink
== 0)
389 pt
->path
[npath
++] = link
;
390 pt
->pending
[npending
++] = Link(kEndId
);
391 if (link
.id
== m
->id
)
392 return Report(pt
, lt
, npath
); // Bingo!
393 for (int li
= 0; li
< mtx1
->nlink
; li
++) {
394 Link
*link1
= &mtx1
->link
[li
];
395 // Mutex *mtx2 = getMutex(link->id);
396 // FIXME(dvyukov): fast seq check
397 // FIXME(dvyukov): fast nlink != 0 check
398 // FIXME(dvyukov): fast pending check?
399 // FIXME(dvyukov): npending can be larger than kMaxMutex
400 pt
->pending
[npending
++] = *link1
;
405 void DD::Report(DDPhysicalThread
*pt
, DDLogicalThread
*lt
, int npath
) {
406 DDReport
*rep
= &pt
->rep
;
408 for (int i
= 0; i
< npath
; i
++) {
409 Link
*link
= &pt
->path
[i
];
410 Link
*link0
= &pt
->path
[i
? i
- 1 : npath
- 1];
411 rep
->loop
[i
].thr_ctx
= link
->tid
;
412 rep
->loop
[i
].mtx_ctx0
= link0
->id
;
413 rep
->loop
[i
].mtx_ctx1
= link
->id
;
414 rep
->loop
[i
].stk
[0] = flags
.second_deadlock_stack
? link
->stk0
: 0;
415 rep
->loop
[i
].stk
[1] = link
->stk1
;
417 pt
->report_pending
= true;
420 DDReport
*DD::GetReport(DDCallback
*cb
) {
421 if (!cb
->pt
->report_pending
)
423 cb
->pt
->report_pending
= false;
427 } // namespace __sanitizer
428 #endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2