//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.

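// The detector keeps a single global lock graph (DD::dd below, an NxN
// adjacency bit matrix over mutex nodes) guarded by DD::mtx, plus a
// per-thread cache of held locks and known edges (DDLogicalThread::dd).
// The per-thread cache lets the common lock/unlock paths run without
// taking the global mutex; the slow paths update the global graph and
// check it for cycles.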
struct DDPhysicalThread {
};

struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};

struct DD : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};

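// Rough sketch of how a client tool is expected to drive this interface
// (cb is the tool's DDCallback, m its DDMutex; the local names below are
// illustrative only):
//   DDetector *d = DDetector::Create(&flags);
//   DDLogicalThread *lt = d->CreateLogicalThread(thread_ctx);  // per thread
//   d->MutexInit(cb, m);                                       // per mutex
//   d->MutexBeforeLock(cb, m, wlock);   // may prepare a deadlock report
//   d->MutexAfterLock(cb, m, wlock, trylock);
//   ...
//   d->MutexBeforeUnlock(cb, m, wlock);
//   if (DDReport *rep = d->GetReport(cb))
//     ...  // report the cycle described by rep->loop[0..rep->n-1]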
DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

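// Resets the mutex's graph node id and records the stack at which the mutex
// was initialized.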
void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

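// Gives the mutex a node id in the current epoch of the global graph if it
// does not have one yet, and syncs the calling thread's TLS to that epoch.
// Called with mtx held.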
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}

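// Called before the thread starts waiting on the mutex. Fast-exits without
// the global lock when lt holds nothing yet or when the edges between m and
// the locks lt holds are already known. Otherwise, if acquiring m would close
// a cycle with the locks lt already holds, the new edges are added immediately
// (so that all stack traces are available) and a report is prepared for
// GetReport.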
void DD::MutexBeforeLock(DDCallback *cb,
    DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}

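// Fills lt->rep with the cycle that goes through m (at most 10 locks, the
// size of path[]). For each edge of the cycle it records the unique tid of
// the thread that created the edge, the contexts of the two mutexes involved
// and the two stack ids stored for the edge.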
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[10];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  CHECK_GT(len, 0U);  // Hm.. cycle of 10 locks? I'd like to see that.
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = -1U, stk_to = -1U;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //        unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}

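// Called once the mutex is actually acquired. When second_deadlock_stack is
// set, the acquisition stack is captured here so reports can show both stacks
// of an edge. Two lock-free fast paths (onFirstLock, onLockFast) are tried
// before falling back to the global mtx; on the slow path new edges are added
// from the locks lt holds to m, except for trylock acquisitions, which never
// block and therefore cannot contribute to a deadlock.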
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock:   %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

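// Removes m from the set of locks held by the calling thread.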
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}

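// Drops the mutex's node from the global graph (if it was assigned one in the
// current epoch) and resets its id.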
void DD::MutexDestroy(DDCallback *cb,
    DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

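// Returns the report prepared by ReportDeadlock for this thread, if any, and
// clears the pending flag so that the same report is not returned twice.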
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1