//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.
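
// DDBV backs both the global lock-order graph (DeadlockDetector<DDBV>) and
// the per-thread state (DeadlockDetectorTLS<DDBV>) declared below; the
// per-thread state tracks which locks a thread currently holds so that cycle
// checks can be run against the global graph.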

struct DDPhysicalThread {
};

struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};
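
// Concrete implementation of the DDetector interface declared in
// sanitizer_deadlock_detector_interface.h.  Slow-path updates of the global
// lock-order graph are serialized by the spin lock mtx.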
struct DD : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};
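
// The detector object is placed in its own mmap-ed region and constructed
// with placement new, so it does not depend on the regular allocator.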
DDetector *DDetector::Create(const DDFlags *flags) {
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}
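
// Called before the thread starts waiting on m.  If acquiring m while holding
// the locks recorded in lt->dd would close a cycle in the lock-order graph,
// the offending edges are added (so their stack traces are available) and a
// deadlock report is filled in for this thread.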
void DD::MutexBeforeLock(DDCallback *cb,
    DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}
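
// Extracts the lock cycle that goes through m from the lock-order graph and
// records it in the calling thread's pending report; the runtime retrieves
// it later via GetReport().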
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[10];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  CHECK_GT(len, 0U);  // Hm.. cycle of 10 locks? I'd like to see that.
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = -1U, stk_to = -1U;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //    unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock:   %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}
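
// Forget the destroyed mutex: drop its node from the graph (if it still
// belongs to the current epoch) and reset its id so the memory can later be
// reused for a different mutex.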
void DD::MutexDestroy(DDCallback *cb,
    DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1