//===-- sanitizer_deadlock_detector1.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//
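//
// Each mutex is mapped to a node of a lock-order graph whose edges are kept
// in an NxN bit matrix (TwoLevelBitVector). Every thread tracks the set of
// mutexes it currently holds in DeadlockDetectorTLS; acquiring a mutex while
// holding others adds edges from the held mutexes to the new one, and a cycle
// in the graph is reported as a potential deadlock via DDReport.
//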
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.
struct DDPhysicalThread {
};

struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};
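// A single DD instance serves the whole process: the shared lock-order graph
// (dd) is only mutated with `mtx` held, while the hasAllEdges/onFirstLock/
// onLockFast fast paths below let the common case avoid the spin lock.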
struct DD final : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};
DDetector *DDetector::Create(const DDFlags *flags) {
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}
DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}
DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}
DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}
void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}
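// Called before the thread starts waiting on `m`. If taking `m` would close a
// cycle in the lock-order graph, the missing edges are added eagerly (so that
// all stack traces are available) and a deadlock report is generated.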
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[20];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  if (len == 0U) {
    // A cycle of 20+ locks? Well, that's a bit odd...
    Printf("WARNING: too long mutex cycle found\n");
    return;
  }
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  len = Min<uptr>(len, DDReport::kMaxLoopSize);
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = 0, stk_to = 0;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //        unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}
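// Called after `m` has been acquired: record edges from every mutex the thread
// already holds to `m`, then mark `m` as held. Edges are skipped for trylock,
// presumably because a trylock never blocks waiting for the mutex.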
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}
}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1