//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//
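
// How it works, in brief (a summary inferred from the calls in this file;
// the precise semantics live in sanitizer_deadlock_detector.h): each mutex
// is a node in a directed graph whose edges record observed lock order; an
// edge A->B is added when a thread acquires B while holding A. If acquiring
// a mutex would close a cycle in that graph, the lock order is inconsistent
// and can deadlock, so a report is produced. The graph is an NxN adjacency
// bit matrix (rows are TwoLevelBitVectors), so reachability queries reduce
// to bit-vector operations.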

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.

struct DDPhysicalThread {
};

struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};

struct DD : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};
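
// Note on locking: the shared graph in `dd` is protected by `mtx`, while the
// per-thread DeadlockDetectorTLS state in DDLogicalThread is only touched by
// its owning thread; that is what lets the fast paths below skip `mtx`.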

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}
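
// A minimal sketch of how a client of DDetector might drive this object.
// This is illustrative only: the callback subclass and the surrounding
// lock/unlock calls are assumptions, not code from this file. Real clients
// implement DDCallback and call these hooks around their own mutex
// operations:
//
//   DDetector *d = DDetector::Create(&flags);
//   MyCallback cb;                    // hypothetical DDCallback subclass
//   cb.lt = d->CreateLogicalThread(/*ctx=*/0);
//   d->MutexInit(&cb, &m);            // once, when the mutex is created
//   d->MutexBeforeLock(&cb, &m, /*wlock=*/true);
//   // ... actually acquire the mutex ...
//   d->MutexAfterLock(&cb, &m, /*wlock=*/true, /*trylock=*/false);
//   if (DDReport *rep = d->GetReport(&cb))
//     ;  // a lock-order cycle was detected; render rep->loop[0..n)
//   d->MutexBeforeUnlock(&cb, &m, /*wlock=*/true);
//   // ... actually release the mutex ...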

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}
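
// The graph has a fixed number of node slots, so ids are versioned by
// "epochs": when the detector runs out of slots it starts a new epoch and
// recycles ids. MutexEnsureID lazily (re)assigns a node to the mutex and
// brings the thread's cached bit vectors up to the current epoch. (The
// recycling itself is implemented in sanitizer_deadlock_detector.h; this
// note just summarizes why the check is needed.)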

void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}
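
// onLockBefore() returns true when some currently-held mutex is reachable
// from m through recorded lock-order edges, i.e. acquiring m now would close
// a cycle. Concretely: if thread 1 locked A then B (edge A->B) and thread 2,
// holding B, is about to lock A, the recorded A->B edge completes the cycle
// A->B->A and a report is generated before the lock is actually taken.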

void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[20];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  if (len == 0U) {
    // A cycle of 20+ locks? Well, that's a bit odd...
    Printf("WARNING: too long mutex cycle found\n");
    return;
  }
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  len = Min<uptr>(len, DDReport::kMaxLoopSize);
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = -1U, stk_to = -1U;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //        unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}
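
// Each rep->loop[i] describes one edge of the detected cycle: the thread
// that created the edge, the client contexts of the two mutexes, and the
// stack ids at which the two ends were locked. For the two-lock example
// above the report would contain two entries, A->B and B->A; rendering this
// into a human-readable message is left to the client of DDetector.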

void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}
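
// onFirstLock() and onLockFast() are fast paths that only update the
// thread-local state; the global mutex is taken only when new edges may have
// to be added to the shared graph. Note that trylock acquisitions add no
// edges: a trylock never blocks, so it cannot contribute to a deadlock
// cycle, though the mutex is still marked as held.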

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}

void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1