Clean up some minor white space issues in trans-decl.c and trans-expr.c
[official-gcc.git] / libsanitizer / sanitizer_common / sanitizer_deadlock_detector1.cc
blob7a318c963f749fd4a6cafef6a317634b68c871eb
1 //===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // Deadlock detector implementation based on NxN adjacency bit matrix.
9 //
10 //===----------------------------------------------------------------------===//
12 #include "sanitizer_deadlock_detector_interface.h"
13 #include "sanitizer_deadlock_detector.h"
14 #include "sanitizer_allocator_internal.h"
15 #include "sanitizer_placement_new.h"
16 #include "sanitizer_mutex.h"
18 #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
20 namespace __sanitizer {
22 typedef TwoLevelBitVector<> DDBV; // DeadlockDetector's bit vector.
// Per-physical-thread state. Detector version 1 keeps no per-OS-thread
// data (CreatePhysicalThread returns nullptr below); the type exists only
// to satisfy the DDetector interface.
struct DDPhysicalThread {
};
27 struct DDLogicalThread {
28 u64 ctx;
29 DeadlockDetectorTLS<DDBV> dd;
30 DDReport rep;
31 bool report_pending;
34 struct DD : public DDetector {
35 SpinMutex mtx;
36 DeadlockDetector<DDBV> dd;
37 DDFlags flags;
39 explicit DD(const DDFlags *flags);
41 DDPhysicalThread *CreatePhysicalThread() override;
42 void DestroyPhysicalThread(DDPhysicalThread *pt) override;
44 DDLogicalThread *CreateLogicalThread(u64 ctx) override;
45 void DestroyLogicalThread(DDLogicalThread *lt) override;
47 void MutexInit(DDCallback *cb, DDMutex *m) override;
48 void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
49 void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
50 bool trylock) override;
51 void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
52 void MutexDestroy(DDCallback *cb, DDMutex *m) override;
54 DDReport *GetReport(DDCallback *cb) override;
56 void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
57 void ReportDeadlock(DDCallback *cb, DDMutex *m);
60 DDetector *DDetector::Create(const DDFlags *flags) {
61 (void)flags;
62 void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
63 return new(mem) DD(flags);
66 DD::DD(const DDFlags *flags)
67 : flags(*flags) {
68 dd.clear();
71 DDPhysicalThread* DD::CreatePhysicalThread() {
72 return nullptr;
75 void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
78 DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
79 DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
80 lt->ctx = ctx;
81 lt->dd.clear();
82 lt->report_pending = false;
83 return lt;
86 void DD::DestroyLogicalThread(DDLogicalThread *lt) {
87 lt->~DDLogicalThread();
88 InternalFree(lt);
91 void DD::MutexInit(DDCallback *cb, DDMutex *m) {
92 m->id = 0;
93 m->stk = cb->Unwind();
96 void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
97 if (!dd.nodeBelongsToCurrentEpoch(m->id))
98 m->id = dd.newNode(reinterpret_cast<uptr>(m));
99 dd.ensureCurrentEpoch(&lt->dd);
102 void DD::MutexBeforeLock(DDCallback *cb,
103 DDMutex *m, bool wlock) {
104 DDLogicalThread *lt = cb->lt;
105 if (lt->dd.empty()) return; // This will be the first lock held by lt.
106 if (dd.hasAllEdges(&lt->dd, m->id)) return; // We already have all edges.
107 SpinMutexLock lk(&mtx);
108 MutexEnsureID(lt, m);
109 if (dd.isHeld(&lt->dd, m->id))
110 return; // FIXME: allow this only for recursive locks.
111 if (dd.onLockBefore(&lt->dd, m->id)) {
112 // Actually add this edge now so that we have all the stack traces.
113 dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
114 ReportDeadlock(cb, m);
118 void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
119 DDLogicalThread *lt = cb->lt;
120 uptr path[10];
121 uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
122 CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
123 CHECK_EQ(m->id, path[0]);
124 lt->report_pending = true;
125 DDReport *rep = &lt->rep;
126 rep->n = len;
127 for (uptr i = 0; i < len; i++) {
128 uptr from = path[i];
129 uptr to = path[(i + 1) % len];
130 DDMutex *m0 = (DDMutex*)dd.getData(from);
131 DDMutex *m1 = (DDMutex*)dd.getData(to);
133 u32 stk_from = -1U, stk_to = -1U;
134 int unique_tid = 0;
135 dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
136 // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
137 // unique_tid);
138 rep->loop[i].thr_ctx = unique_tid;
139 rep->loop[i].mtx_ctx0 = m0->ctx;
140 rep->loop[i].mtx_ctx1 = m1->ctx;
141 rep->loop[i].stk[0] = stk_to;
142 rep->loop[i].stk[1] = stk_from;
146 void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
147 DDLogicalThread *lt = cb->lt;
148 u32 stk = 0;
149 if (flags.second_deadlock_stack)
150 stk = cb->Unwind();
151 // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
152 if (dd.onFirstLock(&lt->dd, m->id, stk))
153 return;
154 if (dd.onLockFast(&lt->dd, m->id, stk))
155 return;
157 SpinMutexLock lk(&mtx);
158 MutexEnsureID(lt, m);
159 if (wlock) // Only a recursive rlock may be held.
160 CHECK(!dd.isHeld(&lt->dd, m->id));
161 if (!trylock)
162 dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
163 dd.onLockAfter(&lt->dd, m->id, stk);
166 void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
167 // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
168 dd.onUnlock(&cb->lt->dd, m->id);
171 void DD::MutexDestroy(DDCallback *cb,
172 DDMutex *m) {
173 if (!m->id) return;
174 SpinMutexLock lk(&mtx);
175 if (dd.nodeBelongsToCurrentEpoch(m->id))
176 dd.removeNode(m->id);
177 m->id = 0;
180 DDReport *DD::GetReport(DDCallback *cb) {
181 if (!cb->lt->report_pending)
182 return nullptr;
183 cb->lt->report_pending = false;
184 return &cb->lt->rep;
187 } // namespace __sanitizer
188 #endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1