libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc

//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//

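// Each mutex is assigned a numeric id. When a thread that already holds
// mutex A acquires mutex B, a directed edge A->B is recorded in A's
// adjacency list (Mutex::link below). Before an acquisition that adds or
// refreshes an edge, CycleCheck runs an iterative depth-first search for a
// cycle through the mutex being acquired; any cycle found is reported as a
// potential deadlock.
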
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;

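// Mutex ids index a two-level table: id / kL2Size selects one of kL1Size
// lazily mmap-ed chunks and id % kL2Size an entry within that chunk, so up
// to kMaxMutex mutexes can be tracked without a large upfront allocation.
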
struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};

struct Mutex {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

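// Mutex::seq is incremented when a mutex is destroyed and its id recycled;
// each Link stores the seq of its target mutex, so links into a destroyed
// or reused mutex are recognized as stale and skipped during traversal.
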
struct DD : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  Mutex *getMutex(u32 id);
  u32 getMutexId(Mutex *m);

  DDFlags flags;

  Mutex* mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen = 0;
};

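// The detector is driven by a client tool (e.g. TSan) through the DDCallback
// interface declared in sanitizer_deadlock_detector_interface.h. A rough,
// illustrative sketch of the call sequence around one lock operation
// (client-side names such as my_cb and real_lock are hypothetical):
//
//   DDetector *dd = DDetector::Create(&flags);
//   DDLogicalThread *lt = dd->CreateLogicalThread(thread_ctx);
//   ...
//   dd->MutexBeforeLock(my_cb, m, /*wlock=*/true);  // may run CycleCheck
//   real_lock(m);
//   dd->MutexAfterLock(my_cb, m, /*wlock=*/true, /*trylock=*/false);
//   if (DDReport *rep = dd->GetReport(my_cb))
//     ...  // client formats and prints the deadlock report
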
DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

Mutex *DD::getMutex(u32 id) {
  return &mutex[id / kL2Size][id % kL2Size];
}

u32 DD::getMutexId(Mutex *m) {
  for (int i = 0; i < kL1Size; i++) {
    Mutex *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}

u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
          "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}

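// Records an edge from every mutex the thread currently holds to m in the
// holders' adjacency lists; if a new edge was added or refreshed and m
// itself has outgoing edges, searches for a cycle through m.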
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  Mutex *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    Mutex *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}

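// Tracks ownership for write locks (to recognize recursive acquisition) and,
// for trylocks (which presumably do not go through MutexBeforeLock, since a
// trylock never blocks), adds the mutex to the per-thread lock list here.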
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (cb->lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}

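// Iterative depth-first search for a cycle through m. pt->pending serves as
// an explicit DFS stack; a sentinel Link(kEndId) is pushed beneath a node's
// children so that popping it removes the node from pt->path once the
// node's subtree has been fully explored.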
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    Mutex *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // Mutex *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}

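// Copies the discovered cycle into the per-thread report. Each loop entry i
// describes one edge of the cycle: thread tid acquired mutex mtx_ctx1 while
// holding mtx_ctx0 (the previous mutex on the path, wrapping around).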
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2