//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
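//
// Design overview:
//  - Every mutex observed by the detector gets a small integer id that
//    addresses a descriptor in a two-level table (kL1Size x kL2Size).
//  - Each descriptor keeps an adjacency list of up to kMaxLink Links: edges
//    "this mutex was held while that one was acquired".
//  - Before a blocking acquisition, new edges from all currently held mutexes
//    are recorded, and an iterative DFS (CycleCheck) looks for a cycle; a
//    cycle in the lock-order graph is reported as a potential deadlock.
//  - Descriptors carry a generation counter (seq); destroying a mutex bumps
//    it, which lazily invalidates edges still pointing at the recycled id.
//
// Typical call sequence, as driven through DDCallback (sketch only; the
// callback wiring is declared in sanitizer_deadlock_detector_interface.h and
// supplied by the embedding tool, e.g. TSan):
//
//   DDetector *dd = DDetector::Create(&flags);
//   dd->MutexInit(cb, m);                          // once per mutex
//   dd->MutexBeforeLock(cb, m, /*wlock=*/true);    // may prepare a report
//   dd->MutexAfterLock(cb, m, /*wlock=*/true, /*trylock=*/false);
//   if (DDReport *rep = dd->GetReport(cb))
//     ...                                          // cycle in rep->loop[0..n)
//   dd->MutexBeforeUnlock(cb, m, /*wlock=*/true);
//   dd->MutexDestroy(cb, m);
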
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;  // Max simultaneously held locks per thread.
const u32 kNoId = -1;        // "No id assigned yet" sentinel (0xffffffff).
const u32 kEndId = -2;       // Path-pop sentinel used by CycleCheck's DFS.
const int kMaxLink = 8;      // Max outgoing edges stored per mutex.
const int kL1Size = 1024;    // First-level size of the mutex table.
const int kL2Size = 1024;    // Size of each lazily mapped second level.
const int kMaxMutex = kL1Size * kL2Size;

struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

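// A Link is a directed edge in the mutex graph: "while holding the mutex that
// owns this link, some thread acquired the mutex identified by 'id'". It
// records the acquiring thread (tid), the target's generation (seq) so stale
// edges can be skipped, and two stacks: where the source mutex was acquired
// (stk0) and where the target was acquired (stk1).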
struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

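// Per-physical-thread (OS thread) scratch state for the cycle search and the
// pending report. The arrays are kMaxMutex entries each, so this structure is
// large and is mapped with MmapOrDie rather than allocated inline.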
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

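// A currently held mutex (its id, plus the acquisition stack when the
// second_deadlock_stack flag is set). DDLogicalThread tracks one logical
// (user) thread and the set of mutexes it currently holds.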
struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};

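// Per-mutex descriptor: a spin lock guarding the adjacency list, a generation
// counter (seq) bumped on destruction to invalidate stale edges, and up to
// kMaxLink outgoing edges.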
struct Mutex {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

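// The detector itself: 'mutex' is the two-level descriptor table indexed by
// id, 'free_id' recycles ids of destroyed mutexes, and 'mtx' guards the id
// allocator state.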
struct DD : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  Mutex *getMutex(u32 id);
  u32 getMutexId(Mutex *m);

  DDFlags flags;

  Mutex* mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags)
    , free_id(1024) {
  id_gen = 0;
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

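// The id encodes a position in the two-level table: the high bits select the
// lazily mapped second-level page, the low bits the slot within it.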
Mutex *DD::getMutex(u32 id) {
  return &mutex[id / kL2Size][id % kL2Size];
}

u32 DD::getMutexId(Mutex *m) {
  for (int i = 0; i < kL1Size; i++) {
    Mutex *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}

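// Ids of destroyed mutexes are recycled through free_id; otherwise a fresh id
// comes from id_gen, and a new second-level page is mapped whenever the
// counter crosses a kL2Size boundary.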
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
          "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n",
      cb->lt->ctx, id);
  return id;
}

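// Called before a blocking lock. Appends m to the thread's held set, records
// an edge held-mutex -> m for every mutex already held, and runs the cycle
// check only when at least one edge is genuinely new and m itself has
// outgoing edges (otherwise no new cycle can have appeared).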
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  Mutex *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    Mutex *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}

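// Called after a lock is acquired. Tracks the owner for write locks so that
// recursive acquisitions are recognized and, for trylocks only, appends m to
// the held set here: a trylock cannot block, so MutexBeforeLock recorded
// nothing for it.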
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}

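// Called before a mutex is released. Drops the owner once the recursion count
// reaches zero, then removes m from the held set by swapping in the last
// entry (order within the set does not matter).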
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (cb->lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

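// Called on mutex destruction. Bumping the descriptor's seq invalidates every
// Link that still points at this id (CycleCheck compares link.seq against the
// live descriptor), so the id can be safely recycled through free_id.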
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}

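// Iterative DFS over the mutex graph starting from m. 'pending' is the
// explicit work stack; a kEndId sentinel pushed after each expanded node pops
// one entry off 'path' on the way back up. Reaching m->id again means the
// current 'path' is a lock-order cycle, i.e. a potential deadlock.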
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    Mutex *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;  // Stale edge: the target mutex was destroyed meanwhile.
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;  // No outgoing edges, so it cannot be part of a cycle.
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // Mutex *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}

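// Converts the DFS path into a DDReport: one loop entry per edge, carrying
// the acquiring thread and, when second_deadlock_stack is set, both
// acquisition stacks.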
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2