//===-- tsan_mutex.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
11 #include "sanitizer_common/sanitizer_libc.h"
12 #include "tsan_mutex.h"
13 #include "tsan_platform.h"
// Simple reader-writer spin-mutex. Optimized for not-so-contended case.
// Readers have preference, can possibly starve writers.

// The table fixes what mutexes can be locked under what mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then Report mutex can be locked while under Threads mutex.
// The leaf mutexes can be locked under any other mutexes.
// Recursive locking is not supported.
26 #if TSAN_DEBUG && !TSAN_GO
27 const MutexType MutexTypeLeaf
= (MutexType
)-1;
28 static MutexType CanLockTab
[MutexTypeCount
][MutexTypeCount
] = {
29 /*0 MutexTypeInvalid*/ {},
30 /*1 MutexTypeTrace*/ {MutexTypeLeaf
},
31 /*2 MutexTypeThreads*/ {MutexTypeReport
},
32 /*3 MutexTypeReport*/ {MutexTypeSyncVar
,
33 MutexTypeMBlock
, MutexTypeJavaMBlock
},
34 /*4 MutexTypeSyncVar*/ {MutexTypeDDetector
},
35 /*5 MutexTypeSyncTab*/ {}, // unused
36 /*6 MutexTypeSlab*/ {MutexTypeLeaf
},
37 /*7 MutexTypeAnnotations*/ {},
38 /*8 MutexTypeAtExit*/ {MutexTypeSyncVar
},
39 /*9 MutexTypeMBlock*/ {MutexTypeSyncVar
},
40 /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar
},
41 /*11 MutexTypeDDetector*/ {},
44 static bool CanLockAdj
[MutexTypeCount
][MutexTypeCount
];
// Builds the "can lock" adjacency matrix from CanLockTab, expands leaf
// mutexes, and verifies that the resulting lock-order graph is acyclic
// (a cycle would make the run-time deadlock checks themselves unsound).
// No-op in release builds and in Go mode.
// NOTE(review): several statements of this body (locals, continue/break,
// closing braces, the Die() call) were dropped by the extraction that
// produced this dump; they were reconstructed to match the visible
// structure -- verify against the upstream runtime sources.
void InitializeMutex() {
#if TSAN_DEBUG && !TSAN_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};    // number of non-leaf outgoing edges per mutex type
  bool leaf[N] = {};  // whether type i is declared as a leaf
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;  // rest of the row is empty (zero-initialized)
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  // A leaf mutex must not list any lockable-under mutexes of its own.
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes: a leaf can be locked under any other non-leaf mutex.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  // Floyd-Warshall-style closure: i->k and k->j imply i->j.
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  // Debug dump of the matrices (disabled by default).
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
125 InternalDeadlockDetector::InternalDeadlockDetector() {
126 // Rely on zero initialization because some mutexes can be locked before ctor.
129 #if TSAN_DEBUG && !TSAN_GO
130 void InternalDeadlockDetector::Lock(MutexType t
) {
131 // Printf("LOCK %d @%zu\n", t, seq_ + 1);
132 CHECK_GT(t
, MutexTypeInvalid
);
133 CHECK_LT(t
, MutexTypeCount
);
135 u64 max_idx
= MutexTypeInvalid
;
136 for (int i
= 0; i
!= MutexTypeCount
; i
++) {
139 CHECK_NE(locked_
[i
], max_seq
);
140 if (max_seq
< locked_
[i
]) {
141 max_seq
= locked_
[i
];
146 if (max_idx
== MutexTypeInvalid
)
148 // Printf(" last %d @%zu\n", max_idx, max_seq);
149 if (!CanLockAdj
[max_idx
][t
]) {
150 Printf("ThreadSanitizer: internal deadlock detected\n");
151 Printf("ThreadSanitizer: can't lock %d while under %zu\n",
157 void InternalDeadlockDetector::Unlock(MutexType t
) {
158 // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
163 void InternalDeadlockDetector::CheckNoLocks() {
164 for (int i
= 0; i
!= MutexTypeCount
; i
++) {
165 CHECK_EQ(locked_
[i
], 0);
170 void CheckNoLocks(ThreadState
*thr
) {
171 #if TSAN_DEBUG && !TSAN_GO
172 thr
->internal_deadlock_detector
.CheckNoLocks();
176 const uptr kUnlocked
= 0;
177 const uptr kWriteLock
= 1;
178 const uptr kReadLock
= 2;
187 if (iter_
++ < kActiveSpinIters
)
188 proc_yield(kActiveSpinCnt
);
190 internal_sched_yield();
194 u64
Contention() const {
195 u64 active
= iter_
% kActiveSpinIters
;
196 u64 passive
= iter_
- active
;
197 return active
+ 10 * passive
;
202 static const int kActiveSpinIters
= 10;
203 static const int kActiveSpinCnt
= 20;
206 Mutex::Mutex(MutexType type
, StatType stat_type
) {
207 CHECK_GT(type
, MutexTypeInvalid
);
208 CHECK_LT(type
, MutexTypeCount
);
212 #if TSAN_COLLECT_STATS
213 stat_type_
= stat_type
;
215 atomic_store(&state_
, kUnlocked
, memory_order_relaxed
);
219 CHECK_EQ(atomic_load(&state_
, memory_order_relaxed
), kUnlocked
);
223 #if TSAN_DEBUG && !TSAN_GO
224 cur_thread()->internal_deadlock_detector
.Lock(type_
);
226 uptr cmp
= kUnlocked
;
227 if (atomic_compare_exchange_strong(&state_
, &cmp
, kWriteLock
,
228 memory_order_acquire
))
230 for (Backoff backoff
; backoff
.Do();) {
231 if (atomic_load(&state_
, memory_order_relaxed
) == kUnlocked
) {
233 if (atomic_compare_exchange_weak(&state_
, &cmp
, kWriteLock
,
234 memory_order_acquire
)) {
235 #if TSAN_COLLECT_STATS && !TSAN_GO
236 StatInc(cur_thread(), stat_type_
, backoff
.Contention());
244 void Mutex::Unlock() {
245 uptr prev
= atomic_fetch_sub(&state_
, kWriteLock
, memory_order_release
);
247 DCHECK_NE(prev
& kWriteLock
, 0);
248 #if TSAN_DEBUG && !TSAN_GO
249 cur_thread()->internal_deadlock_detector
.Unlock(type_
);
253 void Mutex::ReadLock() {
254 #if TSAN_DEBUG && !TSAN_GO
255 cur_thread()->internal_deadlock_detector
.Lock(type_
);
257 uptr prev
= atomic_fetch_add(&state_
, kReadLock
, memory_order_acquire
);
258 if ((prev
& kWriteLock
) == 0)
260 for (Backoff backoff
; backoff
.Do();) {
261 prev
= atomic_load(&state_
, memory_order_acquire
);
262 if ((prev
& kWriteLock
) == 0) {
263 #if TSAN_COLLECT_STATS && !TSAN_GO
264 StatInc(cur_thread(), stat_type_
, backoff
.Contention());
271 void Mutex::ReadUnlock() {
272 uptr prev
= atomic_fetch_sub(&state_
, kReadLock
, memory_order_release
);
274 DCHECK_EQ(prev
& kWriteLock
, 0);
275 DCHECK_GT(prev
& ~kWriteLock
, 0);
276 #if TSAN_DEBUG && !TSAN_GO
277 cur_thread()->internal_deadlock_detector
.Unlock(type_
);
281 void Mutex::CheckLocked() {
282 CHECK_NE(atomic_load(&state_
, memory_order_relaxed
), 0);
285 } // namespace __tsan