//===-- tsan_mutex.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
18 // Simple reader-writer spin-mutex. Optimized for not-so-contended case.
19 // Readers have preference, can possibly starvate writers.
21 // The table fixes what mutexes can be locked under what mutexes.
22 // E.g. if the row for MutexTypeThreads contains MutexTypeReport,
23 // then Report mutex can be locked while under Threads mutex.
24 // The leaf mutexes can be locked under any other mutexes.
25 // Recursive locking is not supported.
26 #if SANITIZER_DEBUG && !SANITIZER_GO
27 const MutexType MutexTypeLeaf
= (MutexType
)-1;
28 static MutexType CanLockTab
[MutexTypeCount
][MutexTypeCount
] = {
29 /*0 MutexTypeInvalid*/ {},
30 /*1 MutexTypeTrace*/ {MutexTypeLeaf
},
31 /*2 MutexTypeThreads*/ {MutexTypeReport
},
32 /*3 MutexTypeReport*/ {MutexTypeSyncVar
,
33 MutexTypeMBlock
, MutexTypeJavaMBlock
},
34 /*4 MutexTypeSyncVar*/ {MutexTypeDDetector
},
35 /*5 MutexTypeSyncTab*/ {}, // unused
36 /*6 MutexTypeSlab*/ {MutexTypeLeaf
},
37 /*7 MutexTypeAnnotations*/ {},
38 /*8 MutexTypeAtExit*/ {MutexTypeSyncVar
},
39 /*9 MutexTypeMBlock*/ {MutexTypeSyncVar
},
40 /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar
},
41 /*11 MutexTypeDDetector*/ {},
42 /*12 MutexTypeFired*/ {MutexTypeLeaf
},
43 /*13 MutexTypeRacy*/ {MutexTypeLeaf
},
46 static bool CanLockAdj
[MutexTypeCount
][MutexTypeCount
];
// Builds the "can lock" adjacency matrix from CanLockTab and verifies that
// the resulting lock-order graph is acyclic (otherwise the internal deadlock
// detector would be unsound). No-op outside of debug builds.
void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        // The leaf marker may appear at most once per row.
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  // A leaf row must not list any other successors.
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes: a leaf can be locked under any other (non-leaf) mutex.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure (Floyd-Warshall over the boolean matrix).
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  // Debugging aid: dump the adjacency matrix and its closure.
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic: mutex i reachable from itself means
  // there is no consistent lock order.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
127 InternalDeadlockDetector::InternalDeadlockDetector() {
128 // Rely on zero initialization because some mutexes can be locked before ctor.
#if SANITIZER_DEBUG && !SANITIZER_GO
// Records acquisition of mutex type t and checks it is permitted while
// holding the most recently acquired mutex (per CanLockAdj). Aborts on
// lock-order violation.
void InternalDeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
  // Find the most recently locked mutex (largest acquisition sequence number).
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = (MutexType)i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;  // no other mutex is held; nothing to check
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
               t, (uptr)max_idx);
    CHECK(0);
  }
}
#endif
#if SANITIZER_DEBUG && !SANITIZER_GO
// Records release of mutex type t; t must currently be held.
void InternalDeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}
#endif
#if SANITIZER_DEBUG && !SANITIZER_GO
// Verifies that the current thread holds no internal mutexes.
void InternalDeadlockDetector::CheckNoLocks() {
  for (int i = 0; i != MutexTypeCount; i++) {
    CHECK_EQ(locked_[i], 0);
  }
}
#endif
172 void CheckNoLocks(ThreadState
*thr
) {
173 #if SANITIZER_DEBUG && !SANITIZER_GO
174 thr
->internal_deadlock_detector
.CheckNoLocks();
178 const uptr kUnlocked
= 0;
179 const uptr kWriteLock
= 1;
180 const uptr kReadLock
= 2;
189 if (iter_
++ < kActiveSpinIters
)
190 proc_yield(kActiveSpinCnt
);
192 internal_sched_yield();
196 u64
Contention() const {
197 u64 active
= iter_
% kActiveSpinIters
;
198 u64 passive
= iter_
- active
;
199 return active
+ 10 * passive
;
204 static const int kActiveSpinIters
= 10;
205 static const int kActiveSpinCnt
= 20;
208 Mutex::Mutex(MutexType type
, StatType stat_type
) {
209 CHECK_GT(type
, MutexTypeInvalid
);
210 CHECK_LT(type
, MutexTypeCount
);
214 #if TSAN_COLLECT_STATS
215 stat_type_
= stat_type
;
217 atomic_store(&state_
, kUnlocked
, memory_order_relaxed
);
221 CHECK_EQ(atomic_load(&state_
, memory_order_relaxed
), kUnlocked
);
225 #if SANITIZER_DEBUG && !SANITIZER_GO
226 cur_thread()->internal_deadlock_detector
.Lock(type_
);
228 uptr cmp
= kUnlocked
;
229 if (atomic_compare_exchange_strong(&state_
, &cmp
, kWriteLock
,
230 memory_order_acquire
))
232 for (Backoff backoff
; backoff
.Do();) {
233 if (atomic_load(&state_
, memory_order_relaxed
) == kUnlocked
) {
235 if (atomic_compare_exchange_weak(&state_
, &cmp
, kWriteLock
,
236 memory_order_acquire
)) {
237 #if TSAN_COLLECT_STATS && !SANITIZER_GO
238 StatInc(cur_thread(), stat_type_
, backoff
.Contention());
246 void Mutex::Unlock() {
247 uptr prev
= atomic_fetch_sub(&state_
, kWriteLock
, memory_order_release
);
249 DCHECK_NE(prev
& kWriteLock
, 0);
250 #if SANITIZER_DEBUG && !SANITIZER_GO
251 cur_thread()->internal_deadlock_detector
.Unlock(type_
);
255 void Mutex::ReadLock() {
256 #if SANITIZER_DEBUG && !SANITIZER_GO
257 cur_thread()->internal_deadlock_detector
.Lock(type_
);
259 uptr prev
= atomic_fetch_add(&state_
, kReadLock
, memory_order_acquire
);
260 if ((prev
& kWriteLock
) == 0)
262 for (Backoff backoff
; backoff
.Do();) {
263 prev
= atomic_load(&state_
, memory_order_acquire
);
264 if ((prev
& kWriteLock
) == 0) {
265 #if TSAN_COLLECT_STATS && !SANITIZER_GO
266 StatInc(cur_thread(), stat_type_
, backoff
.Contention());
273 void Mutex::ReadUnlock() {
274 uptr prev
= atomic_fetch_sub(&state_
, kReadLock
, memory_order_release
);
276 DCHECK_EQ(prev
& kWriteLock
, 0);
277 DCHECK_GT(prev
& ~kWriteLock
, 0);
278 #if SANITIZER_DEBUG && !SANITIZER_GO
279 cur_thread()->internal_deadlock_detector
.Unlock(type_
);
283 void Mutex::CheckLocked() {
284 CHECK_NE(atomic_load(&state_
, memory_order_relaxed
), 0);
287 } // namespace __tsan