//===-- tsan_mutex.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
18 // Simple reader-writer spin-mutex. Optimized for not-so-contended case.
19 // Readers have preference, can possibly starvate writers.
21 // The table fixes what mutexes can be locked under what mutexes.
22 // E.g. if the row for MutexTypeThreads contains MutexTypeReport,
23 // then Report mutex can be locked while under Threads mutex.
24 // The leaf mutexes can be locked under any other mutexes.
25 // Recursive locking is not supported.
26 #if SANITIZER_DEBUG && !SANITIZER_GO
27 const MutexType MutexTypeLeaf
= (MutexType
)-1;
28 static MutexType CanLockTab
[MutexTypeCount
][MutexTypeCount
] = {
29 /*0 MutexTypeInvalid*/ {},
30 /*1 MutexTypeTrace*/ {MutexTypeLeaf
},
31 /*2 MutexTypeThreads*/ {MutexTypeReport
},
32 /*3 MutexTypeReport*/ {MutexTypeSyncVar
,
33 MutexTypeMBlock
, MutexTypeJavaMBlock
},
34 /*4 MutexTypeSyncVar*/ {MutexTypeDDetector
},
35 /*5 MutexTypeSyncTab*/ {}, // unused
36 /*6 MutexTypeSlab*/ {MutexTypeLeaf
},
37 /*7 MutexTypeAnnotations*/ {},
38 /*8 MutexTypeAtExit*/ {MutexTypeSyncVar
},
39 /*9 MutexTypeMBlock*/ {MutexTypeSyncVar
},
40 /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar
},
41 /*11 MutexTypeDDetector*/ {},
42 /*12 MutexTypeFired*/ {MutexTypeLeaf
},
43 /*13 MutexTypeRacy*/ {MutexTypeLeaf
},
44 /*14 MutexTypeGlobalProc*/ {},
47 static bool CanLockAdj
[MutexTypeCount
][MutexTypeCount
];
// Expands CanLockTab into the CanLockAdj adjacency matrix, applies leaf
// semantics, builds the transitive closure and verifies that the resulting
// lock-order graph is acyclic (otherwise internal deadlock is possible).
// No-op unless SANITIZER_DEBUG && !SANITIZER_GO.
void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        // A type may be declared leaf at most once.
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  // A leaf mutex must not allow anything to be locked under it.
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes: lockable under every other (non-leaf, valid) mutex.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure (Floyd-Warshall style).
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  // Debug dump of the lock-order graph and its closure.
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
128 InternalDeadlockDetector::InternalDeadlockDetector() {
129 // Rely on zero initialization because some mutexes can be locked before ctor.
132 #if SANITIZER_DEBUG && !SANITIZER_GO
133 void InternalDeadlockDetector::Lock(MutexType t
) {
134 // Printf("LOCK %d @%zu\n", t, seq_ + 1);
135 CHECK_GT(t
, MutexTypeInvalid
);
136 CHECK_LT(t
, MutexTypeCount
);
138 u64 max_idx
= MutexTypeInvalid
;
139 for (int i
= 0; i
!= MutexTypeCount
; i
++) {
142 CHECK_NE(locked_
[i
], max_seq
);
143 if (max_seq
< locked_
[i
]) {
144 max_seq
= locked_
[i
];
149 if (max_idx
== MutexTypeInvalid
)
151 // Printf(" last %d @%zu\n", max_idx, max_seq);
152 if (!CanLockAdj
[max_idx
][t
]) {
153 Printf("ThreadSanitizer: internal deadlock detected\n");
154 Printf("ThreadSanitizer: can't lock %d while under %zu\n",
160 void InternalDeadlockDetector::Unlock(MutexType t
) {
161 // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
166 void InternalDeadlockDetector::CheckNoLocks() {
167 for (int i
= 0; i
!= MutexTypeCount
; i
++) {
168 CHECK_EQ(locked_
[i
], 0);
173 void CheckNoLocks(ThreadState
*thr
) {
174 #if SANITIZER_DEBUG && !SANITIZER_GO
175 thr
->internal_deadlock_detector
.CheckNoLocks();
179 const uptr kUnlocked
= 0;
180 const uptr kWriteLock
= 1;
181 const uptr kReadLock
= 2;
190 if (iter_
++ < kActiveSpinIters
)
191 proc_yield(kActiveSpinCnt
);
193 internal_sched_yield();
197 u64
Contention() const {
198 u64 active
= iter_
% kActiveSpinIters
;
199 u64 passive
= iter_
- active
;
200 return active
+ 10 * passive
;
205 static const int kActiveSpinIters
= 10;
206 static const int kActiveSpinCnt
= 20;
209 Mutex::Mutex(MutexType type
, StatType stat_type
) {
210 CHECK_GT(type
, MutexTypeInvalid
);
211 CHECK_LT(type
, MutexTypeCount
);
215 #if TSAN_COLLECT_STATS
216 stat_type_
= stat_type
;
218 atomic_store(&state_
, kUnlocked
, memory_order_relaxed
);
222 CHECK_EQ(atomic_load(&state_
, memory_order_relaxed
), kUnlocked
);
226 #if SANITIZER_DEBUG && !SANITIZER_GO
227 cur_thread()->internal_deadlock_detector
.Lock(type_
);
229 uptr cmp
= kUnlocked
;
230 if (atomic_compare_exchange_strong(&state_
, &cmp
, kWriteLock
,
231 memory_order_acquire
))
233 for (Backoff backoff
; backoff
.Do();) {
234 if (atomic_load(&state_
, memory_order_relaxed
) == kUnlocked
) {
236 if (atomic_compare_exchange_weak(&state_
, &cmp
, kWriteLock
,
237 memory_order_acquire
)) {
238 #if TSAN_COLLECT_STATS && !SANITIZER_GO
239 StatInc(cur_thread(), stat_type_
, backoff
.Contention());
247 void Mutex::Unlock() {
248 uptr prev
= atomic_fetch_sub(&state_
, kWriteLock
, memory_order_release
);
250 DCHECK_NE(prev
& kWriteLock
, 0);
251 #if SANITIZER_DEBUG && !SANITIZER_GO
252 cur_thread()->internal_deadlock_detector
.Unlock(type_
);
256 void Mutex::ReadLock() {
257 #if SANITIZER_DEBUG && !SANITIZER_GO
258 cur_thread()->internal_deadlock_detector
.Lock(type_
);
260 uptr prev
= atomic_fetch_add(&state_
, kReadLock
, memory_order_acquire
);
261 if ((prev
& kWriteLock
) == 0)
263 for (Backoff backoff
; backoff
.Do();) {
264 prev
= atomic_load(&state_
, memory_order_acquire
);
265 if ((prev
& kWriteLock
) == 0) {
266 #if TSAN_COLLECT_STATS && !SANITIZER_GO
267 StatInc(cur_thread(), stat_type_
, backoff
.Contention());
274 void Mutex::ReadUnlock() {
275 uptr prev
= atomic_fetch_sub(&state_
, kReadLock
, memory_order_release
);
277 DCHECK_EQ(prev
& kWriteLock
, 0);
278 DCHECK_GT(prev
& ~kWriteLock
, 0);
279 #if SANITIZER_DEBUG && !SANITIZER_GO
280 cur_thread()->internal_deadlock_detector
.Unlock(type_
);
284 void Mutex::CheckLocked() {
285 CHECK_NE(atomic_load(&state_
, memory_order_relaxed
), 0);
288 } // namespace __tsan