//===-- tsan_mutex.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
11 #include "sanitizer_common/sanitizer_libc.h"
12 #include "tsan_mutex.h"
13 #include "tsan_platform.h"
// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.
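// The lock state is a single word: bit 0 (kWriteLock) is the writer bit,
// and each reader adds kReadLock (2), so readers are counted in the
// remaining bits; state == kUnlocked (0) means the mutex is free.
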
// The table fixes what mutexes can be locked under what mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then Report mutex can be locked while under Threads mutex.
// The leaf mutexes can be locked under any other mutexes.
// Recursive locking is not supported.
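// For example, the rows below allow Report -> SyncTab and
// SyncTab -> SyncVar, so once the transitive closure is built in
// InitializeMutex(), SyncVar may also be locked under Report, while the
// reverse order in any of these pairs is flagged as a deadlock.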
#if TSAN_DEBUG && !TSAN_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncTab, MutexTypeMBlock,
                               MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {},
  /*5  MutexTypeSyncTab*/     {MutexTypeSyncVar},
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncTab},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
};

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
void InitializeMutex() {
#if TSAN_DEBUG && !TSAN_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j] == true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
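  // At this point the direct edges from CanLockTab are in place,
  // e.g. CanLockAdj[MutexTypeThreads][MutexTypeReport] is true.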
  // A leaf mutex must not have direct successors of its own.
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes: they can be locked under any other non-leaf mutex.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
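  // Floyd-Warshall-style closure: add an edge i -> j whenever j is
  // reachable from i through any chain of permitted lock acquisitions.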
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
99 Printf("Can lock graph:\n");
100 for (int i
= 0; i
< N
; i
++) {
101 for (int j
= 0; j
< N
; j
++) {
102 Printf("%d ", CanLockAdj
[i
][j
]);
106 Printf("Can lock graph closure:\n");
107 for (int i
= 0; i
< N
; i
++) {
108 for (int j
= 0; j
< N
; j
++) {
109 Printf("%d ", CanLockAdj2
[i
][j
]);
  // Verify that the graph is acyclic: a self-edge in the closure means
  // some chain of mutexes can be locked under itself.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
DeadlockDetector::DeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}
#if TSAN_DEBUG && !TSAN_GO
void DeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  // Find the most recently acquired mutex that is still held.
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
           (int)t, (uptr)max_idx);
    CHECK(0);
  }
}
void DeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}
#endif
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  bool Do() {
    // Spin actively for the first kActiveSpinIters iterations,
    // then fall back to yielding the CPU to other threads.
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  u64 Contention() const {
    // Passive (yielding) iterations are weighted 10x heavier than
    // active spins in the contention estimate.
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};
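
// Mutex::Lock/ReadLock below spin in a Backoff loop when contended and,
// with TSAN_COLLECT_STATS, record the weighted contention estimate.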
Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if TSAN_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
void Mutex::Lock() {
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
  for (Backoff backoff; backoff.Do();) {
    // Test-and-test-and-set: re-read the state with a cheap relaxed load
    // and attempt the CAS only when the mutex looks unlocked.
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}
void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}
void Mutex::ReadLock() {
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  // A writer holds the mutex; spin until the writer bit is cleared.
  // The reader count added above already blocks new writers.
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}
void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}
void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}
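
// A minimal usage sketch (not part of the runtime logic), assuming the
// scoped guard classes declared in tsan_mutex.h and a StatMtxReport
// stat counter:
//   Mutex mtx(MutexTypeReport, StatMtxReport);
//   {
//     Lock l(&mtx);       // exclusive section
//   }
//   {
//     ReadLock l(&mtx);   // shared section
//     mtx.CheckLocked();  // sanity-check that the mutex is held
//   }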
}  // namespace __tsan