[PR67828] don't unswitch on default defs of non-parms
[official-gcc.git] / libsanitizer / tsan / tsan_mutex.cc
blob2e49b9d2de9d4b42b3cbc628a4face9f655e7635
1 //===-- tsan_mutex.cc -----------------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
9 //
10 //===----------------------------------------------------------------------===//
11 #include "sanitizer_common/sanitizer_libc.h"
12 #include "tsan_mutex.h"
13 #include "tsan_platform.h"
14 #include "tsan_rtl.h"
16 namespace __tsan {
18 // Simple reader-writer spin-mutex. Optimized for not-so-contended case.
// Readers have preference, can possibly starve writers.
21 // The table fixes what mutexes can be locked under what mutexes.
22 // E.g. if the row for MutexTypeThreads contains MutexTypeReport,
23 // then Report mutex can be locked while under Threads mutex.
24 // The leaf mutexes can be locked under any other mutexes.
25 // Recursive locking is not supported.
#if TSAN_DEBUG && !TSAN_GO
// Sentinel value: marks the mutex on that row as a "leaf" mutex,
// i.e. one that may be acquired while holding any other mutex.
const MutexType MutexTypeLeaf = (MutexType)-1;
// Row i lists the mutex types that may be acquired while mutex i is held
// (see the lock-order comment above). Unlisted pairs are lock-order
// violations and trip the internal deadlock detector.
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
  /*5  MutexTypeSyncTab*/     {},  // unused
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
  /*11 MutexTypeDDetector*/   {},
};

// Adjacency matrix derived from CanLockTab by InitializeMutex():
// CanLockAdj[i][j] == true means mutex j may be locked under mutex i.
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
// Builds the "can lock" adjacency matrix (CanLockAdj) from CanLockTab,
// expands leaf mutexes, computes the transitive closure and verifies that
// the resulting lock-order graph is acyclic. Debug builds only.
void InitializeMutex() {
#if TSAN_DEBUG && !TSAN_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  // A leaf mutex must not list any other mutexes in its row.
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes: a leaf can be locked under any non-leaf mutex.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure (Floyd-Warshall style reachability).
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic: a self-reachable node means some
  // lock order cycle exists in CanLockTab.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
125 InternalDeadlockDetector::InternalDeadlockDetector() {
126 // Rely on zero initialization because some mutexes can be locked before ctor.
129 #if TSAN_DEBUG && !TSAN_GO
130 void InternalDeadlockDetector::Lock(MutexType t) {
131 // Printf("LOCK %d @%zu\n", t, seq_ + 1);
132 CHECK_GT(t, MutexTypeInvalid);
133 CHECK_LT(t, MutexTypeCount);
134 u64 max_seq = 0;
135 u64 max_idx = MutexTypeInvalid;
136 for (int i = 0; i != MutexTypeCount; i++) {
137 if (locked_[i] == 0)
138 continue;
139 CHECK_NE(locked_[i], max_seq);
140 if (max_seq < locked_[i]) {
141 max_seq = locked_[i];
142 max_idx = i;
145 locked_[t] = ++seq_;
146 if (max_idx == MutexTypeInvalid)
147 return;
148 // Printf(" last %d @%zu\n", max_idx, max_seq);
149 if (!CanLockAdj[max_idx][t]) {
150 Printf("ThreadSanitizer: internal deadlock detected\n");
151 Printf("ThreadSanitizer: can't lock %d while under %zu\n",
152 t, (uptr)max_idx);
153 CHECK(0);
157 void InternalDeadlockDetector::Unlock(MutexType t) {
158 // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
159 CHECK(locked_[t]);
160 locked_[t] = 0;
163 void InternalDeadlockDetector::CheckNoLocks() {
164 for (int i = 0; i != MutexTypeCount; i++) {
165 CHECK_EQ(locked_[i], 0);
168 #endif
170 void CheckNoLocks(ThreadState *thr) {
171 #if TSAN_DEBUG && !TSAN_GO
172 thr->internal_deadlock_detector.CheckNoLocks();
173 #endif
176 const uptr kUnlocked = 0;
177 const uptr kWriteLock = 1;
178 const uptr kReadLock = 2;
180 class Backoff {
181 public:
182 Backoff()
183 : iter_() {
186 bool Do() {
187 if (iter_++ < kActiveSpinIters)
188 proc_yield(kActiveSpinCnt);
189 else
190 internal_sched_yield();
191 return true;
194 u64 Contention() const {
195 u64 active = iter_ % kActiveSpinIters;
196 u64 passive = iter_ - active;
197 return active + 10 * passive;
200 private:
201 int iter_;
202 static const int kActiveSpinIters = 10;
203 static const int kActiveSpinCnt = 20;
206 Mutex::Mutex(MutexType type, StatType stat_type) {
207 CHECK_GT(type, MutexTypeInvalid);
208 CHECK_LT(type, MutexTypeCount);
209 #if TSAN_DEBUG
210 type_ = type;
211 #endif
212 #if TSAN_COLLECT_STATS
213 stat_type_ = stat_type;
214 #endif
215 atomic_store(&state_, kUnlocked, memory_order_relaxed);
218 Mutex::~Mutex() {
219 CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
222 void Mutex::Lock() {
223 #if TSAN_DEBUG && !TSAN_GO
224 cur_thread()->internal_deadlock_detector.Lock(type_);
225 #endif
226 uptr cmp = kUnlocked;
227 if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
228 memory_order_acquire))
229 return;
230 for (Backoff backoff; backoff.Do();) {
231 if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
232 cmp = kUnlocked;
233 if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
234 memory_order_acquire)) {
235 #if TSAN_COLLECT_STATS && !TSAN_GO
236 StatInc(cur_thread(), stat_type_, backoff.Contention());
237 #endif
238 return;
244 void Mutex::Unlock() {
245 uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
246 (void)prev;
247 DCHECK_NE(prev & kWriteLock, 0);
248 #if TSAN_DEBUG && !TSAN_GO
249 cur_thread()->internal_deadlock_detector.Unlock(type_);
250 #endif
253 void Mutex::ReadLock() {
254 #if TSAN_DEBUG && !TSAN_GO
255 cur_thread()->internal_deadlock_detector.Lock(type_);
256 #endif
257 uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
258 if ((prev & kWriteLock) == 0)
259 return;
260 for (Backoff backoff; backoff.Do();) {
261 prev = atomic_load(&state_, memory_order_acquire);
262 if ((prev & kWriteLock) == 0) {
263 #if TSAN_COLLECT_STATS && !TSAN_GO
264 StatInc(cur_thread(), stat_type_, backoff.Contention());
265 #endif
266 return;
271 void Mutex::ReadUnlock() {
272 uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
273 (void)prev;
274 DCHECK_EQ(prev & kWriteLock, 0);
275 DCHECK_GT(prev & ~kWriteLock, 0);
276 #if TSAN_DEBUG && !TSAN_GO
277 cur_thread()->internal_deadlock_detector.Unlock(type_);
278 #endif
281 void Mutex::CheckLocked() {
282 CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
285 } // namespace __tsan