//===-- tsan_mutex.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.
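
// Illustrative usage sketch (not part of the original file). The StatMtxReport
// counter is assumed to exist in tsan_stat.h; substitute whatever StatType the
// caller actually tracks.
//
//   Mutex mtx(MutexTypeReport, StatMtxReport);
//   mtx.Lock();          // exclusive (writer) lock
//   mtx.CheckLocked();   // debug check: the mutex is held
//   mtx.Unlock();
//   mtx.ReadLock();      // shared (reader) lock
//   mtx.ReadUnlock();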
// The table specifies which mutexes can be locked under which other mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while under the Threads mutex.
// The leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
#if TSAN_DEBUG && !TSAN_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncTab, MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {},
  /*5  MutexTypeSyncTab*/     {MutexTypeSyncVar},
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncTab},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
};

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
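
// Illustration of the lock-order rule above; this block is not part of the
// original file, the mutex objects are hypothetical locals, and the StatMtx*
// counters are assumed from tsan_stat.h.
#if 0
void LockOrderExample() {
  Mutex threads_mtx(MutexTypeThreads, StatMtxThreads);
  Mutex report_mtx(MutexTypeReport, StatMtxReport);
  threads_mtx.Lock();
  report_mtx.Lock();    // OK: the MutexTypeThreads row contains MutexTypeReport.
  report_mtx.Unlock();
  threads_mtx.Unlock();
  // Taking threads_mtx while holding report_mtx would be the reverse order:
  // in debug builds the DeadlockDetector below prints
  // "ThreadSanitizer: internal deadlock detected".
}
#endif
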
void InitializeMutex() {
#if TSAN_DEBUG && !TSAN_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes: a leaf can be locked under any non-leaf mutex.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure (Floyd-Warshall style).
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
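
// Worked example of the acyclicity check (added illustration, not part of the
// original file): if CanLockTab hypothetically also listed MutexTypeThreads in
// the MutexTypeReport row, the closure would contain both
// CanLockAdj2[MutexTypeThreads][MutexTypeReport] and
// CanLockAdj2[MutexTypeReport][MutexTypeThreads], so
// CanLockAdj2[MutexTypeThreads][MutexTypeThreads] would become true and
// InitializeMutex() would print "Mutex 2 participates in a cycle" and Die().
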
DeadlockDetector::DeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}

#if TSAN_DEBUG && !TSAN_GO
void DeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
  // Find the most recently locked mutex still held by this thread.
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
           t, (uptr)max_idx);
    CHECK(0);
  }
}

void DeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}
#endif

const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
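
// Sketch of the state encoding used by the Mutex methods below (this comment
// is an added illustration, not part of the original file): bit 0 (kWriteLock)
// is the writer flag and every reader adds kReadLock == 2, so the reader count
// occupies the remaining bits. For example:
//   state == kUnlocked (0)      -> free
//   state == kWriteLock (1)     -> held by one writer
//   state == 2 * kReadLock (4)  -> held by two readers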

class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  bool Do() {
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  u64 Contention() const {
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};
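
// Usage sketch for Backoff (added illustration, not in the original file);
// try_acquire() is a hypothetical predicate standing in for the CAS retry in
// Mutex::Lock() below. The first kActiveSpinIters iterations spin with
// proc_yield(), later ones fall back to internal_sched_yield(), and
// Contention() weights those passive iterations 10x when reporting stats.
#if 0
void BackoffExample() {
  for (Backoff backoff; backoff.Do();) {
    if (try_acquire())  // hypothetical predicate
      break;
  }
}
#endif
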
Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if TSAN_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}

void Mutex::Lock() {
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
  // Contended case: spin on a relaxed load and retry the CAS only when the
  // mutex looks unlocked (test-and-test-and-set).
  for (Backoff backoff; backoff.Do();) {
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}

void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}

void Mutex::ReadLock() {
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  // A writer holds the mutex; the reader count is already published above,
  // so just wait for the writer bit to clear.
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}

void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}

void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}

}  // namespace __tsan