//===-- tsan_mutex.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.

// The table specifies which mutexes can be locked under which mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while under the Threads mutex.
// Leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
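// For example, since the MutexTypeThreads row below lists MutexTypeReport,
// the nesting Lock(Threads) -> Lock(Report) is allowed, while the reverse
// nesting Lock(Report) -> Lock(Threads) would be flagged by the internal
// deadlock detector (a hypothetical illustration, not an actual call site).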
#if SANITIZER_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
  /*5  MutexTypeSyncTab*/     {},  // unused
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
  /*11 MutexTypeDDetector*/   {},
  /*12 MutexTypeFired*/       {MutexTypeLeaf},
  /*13 MutexTypeRacy*/        {MutexTypeLeaf},
  /*14 MutexTypeGlobalProc*/  {},
};

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif

void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
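  // First pass: copy CanLockTab rows into the adjacency matrix, separating
  // out the rows marked with MutexTypeLeaf.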
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
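  // A mutex marked as leaf must not list successors of its own.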
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
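  // Warshall-style propagation: if i can be held while locking k, and k can
  // be held while locking j, then i can transitively be held while locking j.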
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
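  // A cycle in the lock-order graph manifests as a self-edge
  // CanLockAdj2[i][i] in the transitive closure.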
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}

InternalDeadlockDetector::InternalDeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}

#if SANITIZER_DEBUG && !SANITIZER_GO
void InternalDeadlockDetector::Lock(MutexType t) {
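  // Find the most recently acquired mutex that is still held; the new mutex
  // t must be lockable under it according to CanLockAdj.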
134 // Printf("LOCK %d @%zu\n", t, seq_ + 1);
135 CHECK_GT(t, MutexTypeInvalid);
136 CHECK_LT(t, MutexTypeCount);
137 u64 max_seq = 0;
138 u64 max_idx = MutexTypeInvalid;
139 for (int i = 0; i != MutexTypeCount; i++) {
140 if (locked_[i] == 0)
141 continue;
142 CHECK_NE(locked_[i], max_seq);
143 if (max_seq < locked_[i]) {
144 max_seq = locked_[i];
145 max_idx = i;
148 locked_[t] = ++seq_;
149 if (max_idx == MutexTypeInvalid)
150 return;
151 // Printf(" last %d @%zu\n", max_idx, max_seq);
152 if (!CanLockAdj[max_idx][t]) {
153 Printf("ThreadSanitizer: internal deadlock detected\n");
154 Printf("ThreadSanitizer: can't lock %d while under %zu\n",
155 t, (uptr)max_idx);
156 CHECK(0);

void InternalDeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}

void InternalDeadlockDetector::CheckNoLocks() {
  for (int i = 0; i != MutexTypeCount; i++) {
    CHECK_EQ(locked_[i], 0);
  }
}
#endif

void CheckNoLocks(ThreadState *thr) {
#if SANITIZER_DEBUG && !SANITIZER_GO
  thr->internal_deadlock_detector.CheckNoLocks();
#endif
}
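
// Mutex state encoding: bit 0 is the writer bit (kWriteLock); each reader
// contributes kReadLock, so the reader count occupies the remaining bits.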
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
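
// Two-phase backoff: the first kActiveSpinIters calls to Do() spin actively
// (proc_yield), after which each call yields the CPU (internal_sched_yield).
// Contention() returns a weighted iteration count for statistics, counting a
// passive yield as ten active spins.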
class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  bool Do() {
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  u64 Contention() const {
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};

Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
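
// Lock() fast path is a single CAS from kUnlocked to kWriteLock. Under
// contention it spins with Backoff in test-and-test-and-set style,
// re-checking the state with a relaxed load before retrying the CAS.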
void Mutex::Lock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
  for (Backoff backoff; backoff.Do();) {
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}

void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
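
// ReadLock() optimistically registers the reader with a fetch_add and then
// waits for any writer to drain. The reader reservation is never rolled
// back, which is why readers have preference and can starve writers (see
// the comment at the top of the file).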
void Mutex::ReadLock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}

void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}

}  // namespace __tsan