[PR67828] don't unswitch on default defs of non-parms
[official-gcc.git] / libsanitizer / sanitizer_common / sanitizer_mutex.h
//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
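
// Usage sketch (added for illustration, not part of the original header):
// StaticSpinMutex has no constructor, so a zero-initialized global instance
// can be used before static constructors run; Init() is only needed when the
// storage is not already zeroed.  The names below are hypothetical.
//
//   static StaticSpinMutex g_counter_mu;  // zero-initialized global
//   static u64 g_counter;
//
//   void IncrementCounter() {
//     g_counter_mu.Lock();
//     g_counter++;
//     g_counter_mu.Unlock();
//   }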

class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

class BlockingMutex {
 public:
  explicit BlockingMutex(LinkerInitialized);
  BlockingMutex();
  void Lock();
  void Unlock();
  void CheckLocked();
 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
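
// Usage sketch (added for illustration, not part of the original header):
// the LinkerInitialized constructor is meant for globals that must be usable
// before static constructors run.  It assumes LINKER_INITIALIZED from
// sanitizer_internal_defs.h; the name g_report_mu is hypothetical.
//
//   static BlockingMutex g_report_mu(LINKER_INITIALIZED);
//
//   void ReportSomething() {
//     BlockingMutexLock l(&g_report_mu);  // typedef defined below
//     // ... work done while holding the lock ...
//   }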

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator = (const RWMutex&);
};
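
// State encoding note and usage sketch (added commentary, not part of the
// original header): state_ keeps the writer flag in bit 0 (kWriteLock) and
// counts readers in increments of kReadLock, so a writer acquires the lock
// only when the whole word is back to kUnlocked.  The names below are
// hypothetical.
//
//   static RWMutex g_table_mu;  // has a constructor, so it is not intended
//                               // for use before static constructors run
//
//   u32 LookupEntry(u32 key) {  // many concurrent readers are allowed
//     g_table_mu.ReadLock();
//     u32 res = /* read the shared table */ 0;
//     g_table_mu.ReadUnlock();
//     return res;
//   }
//
//   void UpdateEntry(u32 key, u32 value) {  // exclusive writer
//     g_table_mu.Lock();
//     // ... modify the shared table ...
//     g_table_mu.Unlock();
//   }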

template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
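
// Usage sketch for the scoped-lock typedefs (added for illustration, not part
// of the original header): the RAII wrappers release the mutex on every exit
// path, including early returns.  The names below are hypothetical.
//
//   static RWMutex g_map_mu;
//
//   bool FindInMap(uptr addr) {
//     RWMutexReadLock l(&g_map_mu);  // ReadLock() now, ReadUnlock() in dtor
//     if (/* not present */ false)
//       return false;                // lock released here as well
//     return true;
//   }
//
//   void AddToMap(uptr addr) {
//     RWMutexLock l(&g_map_mu);      // exclusive Lock()/Unlock()
//     // ... insert while holding the write lock ...
//   }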

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H