//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      // Spin on a relaxed load first so we do not keep bouncing the cache
      // line with atomic exchanges while another thread holds the mutex.
      if (atomic_load(&state_, memory_order_relaxed) == 0 &&
          atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  // Not implemented: the mutex is not copyable.
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

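// Illustrative usage sketch: a zero-initialized StaticSpinMutex starts out
// unlocked, so it can back objects with static storage duration without
// running a constructor, while SpinMutex adds a constructor for ordinary
// members and locals. The counter below is hypothetical.
//
//   static StaticSpinMutex counter_mu;  // zero-initialized == unlocked
//   static int counter;
//
//   void IncrementCounter() {
//     counter_mu.Lock();
//     counter++;
//     counter_mu.Unlock();
//   }
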
class BlockingMutex {
 public:
#if SANITIZER_WINDOWS
  // Windows does not currently support LinkerInitialized
  explicit BlockingMutex(LinkerInitialized);
#else
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_(0) {}
#endif
  BlockingMutex();
  void Lock();
  void Unlock();
  void CheckLocked();

 private:
  // Opaque storage for the platform-specific mutex implementation.
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

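// Illustrative usage sketch: the LinkerInitialized constructor lets a global
// BlockingMutex live in zeroed storage and be taken before any constructors
// run. LINKER_INITIALIZED is assumed to be the tag value declared in
// sanitizer_internal_defs.h; the function below is hypothetical.
//
//   static BlockingMutex report_mu(LINKER_INITIALIZED);
//
//   void ReportSomething() {
//     BlockingMutexLock l(&report_mu);  // typedef defined below
//     // ... print the report while holding the mutex ...
//   }
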
// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  // The low bit is the writer flag; each reader adds kReadLock.
  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  // Not implemented: the mutex is not copyable.
  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};

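// Illustrative usage sketch: the whole RWMutex state lives in one u32 (the
// kWriteLock bit marks an exclusive writer, every reader adds kReadLock), so
// an uncontended ReadLock() is a single atomic_fetch_add. The table helpers
// below are hypothetical.
//
//   RWMutex table_mu;
//
//   uptr Lookup(uptr key) {            // many readers may run concurrently
//     table_mu.ReadLock();
//     uptr res = DoLookup(key);        // hypothetical helper
//     table_mu.ReadUnlock();
//     return res;
//   }
//
//   void Update(uptr key, uptr val) {  // writers are exclusive
//     table_mu.Lock();
//     DoUpdate(key, val);              // hypothetical helper
//     table_mu.Unlock();
//   }
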
// RAII wrapper: acquires the mutex in the constructor, releases it in the
// destructor.
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

// Same as GenericScopedLock, but takes the read (shared) side of the mutex.
template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;

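// Illustrative usage sketch: the scoped typedefs above give RAII locking, so
// the mutex is released on every return path. The function below is
// hypothetical.
//
//   void WithExclusiveAccess(RWMutex *mu) {
//     RWMutexLock l(mu);  // write lock acquired here
//     // ... mutate shared state ...
//   }                     // write lock released when 'l' goes out of scope
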
}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H