//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

// Spin mutex with no constructor: a zero-initialized global is already in the
// unlocked state, so it is usable before any initializers have run.
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0 &&
          atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

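// Usage sketch, added for illustration (not part of the upstream header).
// Because StaticSpinMutex has no constructor, a zero-initialized global is
// immediately usable; the instance and function below are hypothetical.
//
//   static StaticSpinMutex counter_mu;  // zero-initialized => unlocked
//   void IncrementCounter(u64 *counter) {
//     counter_mu.Lock();
//     ++*counter;                       // protected by counter_mu
//     counter_mu.Unlock();
//   }
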
// Variant of StaticSpinMutex that initializes itself in its constructor.
class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

class BlockingMutex {
 public:
  explicit BlockingMutex(LinkerInitialized);
  BlockingMutex();
  void Lock();
  void Unlock();
  void CheckLocked();
 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

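// Note, added for illustration: BlockingMutex is implemented out of line in
// the platform-specific runtime sources, which is why only opaque storage is
// declared here. The LinkerInitialized constructor is meant for globals whose
// storage the loader has already zeroed, along the lines of (assuming the
// usual LINKER_INITIALIZED constant from sanitizer_internal_defs.h):
//
//   static BlockingMutex report_mu(LINKER_INITIALIZED);
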
// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;  // unused when DCHECKs compile away
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;  // unused when DCHECKs compile away
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};

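// Usage sketch, added for illustration (not part of the upstream header).
// The 32-bit state packs a writer bit (kWriteLock) and a reader count in
// units of kReadLock; the instance and functions below are hypothetical.
//
//   static RWMutex cache_mu;
//   void LookupCache() { cache_mu.ReadLock(); /* shared read */ cache_mu.ReadUnlock(); }
//   void UpdateCache() { cache_mu.Lock();     /* exclusive */   cache_mu.Unlock(); }
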
// RAII helper: acquires *mu in the constructor, releases it in the destructor.
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

// RAII helper for the reader side of a reader-writer mutex.
template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;

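// Usage sketch, added for illustration (not part of the upstream header).
// The typedefs give scope-bound locking over the matching mutex type; the
// mutex, item type, and function below are hypothetical.
//
//   static SpinMutex list_mu;
//   void PushItem(void *item) {
//     SpinMutexLock l(&list_mu);  // locks here, unlocks at scope exit
//     // ... append item to the list guarded by list_mu ...
//   }
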
}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H