//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
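// Usage sketch (illustrative comment only, not part of this interface): a
// StaticSpinMutex is usable as a zero-initialized global without calling
// Init(), since the all-zero state is the unlocked state. The names below are
// hypothetical.
//
//   static StaticSpinMutex g_example_mu;   // linker-initialized, unlocked
//   static int g_example_counter;
//
//   void ExampleIncrement() {
//     g_example_mu.Lock();
//     g_example_counter++;
//     g_example_mu.Unlock();
//   }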
class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};
class BlockingMutex {
 public:
#if SANITIZER_WINDOWS
  // Windows does not currently support LinkerInitialized
  explicit BlockingMutex(LinkerInitialized);
#else
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_(0) {}
#endif
  BlockingMutex();
  void Lock();
  void Unlock();

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned, and assumes callers to be generally
  // well-behaved.
  void CheckLocked();

 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
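// Illustrative sketch of the CheckLocked() caveat documented above
// (hypothetical names, not part of the runtime): the check only verifies that
// the mutex is held by someone, so a thread other than the owner may call it.
//
//   static BlockingMutex g_example_mu(LINKER_INITIALIZED);
//
//   void ParentThread() {
//     g_example_mu.Lock();
//     // ... start a child thread that runs ChildCheck() ...
//     g_example_mu.Unlock();
//   }
//
//   void ChildCheck() {
//     g_example_mu.CheckLocked();  // passes even though the parent owns it
//   }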
// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator = (const RWMutex&);
};
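// Usage sketch (illustrative comment only; names are hypothetical): writers
// take the mutex exclusively, while readers may hold it concurrently.
//
//   void ExampleWrite(RWMutex *mu) {
//     mu->Lock();          // exclusive
//     // ... mutate shared state ...
//     mu->Unlock();
//   }
//
//   void ExampleRead(RWMutex *mu) {
//     mu->ReadLock();      // shared
//     // ... read shared state ...
//     mu->ReadUnlock();
//   }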
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};
template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
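// RAII usage sketch for the scoped-lock typedefs above (hypothetical function;
// the guard releases the mutex when it goes out of scope, including on early
// return):
//
//   void ExampleScoped(RWMutex *mu) {
//     RWMutexReadLock read_guard(mu);   // calls mu->ReadLock()
//     // ... read shared state ...
//   }                                   // calls mu->ReadUnlock()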
}  // namespace __sanitizer
#endif  // SANITIZER_MUTEX_H