1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
15 #if defined(XP_DARWIN)
19 #include "mozilla/Assertions.h"
20 #include "mozilla/Attributes.h"
21 #include "mozilla/ThreadSafety.h"
23 #if defined(XP_DARWIN)
24 // For information about the following undocumented flags and functions see
25 // https://github.com/apple/darwin-xnu/blob/main/bsd/sys/ulock.h and
26 // https://github.com/apple/darwin-libplatform/blob/main/private/os/lock_private.h
27 # define OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION (0x00010000)
28 # define OS_UNFAIR_LOCK_ADAPTIVE_SPIN (0x00040000)
32 typedef uint32_t os_unfair_lock_options_t
;
33 OS_UNFAIR_LOCK_AVAILABILITY
34 OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(
35 os_unfair_lock_t lock
, os_unfair_lock_options_t options
);
37 #endif // defined(XP_DARWIN)
39 // Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
40 // places, because they require malloc()ed memory, which causes bootstrapping
41 // issues in some cases. We also can't use constructors, because for statics,
42 // they would fire after the first use of malloc, resetting the locks.
43 struct MOZ_CAPABILITY("mutex") Mutex
{
45 CRITICAL_SECTION mMutex
;
46 #elif defined(XP_DARWIN)
47 os_unfair_lock mMutex
;
49 pthread_mutex_t mMutex
;
52 // Initializes a mutex. Returns whether initialization succeeded.
55 if (!InitializeCriticalSectionAndSpinCount(&mMutex
, 5000)) {
58 #elif defined(XP_DARWIN)
59 mMutex
= OS_UNFAIR_LOCK_INIT
;
60 #elif defined(XP_LINUX) && !defined(ANDROID)
61 pthread_mutexattr_t attr
;
62 if (pthread_mutexattr_init(&attr
) != 0) {
65 pthread_mutexattr_settype(&attr
, PTHREAD_MUTEX_ADAPTIVE_NP
);
66 if (pthread_mutex_init(&mMutex
, &attr
) != 0) {
67 pthread_mutexattr_destroy(&attr
);
70 pthread_mutexattr_destroy(&attr
);
72 if (pthread_mutex_init(&mMutex
, nullptr) != 0) {
79 inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
81 EnterCriticalSection(&mMutex
);
82 #elif defined(XP_DARWIN)
83 // We rely on a non-public function to improve performance here.
84 // The OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION flag informs the kernel that
85 // the calling thread is able to make progress even in absence of actions
86 // from other threads and the OS_UNFAIR_LOCK_ADAPTIVE_SPIN one causes the
87 // kernel to spin on a contested lock if the owning thread is running on
88 // the same physical core (presumably only on x86 CPUs given that ARM
89 // macs don't have cores capable of SMT). On versions of macOS older than
90 // 10.15 the latter is not available and we spin in userspace instead.
91 if (Mutex::gSpinInKernelSpace
) {
92 os_unfair_lock_lock_with_options(
94 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
| OS_UNFAIR_LOCK_ADAPTIVE_SPIN
);
96 # if defined(__x86_64__)
97 // On older versions of macOS (10.14 and older) the
98 // `OS_UNFAIR_LOCK_ADAPTIVE_SPIN` flag is not supported by the kernel,
99 // we spin in user-space instead like `OSSpinLock` does:
100 // https://github.com/apple/darwin-libplatform/blob/215b09856ab5765b7462a91be7076183076600df/src/os/lock.c#L183-L198
101 // Note that `OSSpinLock` uses 1000 iterations on x86-64:
102 // https://github.com/apple/darwin-libplatform/blob/215b09856ab5765b7462a91be7076183076600df/src/os/lock.c#L93
103 // ...but we only use 100 like it does on ARM:
104 // https://github.com/apple/darwin-libplatform/blob/215b09856ab5765b7462a91be7076183076600df/src/os/lock.c#L90
105 // We choose this value because it yields the same results in our
106 // benchmarks but is less likely to have detrimental effects caused by
107 // excessive spinning.
108 uint32_t retries
= 100;
111 if (os_unfair_lock_trylock(&mMutex
)) {
115 __asm__
__volatile__("pause");
118 os_unfair_lock_lock_with_options(&mMutex
,
119 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
);
121 MOZ_CRASH("User-space spin-locks should never be used on ARM");
122 # endif // defined(__x86_64__)
125 pthread_mutex_lock(&mMutex
);
129 [[nodiscard
]] bool TryLock() MOZ_TRY_ACQUIRE(true);
131 inline void Unlock() MOZ_CAPABILITY_RELEASE() {
133 LeaveCriticalSection(&mMutex
);
134 #elif defined(XP_DARWIN)
135 os_unfair_lock_unlock(&mMutex
);
137 pthread_mutex_unlock(&mMutex
);
141 #if defined(XP_DARWIN)
142 static bool SpinInKernelSpace();
143 static const bool gSpinInKernelSpace
;
147 // Mutex that can be used for static initialization.
148 // On Windows, CRITICAL_SECTION requires a function call to be initialized,
149 // but for the initialization lock, a static initializer calling the
150 // function would be called too late. We need no-function-call
151 // initialization, which SRWLock provides.
152 // Ideally, we'd use the same type of locks everywhere, but SRWLocks
153 // everywhere incur a performance penalty. See bug 1418389.
155 struct MOZ_CAPABILITY("mutex") StaticMutex
{
158 inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
159 AcquireSRWLockExclusive(&mMutex
);
162 inline void Unlock() MOZ_CAPABILITY_RELEASE() {
163 ReleaseSRWLockExclusive(&mMutex
);
167 // Normally, we'd use a constexpr constructor, but MSVC likes to create
168 // static initializers anyways.
169 # define STATIC_MUTEX_INIT SRWLOCK_INIT
172 typedef Mutex StaticMutex
;
174 # if defined(XP_DARWIN)
175 # define STATIC_MUTEX_INIT OS_UNFAIR_LOCK_INIT
176 # elif defined(XP_LINUX) && !defined(ANDROID)
177 # define STATIC_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
179 # define STATIC_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
// Platform thread identifier and accessor; without the #ifdef the two
// definitions would conflict.
#ifdef XP_WIN
typedef DWORD ThreadId;
inline ThreadId GetThreadId() { return GetCurrentThreadId(); }
#else
typedef pthread_t ThreadId;
inline ThreadId GetThreadId() { return pthread_self(); }
#endif
192 class MOZ_CAPABILITY("mutex") MaybeMutex
: public Mutex
{
199 bool Init(DoLock aDoLock
) {
202 mThreadId
= GetThreadId();
204 return Mutex::Init();
208 // Re initialise after fork(), assumes that mDoLock is already initialised.
209 void Reinit(pthread_t aForkingThread
) {
210 if (mDoLock
== MUST_LOCK
) {
215 // If this is an eluded lock we can only safely re-initialise it if the
216 // thread that called fork is the one that owns the lock.
217 if (pthread_equal(mThreadId
, aForkingThread
)) {
218 mThreadId
= GetThreadId();
221 // We can't guantee that whatever resource this lock protects (probably a
222 // jemalloc arena) is in a consistent state.
223 mDeniedAfterFork
= true;
229 inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
235 inline void Unlock() MOZ_CAPABILITY_RELEASE() {
241 // Return true if we can use this resource from this thread, either because
242 // we'll use the lock or because this is the only thread that will access the
243 // protected resource.
245 bool SafeOnThisThread() const {
246 return mDoLock
== MUST_LOCK
|| GetThreadId() == mThreadId
;
250 bool LockIsEnabled() const { return mDoLock
== MUST_LOCK
; }
255 MOZ_ASSERT(!mDeniedAfterFork
);
258 if (mDoLock
== MUST_LOCK
) {
262 MOZ_ASSERT(GetThreadId() == mThreadId
);
270 bool mDeniedAfterFork
= false;
275 template <typename T
>
276 struct MOZ_SCOPED_CAPABILITY MOZ_RAII AutoLock
{
277 explicit AutoLock(T
& aMutex
) MOZ_CAPABILITY_ACQUIRE(aMutex
) : mMutex(aMutex
) {
281 ~AutoLock() MOZ_CAPABILITY_RELEASE() { mMutex
.Unlock(); }
283 AutoLock(const AutoLock
&) = delete;
284 AutoLock(AutoLock
&&) = delete;
290 using MutexAutoLock
= AutoLock
<Mutex
>;
292 using MaybeMutexAutoLock
= AutoLock
<MaybeMutex
>;