Bug 1845715 - Check for failure when getting RegExp match result template r=iain
[gecko.git] / memory / build / Mutex.h
blobd313882083635f0bbc92320473ad54fcf9ff6e13
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef Mutex_h
8 #define Mutex_h
10 #if defined(XP_WIN)
11 # include <windows.h>
12 #else
13 # include <pthread.h>
14 #endif
15 #if defined(XP_DARWIN)
16 # include <os/lock.h>
17 #endif
19 #include "mozilla/Assertions.h"
20 #include "mozilla/Attributes.h"
21 #include "mozilla/ThreadSafety.h"
#if defined(XP_DARWIN)
// For information about the following undocumented flags and functions see
// https://github.com/apple/darwin-xnu/blob/main/bsd/sys/ulock.h and
// https://github.com/apple/darwin-libplatform/blob/main/private/os/lock_private.h
#  define OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION (0x00010000)
#  define OS_UNFAIR_LOCK_ADAPTIVE_SPIN (0x00040000)

// Forward-declare the private os_unfair_lock entry point with C linkage so we
// can link against it without the private libplatform headers.
extern "C" {

typedef uint32_t os_unfair_lock_options_t;
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL void os_unfair_lock_lock_with_options(
    os_unfair_lock_t lock, os_unfair_lock_options_t options);

}  // extern "C"

#endif  // defined(XP_DARWIN)
39 // Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
40 // places, because they require malloc()ed memory, which causes bootstrapping
41 // issues in some cases. We also can't use constructors, because for statics,
42 // they would fire after the first use of malloc, resetting the locks.
43 struct MOZ_CAPABILITY("mutex") Mutex {
44 #if defined(XP_WIN)
45 CRITICAL_SECTION mMutex;
46 #elif defined(XP_DARWIN)
47 os_unfair_lock mMutex;
48 #else
49 pthread_mutex_t mMutex;
50 #endif
52 // Initializes a mutex. Returns whether initialization succeeded.
53 inline bool Init() {
54 #if defined(XP_WIN)
55 if (!InitializeCriticalSectionAndSpinCount(&mMutex, 5000)) {
56 return false;
58 #elif defined(XP_DARWIN)
59 mMutex = OS_UNFAIR_LOCK_INIT;
60 #elif defined(XP_LINUX) && !defined(ANDROID)
61 pthread_mutexattr_t attr;
62 if (pthread_mutexattr_init(&attr) != 0) {
63 return false;
65 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
66 if (pthread_mutex_init(&mMutex, &attr) != 0) {
67 pthread_mutexattr_destroy(&attr);
68 return false;
70 pthread_mutexattr_destroy(&attr);
71 #else
72 if (pthread_mutex_init(&mMutex, nullptr) != 0) {
73 return false;
75 #endif
76 return true;
79 inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
80 #if defined(XP_WIN)
81 EnterCriticalSection(&mMutex);
82 #elif defined(XP_DARWIN)
83 // We rely on a non-public function to improve performance here.
84 // The OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION flag informs the kernel that
85 // the calling thread is able to make progress even in absence of actions
86 // from other threads and the OS_UNFAIR_LOCK_ADAPTIVE_SPIN one causes the
87 // kernel to spin on a contested lock if the owning thread is running on
88 // the same physical core (presumably only on x86 CPUs given that ARM
89 // macs don't have cores capable of SMT). On versions of macOS older than
90 // 10.15 the latter is not available and we spin in userspace instead.
91 if (Mutex::gSpinInKernelSpace) {
92 os_unfair_lock_lock_with_options(
93 &mMutex,
94 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
95 } else {
96 # if defined(__x86_64__)
97 // On older versions of macOS (10.14 and older) the
98 // `OS_UNFAIR_LOCK_ADAPTIVE_SPIN` flag is not supported by the kernel,
99 // we spin in user-space instead like `OSSpinLock` does:
100 // https://github.com/apple/darwin-libplatform/blob/215b09856ab5765b7462a91be7076183076600df/src/os/lock.c#L183-L198
101 // Note that `OSSpinLock` uses 1000 iterations on x86-64:
102 // https://github.com/apple/darwin-libplatform/blob/215b09856ab5765b7462a91be7076183076600df/src/os/lock.c#L93
103 // ...but we only use 100 like it does on ARM:
104 // https://github.com/apple/darwin-libplatform/blob/215b09856ab5765b7462a91be7076183076600df/src/os/lock.c#L90
105 // We choose this value because it yields the same results in our
106 // benchmarks but is less likely to have detrimental effects caused by
107 // excessive spinning.
108 uint32_t retries = 100;
110 do {
111 if (os_unfair_lock_trylock(&mMutex)) {
112 return;
115 __asm__ __volatile__("pause");
116 } while (retries--);
118 os_unfair_lock_lock_with_options(&mMutex,
119 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
120 # else
121 MOZ_CRASH("User-space spin-locks should never be used on ARM");
122 # endif // defined(__x86_64__)
124 #else
125 pthread_mutex_lock(&mMutex);
126 #endif
129 [[nodiscard]] bool TryLock() MOZ_TRY_ACQUIRE(true);
131 inline void Unlock() MOZ_CAPABILITY_RELEASE() {
132 #if defined(XP_WIN)
133 LeaveCriticalSection(&mMutex);
134 #elif defined(XP_DARWIN)
135 os_unfair_lock_unlock(&mMutex);
136 #else
137 pthread_mutex_unlock(&mMutex);
138 #endif
141 #if defined(XP_DARWIN)
142 static bool SpinInKernelSpace();
143 static const bool gSpinInKernelSpace;
144 #endif // XP_DARWIN
147 // Mutex that can be used for static initialization.
148 // On Windows, CRITICAL_SECTION requires a function call to be initialized,
149 // but for the initialization lock, a static initializer calling the
150 // function would be called too late. We need no-function-call
151 // initialization, which SRWLock provides.
152 // Ideally, we'd use the same type of locks everywhere, but SRWLocks
153 // everywhere incur a performance penalty. See bug 1418389.
154 #if defined(XP_WIN)
155 struct MOZ_CAPABILITY("mutex") StaticMutex {
156 SRWLOCK mMutex;
158 inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
159 AcquireSRWLockExclusive(&mMutex);
162 inline void Unlock() MOZ_CAPABILITY_RELEASE() {
163 ReleaseSRWLockExclusive(&mMutex);
167 // Normally, we'd use a constexpr constructor, but MSVC likes to create
168 // static initializers anyways.
169 # define STATIC_MUTEX_INIT SRWLOCK_INIT
171 #else
172 typedef Mutex StaticMutex;
174 # if defined(XP_DARWIN)
175 # define STATIC_MUTEX_INIT OS_UNFAIR_LOCK_INIT
176 # elif defined(XP_LINUX) && !defined(ANDROID)
177 # define STATIC_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
178 # else
179 # define STATIC_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
180 # endif
182 #endif
#ifdef XP_WIN
// Opaque identifier for the calling thread, used by MaybeMutex's debug-mode
// ownership checks below.
typedef DWORD ThreadId;
inline ThreadId GetThreadId() { return GetCurrentThreadId(); }
#else
// NOTE: pthread_t values must be compared with pthread_equal(), not ==;
// MaybeMutex::Reinit does this correctly.
typedef pthread_t ThreadId;
inline ThreadId GetThreadId() { return pthread_self(); }
#endif
192 class MOZ_CAPABILITY("mutex") MaybeMutex : public Mutex {
193 public:
194 enum DoLock {
195 MUST_LOCK,
196 AVOID_LOCK_UNSAFE,
199 bool Init(DoLock aDoLock) {
200 mDoLock = aDoLock;
201 #ifdef MOZ_DEBUG
202 mThreadId = GetThreadId();
203 #endif
204 return Mutex::Init();
207 #ifndef XP_WIN
208 // Re initialise after fork(), assumes that mDoLock is already initialised.
209 void Reinit(pthread_t aForkingThread) {
210 if (mDoLock == MUST_LOCK) {
211 Mutex::Init();
212 return;
214 # ifdef MOZ_DEBUG
215 // If this is an eluded lock we can only safely re-initialise it if the
216 // thread that called fork is the one that owns the lock.
217 if (pthread_equal(mThreadId, aForkingThread)) {
218 mThreadId = GetThreadId();
219 Mutex::Init();
220 } else {
221 // We can't guantee that whatever resource this lock protects (probably a
222 // jemalloc arena) is in a consistent state.
223 mDeniedAfterFork = true;
225 # endif
227 #endif
229 inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
230 if (ShouldLock()) {
231 Mutex::Lock();
235 inline void Unlock() MOZ_CAPABILITY_RELEASE() {
236 if (ShouldLock()) {
237 Mutex::Unlock();
241 // Return true if we can use this resource from this thread, either because
242 // we'll use the lock or because this is the only thread that will access the
243 // protected resource.
244 #ifdef MOZ_DEBUG
245 bool SafeOnThisThread() const {
246 return mDoLock == MUST_LOCK || GetThreadId() == mThreadId;
248 #endif
250 bool LockIsEnabled() const { return mDoLock == MUST_LOCK; }
252 private:
253 bool ShouldLock() {
254 #ifndef XP_WIN
255 MOZ_ASSERT(!mDeniedAfterFork);
256 #endif
258 if (mDoLock == MUST_LOCK) {
259 return true;
262 MOZ_ASSERT(GetThreadId() == mThreadId);
263 return false;
266 DoLock mDoLock;
267 #ifdef MOZ_DEBUG
268 ThreadId mThreadId;
269 # ifndef XP_WIN
270 bool mDeniedAfterFork = false;
271 # endif
272 #endif
275 template <typename T>
276 struct MOZ_SCOPED_CAPABILITY MOZ_RAII AutoLock {
277 explicit AutoLock(T& aMutex) MOZ_CAPABILITY_ACQUIRE(aMutex) : mMutex(aMutex) {
278 mMutex.Lock();
281 ~AutoLock() MOZ_CAPABILITY_RELEASE() { mMutex.Unlock(); }
283 AutoLock(const AutoLock&) = delete;
284 AutoLock(AutoLock&&) = delete;
286 private:
287 T& mMutex;
290 using MutexAutoLock = AutoLock<Mutex>;
292 using MaybeMutexAutoLock = AutoLock<MaybeMutex>;
294 #endif