Bug 1874684 - Part 17: Fix uninitialised variable warnings from clang-tidy. r=allstarschh
[gecko.git] / memory / build / Mutex.h
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef Mutex_h
#define Mutex_h

#if defined(XP_WIN)
#  include <windows.h>
#else
#  include <pthread.h>
#endif
#if defined(XP_DARWIN)
#  include <os/lock.h>
#endif

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/ThreadSafety.h"

#if defined(XP_DARWIN)
// For information about the following undocumented flags and functions see
// https://github.com/apple/darwin-xnu/blob/main/bsd/sys/ulock.h and
// https://github.com/apple/darwin-libplatform/blob/main/private/os/lock_private.h
#  define OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION (0x00010000)
#  define OS_UNFAIR_LOCK_ADAPTIVE_SPIN (0x00040000)

extern "C" {

typedef uint32_t os_unfair_lock_options_t;
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL void os_unfair_lock_lock_with_options(
    os_unfair_lock_t lock, os_unfair_lock_options_t options);
}

#endif  // defined(XP_DARWIN)

// Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
// places, because they require malloc()ed memory, which causes bootstrapping
// issues in some cases. We also can't use constructors, because for statics,
// they would fire after the first use of malloc, resetting the locks.
struct MOZ_CAPABILITY("mutex") Mutex {
#if defined(XP_WIN)
  CRITICAL_SECTION mMutex;
#elif defined(XP_DARWIN)
  os_unfair_lock mMutex;
#else
  pthread_mutex_t mMutex;
#endif

  // Initializes a mutex. Returns whether initialization succeeded.
  inline bool Init() {
#if defined(XP_WIN)
    if (!InitializeCriticalSectionAndSpinCount(&mMutex, 5000)) {
      return false;
    }
#elif defined(XP_DARWIN)
    mMutex = OS_UNFAIR_LOCK_INIT;
#elif defined(XP_LINUX) && !defined(ANDROID)
    pthread_mutexattr_t attr;
    if (pthread_mutexattr_init(&attr) != 0) {
      return false;
    }
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
    if (pthread_mutex_init(&mMutex, &attr) != 0) {
      pthread_mutexattr_destroy(&attr);
      return false;
    }
    pthread_mutexattr_destroy(&attr);
#else
    if (pthread_mutex_init(&mMutex, nullptr) != 0) {
      return false;
    }
#endif
    return true;
  }

  inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
#if defined(XP_WIN)
    EnterCriticalSection(&mMutex);
#elif defined(XP_DARWIN)
    // We rely on a non-public function to improve performance here.
    // The OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION flag informs the kernel that
    // the calling thread is able to make progress even in the absence of
    // actions from other threads, and the OS_UNFAIR_LOCK_ADAPTIVE_SPIN one
    // causes the kernel to spin on a contested lock if the owning thread is
    // running on the same physical core (presumably only on x86 CPUs, given
    // that ARM Macs don't have cores capable of SMT).
    os_unfair_lock_lock_with_options(
        &mMutex,
        OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
#else
    pthread_mutex_lock(&mMutex);
#endif
  }

  [[nodiscard]] bool TryLock() MOZ_TRY_ACQUIRE(true);

  inline void Unlock() MOZ_CAPABILITY_RELEASE() {
#if defined(XP_WIN)
    LeaveCriticalSection(&mMutex);
#elif defined(XP_DARWIN)
    os_unfair_lock_unlock(&mMutex);
#else
    pthread_mutex_unlock(&mMutex);
#endif
  }

#if defined(XP_DARWIN)
  static bool SpinInKernelSpace();
  static const bool gSpinInKernelSpace;
#endif  // XP_DARWIN
};
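
// A minimal usage sketch (illustrative only, not part of the original
// header): because there is no constructor, a caller is expected to Init()
// the mutex explicitly, typically during single-threaded startup, before
// bracketing a critical section with Lock()/Unlock(). The names below are
// hypothetical.
//
//   static Mutex sFooLock;  // assumed to protect some shared state
//
//   bool SetupFoo() { return sFooLock.Init(); }
//
//   void TouchFoo() {
//     sFooLock.Lock();
//     // ... access the state protected by sFooLock ...
//     sFooLock.Unlock();
//   }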

// Mutex that can be used for static initialization.
// On Windows, CRITICAL_SECTION requires a function call to be initialized,
// but for the initialization lock, a static initializer calling the
// function would be called too late. We need no-function-call
// initialization, which SRWLock provides.
// Ideally, we'd use the same type of locks everywhere, but SRWLocks
// everywhere incur a performance penalty. See bug 1418389.
#if defined(XP_WIN)
struct MOZ_CAPABILITY("mutex") StaticMutex {
  SRWLOCK mMutex;

  inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
    AcquireSRWLockExclusive(&mMutex);
  }

  inline void Unlock() MOZ_CAPABILITY_RELEASE() {
    ReleaseSRWLockExclusive(&mMutex);
  }
};

// Normally, we'd use a constexpr constructor, but MSVC likes to create
// static initializers anyway.
#  define STATIC_MUTEX_INIT SRWLOCK_INIT

#else
typedef Mutex StaticMutex;

#  if defined(XP_DARWIN)
#    define STATIC_MUTEX_INIT OS_UNFAIR_LOCK_INIT
#  elif defined(XP_LINUX) && !defined(ANDROID)
#    define STATIC_MUTEX_INIT PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
#  else
#    define STATIC_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
#  endif

#endif
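
// Illustrative only (hypothetical variable name): a StaticMutex is meant to
// be brace-initialised with STATIC_MUTEX_INIT so that no function call runs
// during static initialization, e.g.:
//
//   static StaticMutex sInitLock = {STATIC_MUTEX_INIT};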

#ifdef XP_WIN
typedef DWORD ThreadId;
inline ThreadId GetThreadId() { return GetCurrentThreadId(); }
#else
typedef pthread_t ThreadId;
inline ThreadId GetThreadId() { return pthread_self(); }
#endif

class MOZ_CAPABILITY("mutex") MaybeMutex : public Mutex {
 public:
  enum DoLock {
    MUST_LOCK,
    AVOID_LOCK_UNSAFE,
  };

  bool Init(DoLock aDoLock) {
    mDoLock = aDoLock;
#ifdef MOZ_DEBUG
    mThreadId = GetThreadId();
#endif
    return Mutex::Init();
  }

#ifndef XP_WIN
  // Re-initialise after fork(); assumes that mDoLock is already initialised.
  void Reinit(pthread_t aForkingThread) {
    if (mDoLock == MUST_LOCK) {
      Mutex::Init();
      return;
    }
#  ifdef MOZ_DEBUG
    // If this is an elided lock, we can only safely re-initialise it if the
    // thread that called fork is the one that owns the lock.
    if (pthread_equal(mThreadId, aForkingThread)) {
      mThreadId = GetThreadId();
      Mutex::Init();
    } else {
      // We can't guarantee that whatever resource this lock protects
      // (probably a jemalloc arena) is in a consistent state.
      mDeniedAfterFork = true;
    }
#  endif
  }
#endif

  inline void Lock() MOZ_CAPABILITY_ACQUIRE() {
    if (ShouldLock()) {
      Mutex::Lock();
    }
  }

  inline void Unlock() MOZ_CAPABILITY_RELEASE() {
    if (ShouldLock()) {
      Mutex::Unlock();
    }
  }

  // Return true if we can use this resource from this thread, either because
  // we'll use the lock or because this is the only thread that will access
  // the protected resource.
#ifdef MOZ_DEBUG
  bool SafeOnThisThread() const {
    return mDoLock == MUST_LOCK || GetThreadId() == mThreadId;
  }
#endif

  bool LockIsEnabled() const { return mDoLock == MUST_LOCK; }

 private:
  bool ShouldLock() {
#ifndef XP_WIN
    MOZ_ASSERT(!mDeniedAfterFork);
#endif

    if (mDoLock == MUST_LOCK) {
      return true;
    }

    MOZ_ASSERT(GetThreadId() == mThreadId);
    return false;
  }

  DoLock mDoLock;
#ifdef MOZ_DEBUG
  ThreadId mThreadId;
#  ifndef XP_WIN
  bool mDeniedAfterFork = false;
#  endif
#endif
};
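
// Illustrative only (hypothetical names): a MaybeMutex is Init()ed with
// MUST_LOCK when the protected resource may be touched from several threads,
// or with AVOID_LOCK_UNSAFE when the caller knows a single thread owns it;
// in the latter case Lock()/Unlock() become no-ops and, in debug builds,
// assert that the owning thread is the one accessing the resource.
//
//   MaybeMutex mLock;
//
//   bool Setup(bool aSingleThreaded) {
//     return mLock.Init(aSingleThreaded ? MaybeMutex::AVOID_LOCK_UNSAFE
//                                       : MaybeMutex::MUST_LOCK);
//   }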

template <typename T>
struct MOZ_SCOPED_CAPABILITY MOZ_RAII AutoLock {
  explicit AutoLock(T& aMutex) MOZ_CAPABILITY_ACQUIRE(aMutex)
      : mMutex(aMutex) {
    mMutex.Lock();
  }

  ~AutoLock() MOZ_CAPABILITY_RELEASE() { mMutex.Unlock(); }

  AutoLock(const AutoLock&) = delete;
  AutoLock(AutoLock&&) = delete;

 private:
  T& mMutex;
};

using MutexAutoLock = AutoLock<Mutex>;

using MaybeMutexAutoLock = AutoLock<MaybeMutex>;
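
// Illustrative only (hypothetical function): the RAII wrapper is the
// preferred way to hold one of these locks for the duration of a scope.
//
//   void UpdateStats(Mutex& aLock) {
//     MutexAutoLock lock(aLock);
//     // ... aLock is held here and released when `lock` leaves scope ...
//   }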

#endif