Bug 1827566 - Isolate SSE2 requirements to SSE-compiled file r=glandium
[gecko.git] / mfbt / RefCounted.h
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* CRTP refcounting templates. Do not use unless you are an Expert. */

#ifndef mozilla_RefCounted_h
#define mozilla_RefCounted_h

#include <utility>

#include "mozilla/AlreadyAddRefed.h"
#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/RefCountType.h"

#ifdef __wasi__
#  include "mozilla/WasiAtomic.h"
#else
#  include <atomic>
#endif  // __wasi__

#if defined(MOZILLA_INTERNAL_API)
#  include "nsXPCOM.h"
#endif

#if defined(MOZILLA_INTERNAL_API) && defined(NS_BUILD_REFCNT_LOGGING)
#  define MOZ_REFCOUNTED_LEAK_CHECKING
#endif

namespace mozilla {

/**
 * RefCounted<T> is a sort of a "mixin" for a class T. RefCounted
 * manages, well, refcounting for T, and because RefCounted is
 * parameterized on T, RefCounted<T> can call T's destructor directly.
 * This means T doesn't need to have a virtual dtor and so doesn't
 * need a vtable.
 *
 * RefCounted<T> is created with refcount == 0. Newly-allocated
 * RefCounted<T> must immediately be assigned to a RefPtr to make the
 * refcount > 0. It's an error to allocate and free a bare
 * RefCounted<T>, i.e. outside of the RefPtr machinery. Attempts to
 * do so will abort DEBUG builds.
 *
 * Live RefCounted<T> have refcount > 0. The lifetime (refcounts) of
 * live RefCounted<T> are controlled by RefPtr<T> and
 * RefPtr<super/subclass of T>. Upon a transition from refcount==1
 * to 0, the RefCounted<T> "dies" and is destroyed. The "destroyed"
 * state is represented in DEBUG builds by refcount==0xffffdead. This
 * state distinguishes use-before-ref (refcount==0) from
 * use-after-destroy (refcount==0xffffdead).
 *
 * Note that when deriving from RefCounted or AtomicRefCounted, you
 * should add MOZ_DECLARE_REFCOUNTED_TYPENAME(ClassName) to the public
 * section of your class, where ClassName is the name of your class.
 *
 * Note: SpiderMonkey should use js::RefCounted instead since that type
 * will use appropriate js_delete and also not break ref-count logging.
 */
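/*
 * Illustrative usage sketch (the class name MyClass is hypothetical, and
 * "mozilla/RefPtr.h" is assumed to be included): derive from RefCounted<T>,
 * declare the typename macro in the public section, and manage the object
 * only through RefPtr.
 *
 *   class MyClass : public mozilla::RefCounted<MyClass> {
 *    public:
 *     MOZ_DECLARE_REFCOUNTED_TYPENAME(MyClass)
 *     void DoSomething();
 *   };
 *
 *   RefPtr<MyClass> obj = new MyClass();  // refcount: 0 -> 1
 *   obj->DoSomething();
 *   // When the last RefPtr goes away, Release() deletes the object.
 */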
namespace detail {
const MozRefCountType DEAD = 0xffffdead;

// When building code that gets compiled into Gecko, try to use the
// trace-refcount leak logging facilities.
class RefCountLogger {
 public:
  // Called by `RefCounted`-like classes to log a successful AddRef call in the
  // Gecko leak-logging system. This call is a no-op outside of Gecko. Should be
  // called after incrementing the reference count.
  template <class T>
  static void logAddRef(const T* aPointer, MozRefCountType aRefCount) {
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
    const void* pointer = aPointer;
    const char* typeName = aPointer->typeName();
    uint32_t typeSize = aPointer->typeSize();
    NS_LogAddRef(const_cast<void*>(pointer), aRefCount, typeName, typeSize);
#endif
  }

  // Created by `RefCounted`-like classes to log a successful Release call in
  // the Gecko leak-logging system. The constructor should be invoked before the
  // refcount is decremented to avoid invoking `typeName()` with a zero
  // reference count. This call is a no-op outside of Gecko.
  class MOZ_STACK_CLASS ReleaseLogger final {
   public:
    template <class T>
    explicit ReleaseLogger(const T* aPointer)
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
        : mPointer(aPointer),
          mTypeName(aPointer->typeName())
#endif
    {
    }

    void logRelease(MozRefCountType aRefCount) {
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
      MOZ_ASSERT(aRefCount != DEAD);
      NS_LogRelease(const_cast<void*>(mPointer), aRefCount, mTypeName);
#endif
    }

#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
    const void* mPointer;
    const char* mTypeName;
#endif
  };
};

// This is used in WeakPtr.h as well as this file.
enum RefCountAtomicity { AtomicRefCount, NonAtomicRefCount };

template <typename T, RefCountAtomicity Atomicity>
class RC {
 public:
  explicit RC(T aCount) : mValue(aCount) {}

  RC(const RC&) = delete;
  RC& operator=(const RC&) = delete;
  RC(RC&&) = delete;
  RC& operator=(RC&&) = delete;

  T operator++() { return ++mValue; }
  T operator--() { return --mValue; }

#ifdef DEBUG
  void operator=(const T& aValue) { mValue = aValue; }
#endif

  operator T() const { return mValue; }

 private:
  T mValue;
};

template <typename T>
class RC<T, AtomicRefCount> {
 public:
  explicit RC(T aCount) : mValue(aCount) {}

  RC(const RC&) = delete;
  RC& operator=(const RC&) = delete;
  RC(RC&&) = delete;
  RC& operator=(RC&&) = delete;

  T operator++() {
    // Memory synchronization is not required when incrementing a
    // reference count. The first increment of a reference count on a
    // thread is not important, since the first use of the object on a
    // thread can happen before it. What is important is the transfer
    // of the pointer to that thread, which may happen prior to the
    // first increment on that thread. The necessary memory
    // synchronization is done by the mechanism that transfers the
    // pointer between threads.
    return mValue.fetch_add(1, std::memory_order_relaxed) + 1;
  }

  T operator--() {
    // Since this may be the last release on this thread, we need
    // release semantics so that prior writes on this thread are visible
    // to the thread that destroys the object when it reads mValue with
    // acquire semantics.
    T result = mValue.fetch_sub(1, std::memory_order_release) - 1;
    if (result == 0) {
      // We're going to destroy the object on this thread, so we need
      // acquire semantics to synchronize with the memory released by
      // the last release on other threads, that is, to ensure that
      // writes prior to that release are now visible on this thread.
#if defined(MOZ_TSAN) || defined(__wasi__)
      // TSan doesn't understand std::atomic_thread_fence, so in order
      // to avoid a false positive for every time a refcounted object
      // is deleted, we replace the fence with an atomic operation.
      mValue.load(std::memory_order_acquire);
#else
      std::atomic_thread_fence(std::memory_order_acquire);
#endif
    }
    return result;
  }
#ifdef DEBUG
  // This method is only called in debug builds, so we're not too concerned
  // about its performance.
  void operator=(const T& aValue) {
    mValue.store(aValue, std::memory_order_seq_cst);
  }
#endif

  operator T() const {
    // Use acquire semantics since we're not sure what the caller is
    // doing.
    return mValue.load(std::memory_order_acquire);
  }
  T IncrementIfNonzero() {
    // This can be a relaxed load as any write of 0 that we observe will leave
    // the field in a permanently zero (or `DEAD`) state (so a "stale" read of 0
    // is fine), and any other value is confirmed by the CAS below.
    //
    // This roughly matches rust's Arc::upgrade implementation as of rust 1.49.0
    T prev = mValue.load(std::memory_order_relaxed);
    while (prev != 0) {
      MOZ_ASSERT(prev != detail::DEAD,
                 "Cannot IncrementIfNonzero if marked as dead!");
      // TODO: It may be possible to use relaxed success ordering here?
      if (mValue.compare_exchange_weak(prev, prev + 1,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
        return prev + 1;
      }
    }
    return 0;
  }
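  // Illustrative sketch of a hypothetical caller (a weak-to-strong upgrade):
  // a new strong reference may only be handed out when IncrementIfNonzero()
  // returns non-zero, since a return of 0 means the object is already being
  // (or has been) destroyed. The names mRefCnt and rawPtr are placeholders.
  //
  //   if (mRefCnt.IncrementIfNonzero() != 0) {
  //     return already_AddRefed<T>(rawPtr);  // increment pairs with a later
  //   }                                      // Release() by the new owner
  //   return nullptr;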

 private:
  std::atomic<T> mValue;
};

template <typename T, RefCountAtomicity Atomicity>
class RefCounted {
 protected:
  RefCounted() : mRefCnt(0) {}
#ifdef DEBUG
  ~RefCounted() { MOZ_ASSERT(mRefCnt == detail::DEAD); }
#endif

 public:
  // Compatibility with RefPtr.
  void AddRef() const {
    // Note: this method must be thread safe for AtomicRefCounted.
    MOZ_ASSERT(int32_t(mRefCnt) >= 0);
    MozRefCountType cnt = ++mRefCnt;
    detail::RefCountLogger::logAddRef(static_cast<const T*>(this), cnt);
  }

  void Release() const {
    // Note: this method must be thread safe for AtomicRefCounted.
    MOZ_ASSERT(int32_t(mRefCnt) > 0);
    detail::RefCountLogger::ReleaseLogger logger(static_cast<const T*>(this));
    MozRefCountType cnt = --mRefCnt;
    // Note: it's not safe to touch |this| after decrementing the refcount,
    // except for below.
    logger.logRelease(cnt);
    if (0 == cnt) {
      // Because we have atomically decremented the refcount above, only
      // one thread can get a 0 count here, so as long as we can assume that
      // everything else in the system is accessing this object through
      // RefPtrs, it's safe to access |this| here.
#ifdef DEBUG
      mRefCnt = detail::DEAD;
#endif
      delete static_cast<const T*>(this);
    }
  }

  // Compatibility with wtf::RefPtr.
  void ref() { AddRef(); }
  void deref() { Release(); }
  MozRefCountType refCount() const { return mRefCnt; }
  bool hasOneRef() const {
    MOZ_ASSERT(mRefCnt > 0);
    return mRefCnt == 1;
  }

 private:
  mutable RC<MozRefCountType, Atomicity> mRefCnt;
};

#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
// Passing override for the optional argument marks the typeName and
// typeSize functions defined by this macro as overrides.
#  define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)                 \
    virtual const char* typeName() const __VA_ARGS__ { return #T; }       \
    virtual size_t typeSize() const __VA_ARGS__ { return sizeof(*this); }
#else
#  define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)
#endif
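
// Illustrative sketch (the classes Base and Derived are hypothetical): the
// optional second argument is passed through __VA_ARGS__, so a subclass can
// mark its generated typeName()/typeSize() as overrides.
//
//   class Base {
//    public:
//     MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(Base)
//   };
//
//   class Derived : public Base {
//    public:
//     MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(Derived, override)
//   };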

// Note that this macro is expanded unconditionally because it declares only
// two small inline functions which will hopefully get eliminated by the linker
// in non-leak-checking builds.
#define MOZ_DECLARE_REFCOUNTED_TYPENAME(T)      \
  const char* typeName() const { return #T; }   \
  size_t typeSize() const { return sizeof(*this); }

}  // namespace detail

template <typename T>
class RefCounted : public detail::RefCounted<T, detail::NonAtomicRefCount> {
 public:
  ~RefCounted() {
    static_assert(std::is_base_of<RefCounted, T>::value,
                  "T must derive from RefCounted<T>");
  }
};

namespace external {

/**
 * AtomicRefCounted<T> is like RefCounted<T>, with an atomically updated
 * reference counter.
 *
 * NOTE: Please do not use this class, use NS_INLINE_DECL_THREADSAFE_REFCOUNTING
 * instead.
 */
template <typename T>
class AtomicRefCounted
    : public mozilla::detail::RefCounted<T, mozilla::detail::AtomicRefCount> {
 public:
  ~AtomicRefCounted() {
    static_assert(std::is_base_of<AtomicRefCounted, T>::value,
                  "T must derive from AtomicRefCounted<T>");
  }
};

}  // namespace external

}  // namespace mozilla

#endif  // mozilla_RefCounted_h