/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* CRTP refcounting templates. Do not use unless you are an Expert. */

#ifndef mozilla_RefCounted_h
#define mozilla_RefCounted_h

#include <atomic>
#include <type_traits>

#include "mozilla/AlreadyAddRefed.h"
#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/RefCountType.h"

#if defined(MOZILLA_INTERNAL_API)
#  include "nsXPCOM.h"
#endif

#if defined(MOZILLA_INTERNAL_API) && \
    (defined(DEBUG) || defined(FORCE_BUILD_REFCNT_LOGGING))
#  define MOZ_REFCOUNTED_LEAK_CHECKING
#endif

namespace mozilla {

/**
 * RefCounted<T> is a sort of a "mixin" for a class T.  RefCounted
 * manages, well, refcounting for T, and because RefCounted is
 * parameterized on T, RefCounted<T> can call T's destructor directly.
 * This means T doesn't need to have a virtual dtor and so doesn't
 * have to pay the price of a vtable entry in each instance.
 *
 * RefCounted<T> is created with refcount == 0.  Newly-allocated
 * RefCounted<T> must immediately be assigned to a RefPtr to make the
 * refcount > 0.  It's an error to allocate and free a bare
 * RefCounted<T>, i.e. outside of the RefPtr machinery.  Attempts to
 * do so will abort DEBUG builds.
 *
 * Live RefCounted<T> have refcount > 0.  The lifetime (refcounts) of
 * live RefCounted<T> are controlled by RefPtr<T> and
 * RefPtr<super/subclass of T>.  Upon a transition from refcount==1
 * to 0, the RefCounted<T> "dies" and is destroyed.  The "destroyed"
 * state is represented in DEBUG builds by refcount==0xffffdead.  This
 * state distinguishes use-before-ref (refcount==0) from
 * use-after-destroy (refcount==0xffffdead).
 *
 * Note that when deriving from RefCounted or AtomicRefCounted, you
 * should add MOZ_DECLARE_REFCOUNTED_TYPENAME(ClassName) to the public
 * section of your class, where ClassName is the name of your class.
 *
 * Note: SpiderMonkey should use js::RefCounted instead since that type
 * will use appropriate js_delete and also not break ref-count logging.
 */
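
// Illustrative sketch (not part of the original header): a minimal class
// using RefCounted<T> with RefPtr<T>. `Foo` is a hypothetical name.
//
//   class Foo : public RefCounted<Foo> {
//    public:
//     MOZ_DECLARE_REFCOUNTED_TYPENAME(Foo)
//   };
//
//   RefPtr<Foo> foo = new Foo();  // refcount 0 -> 1; never leave it bare
//   // When the last RefPtr releases the object, the refcount falls back to
//   // 0 and RefCounted<Foo> deletes the Foo.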

namespace detail {

const MozRefCountType DEAD = 0xffffdead;

// When building code that gets compiled into Gecko, try to use the
// trace-refcount leak logging facilities.
class RefCountLogger {
 public:
  // Called by `RefCounted`-like classes to log a successful AddRef call in the
  // Gecko leak-logging system. This call is a no-op outside of Gecko. Should be
  // called after incrementing the reference count.
  template <class T>
  static void logAddRef(const T* aPointer, MozRefCountType aRefCount) {
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
    const void* pointer = aPointer;
    const char* typeName = aPointer->typeName();
    uint32_t typeSize = aPointer->typeSize();
    NS_LogAddRef(const_cast<void*>(pointer), aRefCount, typeName, typeSize);
#endif
  }

  // Created by `RefCounted`-like classes to log a successful Release call in
  // the Gecko leak-logging system. The constructor should be invoked before the
  // refcount is decremented to avoid invoking `typeName()` with a zero
  // reference count. This call is a no-op outside of Gecko.
  class MOZ_STACK_CLASS ReleaseLogger final {
   public:
    template <class T>
    explicit ReleaseLogger(const T* aPointer)
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
        : mPointer(aPointer),
          mTypeName(aPointer->typeName())
#endif
    {
    }

    void logRelease(MozRefCountType aRefCount) {
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
      MOZ_ASSERT(aRefCount != DEAD);
      NS_LogRelease(const_cast<void*>(mPointer), aRefCount, mTypeName);
#endif
    }

#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
    const void* mPointer;
    const char* mTypeName;
#endif
  };
};

// This is used by WeakPtr.h as well as this file.
enum RefCountAtomicity { AtomicRefCount, NonAtomicRefCount };

template <typename T, RefCountAtomicity Atomicity>
class RC {
 public:
  explicit RC(T aCount) : mValue(aCount) {}

  RC(const RC&) = delete;
  RC& operator=(const RC&) = delete;
  RC(RC&&) = delete;
  RC& operator=(RC&&) = delete;

  T operator++() { return ++mValue; }
  T operator--() { return --mValue; }

  // This method is only called in debug builds, so we're not too concerned
  // about its performance.
  void operator=(const T& aValue) { mValue = aValue; }

  operator T() const { return mValue; }

  // Non-atomic counterpart of IncrementIfNonzero below (used by WeakPtr.h);
  // returns the new value, or 0 if the count was already zero.
  T IncrementIfNonzero() { return mValue == 0 ? 0 : ++mValue; }

 private:
  T mValue;
};

template <typename T>
class RC<T, AtomicRefCount> {
 public:
  explicit RC(T aCount) : mValue(aCount) {}

  RC(const RC&) = delete;
  RC& operator=(const RC&) = delete;
  RC(RC&&) = delete;
  RC& operator=(RC&&) = delete;

  T operator++() {
    // Memory synchronization is not required when incrementing a
    // reference count.  The first increment of a reference count on a
    // thread is not important, since the first use of the object on a
    // thread can happen before it.  What is important is the transfer
    // of the pointer to that thread, which may happen prior to the
    // first increment on that thread.  The necessary memory
    // synchronization is done by the mechanism that transfers the
    // pointer between threads.
    return mValue.fetch_add(1, std::memory_order_relaxed) + 1;
  }

  T operator--() {
    // Since this may be the last release on this thread, we need
    // release semantics so that prior writes on this thread are visible
    // to the thread that destroys the object when it reads mValue with
    // acquire semantics.
    T result = mValue.fetch_sub(1, std::memory_order_release) - 1;
    if (result == 0) {
      // We're going to destroy the object on this thread, so we need
      // acquire semantics to synchronize with the memory released by
      // the last release on other threads, that is, to ensure that
      // writes prior to that release are now visible on this thread.
#ifdef MOZ_TSAN
      // TSan doesn't understand std::atomic_thread_fence, so in order
      // to avoid a false positive for every time a refcounted object
      // is deleted, we replace the fence with an atomic operation.
      mValue.load(std::memory_order_acquire);
#else
      std::atomic_thread_fence(std::memory_order_acquire);
#endif
    }
    return result;
  }

  // This method is only called in debug builds, so we're not too concerned
  // about its performance.
  void operator=(const T& aValue) {
    mValue.store(aValue, std::memory_order_seq_cst);
  }

  operator T() const {
    // Use acquire semantics since we're not sure what the caller is
    // doing.
    return mValue.load(std::memory_order_acquire);
  }

  T IncrementIfNonzero() {
    // This can be a relaxed load as any write of 0 that we observe will leave
    // the field in a permanently zero (or `DEAD`) state (so a "stale" read of 0
    // is fine), and any other value is confirmed by the CAS below.
    //
    // This roughly matches rust's Arc::upgrade implementation as of rust 1.49.0.
    T prev = mValue.load(std::memory_order_relaxed);
    while (prev != 0) {
      MOZ_ASSERT(prev != detail::DEAD,
                 "Cannot IncrementIfNonzero if marked as dead!");
      // TODO: It may be possible to use relaxed success ordering here?
      if (mValue.compare_exchange_weak(prev, prev + 1,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
        return prev + 1;
      }
    }
    return 0;
  }

 private:
  std::atomic<T> mValue;
};
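
// Illustrative sketch (not part of the original header) of why the
// release/acquire pairing above matters when the last two references are
// dropped on different threads:
//
//   Thread A:                      Thread B (performs the final release):
//     obj->mData = 42;               if (--refcnt == 0) {  // fetch_sub(release)
//     --refcnt;  // release            // acquire fence: A's write to mData
//                                      // is guaranteed visible here
//                                      delete obj;
//                                    }
//
// Without the acquire fence before the delete, the destructor could read
// stale values written by another thread.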

template <typename T, RefCountAtomicity Atomicity>
class RefCounted {
 protected:
  RefCounted() : mRefCnt(0) {}
#ifdef DEBUG
  ~RefCounted() { MOZ_ASSERT(mRefCnt == detail::DEAD); }
#endif

 public:
  // Compatibility with RefPtr.
  void AddRef() const {
    // Note: this method must be thread safe for AtomicRefCounted.
    MOZ_ASSERT(int32_t(mRefCnt) >= 0);
    MozRefCountType cnt = ++mRefCnt;
    detail::RefCountLogger::logAddRef(static_cast<const T*>(this), cnt);
  }

  void Release() const {
    // Note: this method must be thread safe for AtomicRefCounted.
    MOZ_ASSERT(int32_t(mRefCnt) > 0);
    detail::RefCountLogger::ReleaseLogger logger(static_cast<const T*>(this));
    MozRefCountType cnt = --mRefCnt;
    // Note: it's not safe to touch |this| after decrementing the refcount,
    // except for below.
    logger.logRelease(cnt);
    if (0 == cnt) {
      // Because we have atomically decremented the refcount above, only
      // one thread can get a 0 count here, so as long as we can assume that
      // everything else in the system is accessing this object through
      // RefPtrs, it's safe to access |this| here.
#ifdef DEBUG
      mRefCnt = detail::DEAD;
#endif
      delete static_cast<const T*>(this);
    }
  }

  // Compatibility with wtf::RefPtr.
  void ref() { AddRef(); }
  void deref() { Release(); }
  MozRefCountType refCount() const { return mRefCnt; }
  bool hasOneRef() const {
    MOZ_ASSERT(mRefCnt > 0);
    return mRefCnt == 1;
  }

 private:
  mutable RC<MozRefCountType, Atomicity> mRefCnt;
};

#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
// Passing override for the optional argument marks the typeName and
// typeSize functions defined by this macro as overrides.
#  define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)                   \
    virtual const char* typeName() const __VA_ARGS__ { return #T; }         \
    virtual size_t typeSize() const __VA_ARGS__ { return sizeof(*this); }
#else
#  define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T, ...)
#endif

// Note that this macro is expanded unconditionally because it declares only
// two small inline functions which will hopefully get eliminated by the linker
// in non-leak-checking builds.
#define MOZ_DECLARE_REFCOUNTED_TYPENAME(T)              \
  const char* typeName() const { return #T; }           \
  size_t typeSize() const { return sizeof(*this); }
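
// For a hypothetical class Foo, MOZ_DECLARE_REFCOUNTED_TYPENAME(Foo) expands
// to (illustrative only):
//
//   const char* typeName() const { return "Foo"; }
//   size_t typeSize() const { return sizeof(*this); }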

}  // namespace detail

template <typename T>
class RefCounted : public detail::RefCounted<T, detail::NonAtomicRefCount> {
 public:
  ~RefCounted() {
    static_assert(std::is_base_of<RefCounted, T>::value,
                  "T must derive from RefCounted<T>");
  }
};

namespace external {

/**
 * AtomicRefCounted<T> is like RefCounted<T>, with an atomically updated
 * reference counter.
 *
 * NOTE: Please do not use this class, use NS_INLINE_DECL_THREADSAFE_REFCOUNTING
 * instead.
 */
template <typename T>
class AtomicRefCounted
    : public mozilla::detail::RefCounted<T, mozilla::detail::AtomicRefCount> {
 public:
  ~AtomicRefCounted() {
    static_assert(std::is_base_of<AtomicRefCounted, T>::value,
                  "T must derive from AtomicRefCounted<T>");
  }
};

}  // namespace external

}  // namespace mozilla

#endif  // mozilla_RefCounted_h