/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_layers_SurfacePoolCA_h
#define mozilla_layers_SurfacePoolCA_h

#include <IOSurface/IOSurface.h>

#include <deque>
#include <unordered_map>

#include "mozilla/Atomics.h"
#include "mozilla/DataMutex.h"

#include "mozilla/layers/SurfacePool.h"
#include "CFTypeRefPtr.h"
#include "MozFramebuffer.h"
#include "nsISupportsImpl.h"

namespace mozilla {

namespace gl {
class MozFramebuffer;
}  // namespace gl

namespace layers {

class SurfacePoolHandleCA;
struct SurfacePoolCAWrapperForGL;

// An implementation of SurfacePool for IOSurfaces and GL framebuffers.
// The goal of having this pool is to avoid creating and destroying IOSurfaces
// and framebuffers frequently, because doing so is expensive.
// SurfacePoolCA is threadsafe. All its data is wrapped inside LockedPool, and
// each access to LockedPool is guarded with a lock through DataMutex.
//
// The pool satisfies the following requirements:
// - It can be shared across windows, even across windows with different
//   GLContexts.
// - The number of unused surfaces that are available for recycling is capped
//   to a fixed value per pool, regardless of how many windows use that pool.
// - When all windows are closed (all handles are gone), no surfaces are kept
//   alive (the pool is destroyed).
// - There is an explicit way of deleting GL resources for a GLContext so that
//   it can happen at a deterministic time on the right thread.
// - Additionally, once a GLContext is no longer being used in any window
//   (really: any pool handle), all surface-associated GL resources of that
//   context are destroyed.
// - For every IOSurface, only one set of GL resources is in existence at any
//   given time. We don't want there to be framebuffers in two different
//   GLContexts for one surface.
// - We do not want to recycle an IOSurface that currently has GL resources of
//   context A for a pool handle that uses context B.
// - We need to delay IOSurface recycling until the window server is done with
//   the surface (`!IOSurfaceIsInUse(surf)`).
class SurfacePoolCA final : public SurfacePool {
 public:
  // Get a handle for a new window. aGL can be nullptr.
  RefPtr<SurfacePoolHandle> GetHandleForGL(gl::GLContext* aGL) override;
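
  // Illustrative sketch only (the call sites, variable names, and pool size
  // value shown are hypothetical, and surface lifetimes are simplified): a
  // compositor might create one shared pool and one handle per window, then
  // drive the handle once per frame:
  //
  //   RefPtr<SurfacePool> pool = SurfacePool::Create(/* aPoolSizeLimit */ 6);
  //   RefPtr<SurfacePoolHandle> handle = pool->GetHandleForGL(gl);
  //
  //   handle->OnBeginFrame();
  //   auto* ca = handle->AsSurfacePoolHandleCA();
  //   CFTypeRefPtr<IOSurfaceRef> surf = ca->ObtainSurfaceFromPool(size);
  //   // ... draw into surf and hand it to the window server ...
  //   ca->ReturnSurfaceToPool(surf);
  //   handle->OnEndFrame();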

  // Destroy all GL resources associated with aGL managed by this pool.
  void DestroyGLResourcesForContext(gl::GLContext* aGL) override;

 private:
  friend struct SurfacePoolCAWrapperForGL;
  friend class SurfacePoolHandleCA;
  friend RefPtr<SurfacePool> SurfacePool::Create(size_t aPoolSizeLimit);

  explicit SurfacePoolCA(size_t aPoolSizeLimit);
  ~SurfacePoolCA() override;

  // Get an existing surface of aSize from the pool or create a new surface.
  // The returned surface is guaranteed not to be in use by the window server.
  CFTypeRefPtr<IOSurfaceRef> ObtainSurfaceFromPool(const gfx::IntSize& aSize,
                                                   gl::GLContext* aGL);

  // Place a surface that was previously obtained from this pool back into the
  // pool. aSurface may or may not be in use by the window server.
  void ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface);

  // Re-check whether the window server is still using IOSurfaces that are
  // eligible for recycling. The purpose of the "generation" counter is to
  // reduce the number of calls to IOSurfaceIsInUse in a scenario where many
  // windows / handles are calling CollectPendingSurfaces in the same frame
  // (i.e. multiple simultaneously-animating windows).
  uint64_t CollectPendingSurfaces(uint64_t aCheckGenerationsUpTo);
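
  // Hedged sketch of the intended calling pattern (the surrounding caller is
  // hypothetical and locking is omitted): a handle passes back the generation
  // value it received last frame, so that several handles animating in the
  // same frame don't each re-run the IOSurfaceIsInUse checks:
  //
  //   uint64_t lastGen = ...;  // generation this handle saw last frame
  //   uint64_t newGen = pool->CollectPendingSurfaces(lastGen);
  //   // remember newGen and pass it back next frame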

  // Enforce the pool size limit by evicting surfaces as necessary. This
  // should happen at the end of the frame so that we can temporarily exceed
  // the limit within a frame.
  void EnforcePoolSizeLimit();

  // Get or create the framebuffer for the given surface and GL context.
  // The returned framebuffer handle will become invalid once
  // DestroyGLResourcesForContext or DecrementGLContextHandleCount is called.
  // The framebuffer's depth buffer (if present) may be shared between
  // multiple framebuffers! Do not assume anything about the depth buffer's
  // existing contents (i.e. clear it at the beginning of the draw), and do
  // not interleave drawing commands to different framebuffers in such a way
  // that the shared depth buffer could cause trouble.
  Maybe<GLuint> GetFramebufferForSurface(CFTypeRefPtr<IOSurfaceRef> aSurface,
                                         gl::GLContext* aGL,
                                         bool aNeedsDepthBuffer);
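
  // Hedged usage sketch (the fBindFramebuffer/fClear entry points are Gecko's
  // GLContext wrappers; the surrounding code is hypothetical): because the
  // depth buffer may be shared, clear it before drawing, and finish all draws
  // to one framebuffer before touching another:
  //
  //   Maybe<GLuint> fb = GetFramebufferForSurface(surf, gl, true);
  //   if (fb) {
  //     gl->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, *fb);
  //     gl->fClear(LOCAL_GL_COLOR_BUFFER_BIT | LOCAL_GL_DEPTH_BUFFER_BIT);
  //     // ... issue every draw call for this framebuffer here ...
  //   }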

  // Called by the destructor of SurfacePoolCAWrapperForGL so that we can
  // clear our weak reference to it and delete GL resources.
  void OnWrapperDestroyed(gl::GLContext* aGL,
                          SurfacePoolCAWrapperForGL* aWrapper);

  // The actual pool implementation lives in LockedPool, which is accessed in
  // a thread-safe manner.
  struct LockedPool {
    explicit LockedPool(size_t aPoolSizeLimit);
    LockedPool(LockedPool&&) = default;
    ~LockedPool();

    RefPtr<SurfacePoolCAWrapperForGL> GetWrapperForGL(SurfacePoolCA* aPool,
                                                      gl::GLContext* aGL);
    void DestroyGLResourcesForContext(gl::GLContext* aGL);

    CFTypeRefPtr<IOSurfaceRef> ObtainSurfaceFromPool(const gfx::IntSize& aSize,
                                                     gl::GLContext* aGL);
    void ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface);
    uint64_t CollectPendingSurfaces(uint64_t aCheckGenerationsUpTo);
    void EnforcePoolSizeLimit();
    Maybe<GLuint> GetFramebufferForSurface(CFTypeRefPtr<IOSurfaceRef> aSurface,
                                           gl::GLContext* aGL,
                                           bool aNeedsDepthBuffer);
    void OnWrapperDestroyed(gl::GLContext* aGL,
                            SurfacePoolCAWrapperForGL* aWrapper);
    uint64_t EstimateTotalMemory();

    uint64_t mCollectionGeneration = 0;

   protected:
    struct GLResourcesForSurface {
      RefPtr<gl::GLContext> mGLContext;            // non-null
      UniquePtr<gl::MozFramebuffer> mFramebuffer;  // non-null
    };

    struct SurfacePoolEntry {
      gfx::IntSize mSize;
      CFTypeRefPtr<IOSurfaceRef> mIOSurface;  // non-null
      Maybe<GLResourcesForSurface> mGLResources;
    };

    struct PendingSurfaceEntry {
      SurfacePoolEntry mEntry;
      // The value of LockedPool::mCollectionGeneration at the time
      // IOSurfaceIsInUse was last called for mEntry.mIOSurface.
      uint64_t mPreviousCheckGeneration;
      // The number of times an IOSurfaceIsInUse check has been performed.
      uint64_t mCheckCount;
    };

    template <typename F>
    void MutateEntryStorage(const char* aMutationType,
                            const gfx::IntSize& aSize, F aFn);

    template <typename F>
    void ForEachEntry(F aFn);

    bool CanRecycleSurfaceForRequest(const SurfacePoolEntry& aEntry,
                                     const gfx::IntSize& aSize,
                                     gl::GLContext* aGL);

    RefPtr<gl::DepthAndStencilBuffer> GetDepthBufferForSharing(
        gl::GLContext* aGL, const gfx::IntSize& aSize);
    UniquePtr<gl::MozFramebuffer> CreateFramebufferForTexture(
        gl::GLContext* aGL, const gfx::IntSize& aSize, GLuint aTexture,
        bool aNeedsDepthBuffer);

    // Every IOSurface that is managed by the pool is wrapped in a
    // SurfacePoolEntry object. Every entry is stored in one of three buckets
    // at any given time: mInUseEntries, mPendingEntries, or mAvailableEntries.
    // All mutations to these buckets are performed via calls to
    // MutateEntryStorage(). Entries can move between the buckets in the
    // following ways:
    //
    //                                [new]
    //                                  | Create
    //                                  v
    // +----------------------------------------------------------------+
    // |                          mInUseEntries                          |
    // +------+------------------------------+--------------------------+
    //        |                              ^     | Start waiting for
    //        |                      Recycle |     v
    //        |                              |  +-----------------------------+
    //        |                              |  |       mPendingEntries       |
    //        |                              |  +--+--------------------+-----+
    //        | Retain                       |     | Stop waiting for   |
    //        v                              |     v                    |
    // +-------------------------------------+--------+                 |
    // |              mAvailableEntries              |                  |
    // +-----------------------------+---------------+                  |
    //                               | Evict                            | Eject
    //                               v                                  v
    //                          [destroyed]                        [destroyed]
    //
    // Each arrow corresponds to one invocation of MutateEntryStorage() with
    // the arrow's label passed as the aMutationType string parameter.
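    //
    // For illustration only (the body below is hypothetical, not the actual
    // implementation): the "Create" arrow above would correspond to a call
    // shaped roughly like
    //
    //   MutateEntryStorage("Create", aSize, [&]() {
    //     mInUseEntries.insert(
    //         {aSurface, SurfacePoolEntry{aSize, aSurface, Nothing()}});
    //   });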

    // Stores the entries for surfaces that are in use by NativeLayerCA, i.e.
    // an entry is inside mInUseEntries between calls to
    // ObtainSurfaceFromPool() and ReturnSurfaceToPool().
    std::unordered_map<CFTypeRefPtr<IOSurfaceRef>, SurfacePoolEntry>
        mInUseEntries;

    // Stores entries which are no longer in use by NativeLayerCA but are
    // still in use by the window server, i.e. for which
    // IOSurfaceIsInUse(pendingSurfaceEntry.mEntry.mIOSurface.get()) still
    // returns true. These entries are checked once per frame inside
    // CollectPendingSurfaces(), and returned to mAvailableEntries once the
    // window server is done.
    nsTArray<PendingSurfaceEntry> mPendingEntries;

    // Stores entries which are available for recycling. These entries are
    // not in use by a NativeLayerCA or by the window server.
    nsTArray<SurfacePoolEntry> mAvailableEntries;

    // Keeps weak references to SurfacePoolCAWrapperForGL instances.
    // For each GLContext* value (including nullptr), only one wrapper can
    // exist at any time. The wrapper keeps a strong reference to us and
    // notifies us when it gets destroyed. At that point we can call
    // DestroyGLResourcesForContext because we know no other SurfaceHandles
    // for that context exist.
    std::unordered_map<gl::GLContext*, SurfacePoolCAWrapperForGL*> mWrappers;
    size_t mPoolSizeLimit = 0;

    struct DepthBufferEntry {
      RefPtr<gl::GLContext> mGLContext;
      gfx::IntSize mSize;
      WeakPtr<gl::DepthAndStencilBuffer> mBuffer;
    };

    nsTArray<DepthBufferEntry> mDepthBuffers;
  };

  DataMutex<LockedPool> mPool;
};

// One process-wide instance per (SurfacePoolCA*, GLContext*) pair.
// Keeps the SurfacePool alive, and the SurfacePool has a weak reference to the
// wrapper so that it can ensure that there's only one wrapper for it per
// GLContext* at any time.
struct SurfacePoolCAWrapperForGL {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SurfacePoolCAWrapperForGL);

  const RefPtr<SurfacePoolCA> mPool;  // non-null
  const RefPtr<gl::GLContext> mGL;    // can be null

  SurfacePoolCAWrapperForGL(SurfacePoolCA* aPool, gl::GLContext* aGL)
      : mPool(aPool), mGL(aGL) {}

 protected:
  ~SurfacePoolCAWrapperForGL() { mPool->OnWrapperDestroyed(mGL, this); }
};

// A surface pool handle that is stored on NativeLayerCA and keeps the
// SurfacePool alive.
class SurfacePoolHandleCA final : public SurfacePoolHandle {
 public:
  SurfacePoolHandleCA* AsSurfacePoolHandleCA() override { return this; }
  const auto& gl() { return mPoolWrapper->mGL; }
  CFTypeRefPtr<IOSurfaceRef> ObtainSurfaceFromPool(const gfx::IntSize& aSize);
  void ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface);
  Maybe<GLuint> GetFramebufferForSurface(CFTypeRefPtr<IOSurfaceRef> aSurface,
                                         bool aNeedsDepthBuffer);
  RefPtr<SurfacePool> Pool() override { return mPoolWrapper->mPool; }
  void OnBeginFrame() override;
  void OnEndFrame() override;

 private:
  friend class SurfacePoolCA;
  SurfacePoolHandleCA(RefPtr<SurfacePoolCAWrapperForGL>&& aPoolWrapper,
                      uint64_t aCurrentCollectionGeneration);
  ~SurfacePoolHandleCA() override;

  const RefPtr<SurfacePoolCAWrapperForGL> mPoolWrapper;
  DataMutex<uint64_t> mPreviousFrameCollectionGeneration;
};

}  // namespace layers
}  // namespace mozilla

#endif  // mozilla_layers_SurfacePoolCA_h