1 /* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 #include "mozilla/layers/SurfacePoolCA.h"
8 #import <CoreVideo/CVPixelBuffer.h>
11 #include <unordered_set>
14 #include "mozilla/ProfilerLabels.h"
15 #include "mozilla/ProfilerMarkers.h"
16 #include "mozilla/StaticMutex.h"
17 #include "mozilla/StaticPrefs_gfx.h"
19 #include "GLContextCGL.h"
20 #include "MozFramebuffer.h"
21 #include "ScopedGLHelpers.h"
31 using gl::GLContextCGL;
/* static */ RefPtr<SurfacePool> SurfacePool::Create(size_t aPoolSizeLimit) {
  // Factory for the macOS/CoreAnimation surface pool implementation.
  // aPoolSizeLimit caps how many unused surfaces are kept for recycling.
  return new SurfacePoolCA(aPoolSizeLimit);
}
37 // SurfacePoolCA::LockedPool
// Construct the lock-protected pool state. aPoolSizeLimit is the maximum
// number of entries kept in mAvailableEntries (enforced in
// EnforcePoolSizeLimit, not here).
SurfacePoolCA::LockedPool::LockedPool(size_t aPoolSizeLimit)
    : mPoolSizeLimit(aPoolSizeLimit) {}
SurfacePoolCA::LockedPool::~LockedPool() {
  // The pool is refcounted via its wrappers; if a wrapper still existed we
  // could not be running this destructor.
  MOZ_RELEASE_ASSERT(
      mWrappers.empty(),
      "Any outstanding wrappers should have kept the surface pool alive");
  MOZ_RELEASE_ASSERT(mInUseEntries.empty(),
                     "Leak! No more surfaces should be in use at this point.");
  // Remove all entries in mPendingEntries and mAvailableEntries.
  // Go through MutateEntryStorage so the profiler marker bookkeeping stays
  // consistent with every other storage mutation.
  MutateEntryStorage("Clear", {}, [&]() {
    mPendingEntries.Clear();
    mAvailableEntries.Clear();
  });
}
// Return the (per-GLContext) wrapper object for aGL, creating it on first
// request. The wrapper keeps aPool alive; mWrappers holds a raw entry that is
// removed again in OnWrapperDestroyed.
RefPtr<SurfacePoolCAWrapperForGL> SurfacePoolCA::LockedPool::GetWrapperForGL(
    SurfacePoolCA* aPool, GLContext* aGL) {
  auto& wrapper = mWrappers[aGL];  // default-constructs a null slot if absent
  if (!wrapper) {
    wrapper = new SurfacePoolCAWrapperForGL(aPool, aGL);
  }
  return wrapper;
}
// Drop every GL resource (framebuffers, shared depth buffers) that was
// created for aGL. Surfaces themselves stay in the pool; only their
// per-context GL state is released.
void SurfacePoolCA::LockedPool::DestroyGLResourcesForContext(GLContext* aGL) {
  ForEachEntry([&](SurfacePoolEntry& entry) {
    if (entry.mGLResources && entry.mGLResources->mGLContext == aGL) {
      entry.mGLResources = Nothing();
    }
  });
  // Shared depth buffers are keyed by context, too.
  mDepthBuffers.RemoveElementsBy(
      [&](const DepthBufferEntry& entry) { return entry.mGLContext == aGL; });
}
// Run aFn, which must be the only place that moves entries between
// mInUseEntries / mPendingEntries / mAvailableEntries, and emit a profiler
// marker describing the before/after counts. aMutationType is a short label
// ("Create", "Recycle", ...) and aSize the surface size involved.
template <typename F>
void SurfacePoolCA::LockedPool::MutateEntryStorage(const char* aMutationType,
                                                   const gfx::IntSize& aSize,
                                                   F aFn) {
  // Snapshot counts before the mutation; only used when the profiler runs.
  [[maybe_unused]] size_t inUseCountBefore = mInUseEntries.size();
  [[maybe_unused]] size_t pendingCountBefore = mPendingEntries.Length();
  [[maybe_unused]] size_t availableCountBefore = mAvailableEntries.Length();
  [[maybe_unused]] TimeStamp before = TimeStamp::Now();

  aFn();

  if (profiler_thread_is_being_profiled_for_markers()) {
    PROFILER_MARKER_TEXT(
        "SurfacePool", GRAPHICS, MarkerTiming::IntervalUntilNowFrom(before),
        nsPrintfCString("%d -> %d in use | %d -> %d waiting for | %d -> %d "
                        "available | %s %dx%d | %dMB total memory",
                        int(inUseCountBefore), int(mInUseEntries.size()),
                        int(pendingCountBefore), int(mPendingEntries.Length()),
                        int(availableCountBefore),
                        int(mAvailableEntries.Length()), aMutationType,
                        aSize.width, aSize.height,
                        int(EstimateTotalMemory() / 1000 / 1000)));
  }
}
// Invoke aFn(SurfacePoolEntry&) on every entry in the pool, regardless of
// which of the three storages it currently lives in.
template <typename F>
void SurfacePoolCA::LockedPool::ForEachEntry(F aFn) {
  for (auto& iter : mInUseEntries) {
    aFn(iter.second);  // map from surface -> entry
  }
  for (auto& entry : mPendingEntries) {
    aFn(entry.mEntry);  // PendingSurfaceEntry wraps the pool entry
  }
  for (auto& entry : mAvailableEntries) {
    aFn(entry);
  }
}
// Rough estimate, in bytes, of the memory held by all pooled surfaces plus
// their (de-duplicated) shared depth/stencil buffers. Used only for the
// profiler marker text.
uint64_t SurfacePoolCA::LockedPool::EstimateTotalMemory() {
  // Depth/stencil buffers can be shared between framebuffers; collect them in
  // a set so each one is counted once.
  std::unordered_set<const gl::DepthAndStencilBuffer*> depthAndStencilBuffers;
  uint64_t memBytes = 0;

  ForEachEntry([&](const SurfacePoolEntry& entry) {
    auto size = entry.mSize;
    // Widen before multiplying: width * 4 * height in plain int could
    // overflow for very large surfaces.
    memBytes += uint64_t(size.width) * 4 * uint64_t(size.height);
    if (entry.mGLResources) {
      const auto& fb = *entry.mGLResources->mFramebuffer;
      if (const auto& buffer = fb.GetDepthAndStencilBuffer()) {
        depthAndStencilBuffers.insert(buffer.get());
      }
    }
  });

  for (const auto& buffer : depthAndStencilBuffers) {
    memBytes += buffer->EstimateMemory();
  }
  return memBytes;
}
// An available entry can satisfy a request iff the size matches exactly and
// any GL resources it still carries belong to the requesting context
// (resources from another context would be unusable).
bool SurfacePoolCA::LockedPool::CanRecycleSurfaceForRequest(
    const SurfacePoolEntry& aEntry, const IntSize& aSize, GLContext* aGL) {
  if (aEntry.mSize != aSize) {
    return false;
  }
  if (aEntry.mGLResources) {
    return aEntry.mGLResources->mGLContext == aGL;
  }
  // No GL resources attached yet: usable from any context.
  return true;
}
// Hand out an IOSurface of aSize for use with aGL: recycle an available one
// if possible, otherwise create a new BGRA IOSurface. The returned surface is
// recorded in mInUseEntries. Returns null only if IOSurfaceCreate fails.
CFTypeRefPtr<IOSurfaceRef> SurfacePoolCA::LockedPool::ObtainSurfaceFromPool(
    const IntSize& aSize, GLContext* aGL) {
  // Do a linear scan through mAvailableEntries to find an eligible surface,
  // going from oldest to newest. The size of this array is limited, so the
  // linear scan is fast.
  auto iterToRecycle =
      std::find_if(mAvailableEntries.begin(), mAvailableEntries.end(),
                   [&](const SurfacePoolEntry& aEntry) {
                     return CanRecycleSurfaceForRequest(aEntry, aSize, aGL);
                   });
  if (iterToRecycle != mAvailableEntries.end()) {
    CFTypeRefPtr<IOSurfaceRef> surface = iterToRecycle->mIOSurface;
    MOZ_RELEASE_ASSERT(surface.get(), "Available surfaces should be non-null.");
    // Move the entry from mAvailableEntries to mInUseEntries.
    MutateEntryStorage("Recycle", aSize, [&]() {
      mInUseEntries.insert({surface, std::move(*iterToRecycle)});
      mAvailableEntries.RemoveElementAt(iterToRecycle);
    });
    return surface;
  }

  // Nothing to recycle; create a fresh surface.
  AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(
      "IOSurface creation", GRAPHICS_TileAllocation,
      nsPrintfCString("%dx%d", aSize.width, aSize.height));
  CFTypeRefPtr<IOSurfaceRef> surface =
      CFTypeRefPtr<IOSurfaceRef>::WrapUnderCreateRule(
          IOSurfaceCreate((__bridge CFDictionaryRef) @{
            (__bridge NSString*)kIOSurfaceWidth : @(aSize.width),
            (__bridge NSString*)kIOSurfaceHeight : @(aSize.height),
            (__bridge NSString*)
            kIOSurfacePixelFormat : @(kCVPixelFormatType_32BGRA),
            (__bridge NSString*)kIOSurfaceBytesPerElement : @(4),
          }));
  if (surface) {
    if (StaticPrefs::gfx_color_management_native_srgb()) {
      IOSurfaceSetValue(surface.get(), CFSTR("IOSurfaceColorSpace"),
                        kCGColorSpaceSRGB);
    }
    // Create a new entry in mInUseEntries.
    MutateEntryStorage("Create", aSize, [&]() {
      mInUseEntries.insert({surface, SurfacePoolEntry{aSize, surface, {}}});
    });
  }
  return surface;
}
// Return a surface previously handed out by ObtainSurfaceFromPool. If the
// window server still holds the surface (IOSurfaceIsInUse), park it in
// mPendingEntries until CollectPendingSurfaces sees it become free; otherwise
// it goes straight back to mAvailableEntries for recycling.
void SurfacePoolCA::LockedPool::ReturnSurfaceToPool(
    CFTypeRefPtr<IOSurfaceRef> aSurface) {
  auto inUseEntryIter = mInUseEntries.find(aSurface);
  MOZ_RELEASE_ASSERT(inUseEntryIter != mInUseEntries.end());
  if (IOSurfaceIsInUse(aSurface.get())) {
    // Move the entry from mInUseEntries to mPendingEntries.
    MutateEntryStorage(
        "Start waiting for", IntSize(inUseEntryIter->second.mSize), [&]() {
          // Record the current collection generation and a zero check count.
          mPendingEntries.AppendElement(PendingSurfaceEntry{
              std::move(inUseEntryIter->second), mCollectionGeneration, 0});
          mInUseEntries.erase(inUseEntryIter);
        });
  } else {
    // Move the entry from mInUseEntries to mAvailableEntries.
    MOZ_RELEASE_ASSERT(inUseEntryIter->second.mIOSurface.get(),
                       "In use surfaces should be non-null.");
    MutateEntryStorage("Retain", IntSize(inUseEntryIter->second.mSize), [&]() {
      mAvailableEntries.AppendElement(std::move(inUseEntryIter->second));
      mInUseEntries.erase(inUseEntryIter);
    });
  }
}
void SurfacePoolCA::LockedPool::EnforcePoolSizeLimit() {
  // Enforce the pool size limit, removing least-recently-used entries as
  // needed. Index 0 is the oldest entry because new entries are appended.
  while (mAvailableEntries.Length() > mPoolSizeLimit) {
    MutateEntryStorage("Evict", IntSize(mAvailableEntries[0].mSize),
                       [&]() { mAvailableEntries.RemoveElementAt(0); });
  }
}
// Scan mPendingEntries for surfaces the window server has released and move
// them to mAvailableEntries. Entries already checked after
// aCheckGenerationsUpTo are skipped (they were inspected recently enough).
// Returns the new collection generation, which callers store and pass back
// next frame.
uint64_t SurfacePoolCA::LockedPool::CollectPendingSurfaces(
    uint64_t aCheckGenerationsUpTo) {
  mCollectionGeneration++;

  // Loop from back to front, potentially deleting items as we iterate.
  // mPendingEntries is used as a set; the order of its items is not meaningful.
  size_t i = mPendingEntries.Length();
  while (i) {
    i -= 1;
    auto& pendingSurf = mPendingEntries[i];
    if (pendingSurf.mPreviousCheckGeneration > aCheckGenerationsUpTo) {
      // Already checked in a newer generation than the caller cares about.
      continue;
    }
    // Check if the window server is still using the surface. As long as it is
    // doing that, we cannot move the surface to mAvailableSurfaces because
    // anything we draw to it could reach the screen in a place where we don't
    // expect it.
    if (IOSurfaceIsInUse(pendingSurf.mEntry.mIOSurface.get())) {
      // The surface is still in use. Update mPreviousCheckGeneration and
      // mCheckCount.
      pendingSurf.mPreviousCheckGeneration = mCollectionGeneration;
      pendingSurf.mCheckCount++;
      if (pendingSurf.mCheckCount >= 30) {
        // The window server has been holding on to this surface for an
        // unreasonably long time. This is known to happen sometimes, for
        // example in occluded windows or after a GPU switch. In that case,
        // release our references to the surface so that it's Not Our Problem
        // anymore. Remove the entry from mPendingEntries.
        MutateEntryStorage("Eject", IntSize(pendingSurf.mEntry.mSize),
                           [&]() { mPendingEntries.RemoveElementAt(i); });
      }
    } else {
      // The surface has become unused!
      // Move the entry from mPendingEntries to mAvailableEntries.
      MOZ_RELEASE_ASSERT(pendingSurf.mEntry.mIOSurface.get(),
                         "Pending surfaces should be non-null.");
      MutateEntryStorage(
          "Stop waiting for", IntSize(pendingSurf.mEntry.mSize), [&]() {
            mAvailableEntries.AppendElement(std::move(pendingSurf.mEntry));
            mPendingEntries.RemoveElementAt(i);
          });
    }
  }
  return mCollectionGeneration;
}
// Called from the wrapper's destructor: tear down the GL resources tied to
// aGL (if any) and remove the wrapper's registration from mWrappers.
void SurfacePoolCA::LockedPool::OnWrapperDestroyed(
    gl::GLContext* aGL, SurfacePoolCAWrapperForGL* aWrapper) {
  if (aGL) {
    DestroyGLResourcesForContext(aGL);
  }

  auto iter = mWrappers.find(aGL);
  MOZ_RELEASE_ASSERT(iter != mWrappers.end());
  MOZ_RELEASE_ASSERT(iter->second == aWrapper,
                     "Only one SurfacePoolCAWrapperForGL object should "
                     "exist for each GLContext* at any time");
  mWrappers.erase(iter);
}
// Return a GL framebuffer object (in aGL) that renders into aSurface,
// creating and caching one on the entry if needed. Returns Nothing() when GL
// setup fails (lost context or incomplete framebuffer). aSurface must be an
// in-use surface obtained from this pool.
Maybe<GLuint> SurfacePoolCA::LockedPool::GetFramebufferForSurface(
    CFTypeRefPtr<IOSurfaceRef> aSurface, GLContext* aGL,
    bool aNeedsDepthBuffer) {
  MOZ_RELEASE_ASSERT(aGL);

  auto inUseEntryIter = mInUseEntries.find(aSurface);
  MOZ_RELEASE_ASSERT(inUseEntryIter != mInUseEntries.end());

  SurfacePoolEntry& entry = inUseEntryIter->second;
  if (entry.mGLResources) {
    // We have an existing framebuffer.
    MOZ_RELEASE_ASSERT(entry.mGLResources->mGLContext == aGL,
                       "Recycled surface that still had GL resources from a "
                       "different GL context. "
                       "This shouldn't happen.");
    if (!aNeedsDepthBuffer || entry.mGLResources->mFramebuffer->HasDepth()) {
      return Some(entry.mGLResources->mFramebuffer->mFB);
    }
  }

  // No usable existing framebuffer, we need to create one.

  AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(
      "Framebuffer creation", GRAPHICS_TileAllocation,
      nsPrintfCString("%dx%d", entry.mSize.width, entry.mSize.height));

  RefPtr<GLContextCGL> cgl = GLContextCGL::Cast(aGL);
  MOZ_RELEASE_ASSERT(cgl, "Unexpected GLContext type");

  if (!aGL->MakeCurrent()) {
    // Context may have been destroyed.
    return {};
  }

  // Bind the IOSurface to a rectangle texture in this context.
  GLuint tex = aGL->CreateTexture();
  {
    const gl::ScopedBindTexture bindTex(aGL, tex,
                                        LOCAL_GL_TEXTURE_RECTANGLE_ARB);
    CGLTexImageIOSurface2D(cgl->GetCGLContext(), LOCAL_GL_TEXTURE_RECTANGLE_ARB,
                           LOCAL_GL_RGBA, entry.mSize.width, entry.mSize.height,
                           LOCAL_GL_BGRA, LOCAL_GL_UNSIGNED_INT_8_8_8_8_REV,
                           entry.mIOSurface.get(), 0);
  }

  auto fb =
      CreateFramebufferForTexture(aGL, entry.mSize, tex, aNeedsDepthBuffer);
  if (!fb) {
    // Framebuffer completeness check may have failed.
    return {};
  }

  // Read mFB before moving fb into the entry.
  GLuint fbo = fb->mFB;
  entry.mGLResources = Some(GLResourcesForSurface{aGL, std::move(fb)});
  return Some(fbo);
}
// Find an existing depth/stencil buffer of aSize created in aGL that can be
// shared by a new framebuffer, or nullptr if none exists. Also prunes entries
// whose weak pointers have expired.
RefPtr<gl::DepthAndStencilBuffer>
SurfacePoolCA::LockedPool::GetDepthBufferForSharing(GLContext* aGL,
                                                    const IntSize& aSize) {
  // Clean out entries for which the weak pointer has become null.
  mDepthBuffers.RemoveElementsBy(
      [&](const DepthBufferEntry& entry) { return !entry.mBuffer; });

  for (const auto& entry : mDepthBuffers) {
    if (entry.mGLContext == aGL && entry.mSize == aSize) {
      return entry.mBuffer.get();
    }
  }
  return nullptr;
}
// Create a framebuffer backed by the rectangle texture aTexture. If a depth
// buffer is requested, prefer sharing an existing depth/stencil buffer of the
// same size; otherwise create a new one and remember it (weakly) for future
// sharing. Returns null if framebuffer creation/completeness fails.
UniquePtr<gl::MozFramebuffer>
SurfacePoolCA::LockedPool::CreateFramebufferForTexture(GLContext* aGL,
                                                       const IntSize& aSize,
                                                       GLuint aTexture,
                                                       bool aNeedsDepthBuffer) {
  if (aNeedsDepthBuffer) {
    // Try to find an existing depth buffer of aSize in aGL and create a
    // framebuffer that shares it.
    if (auto buffer = GetDepthBufferForSharing(aGL, aSize)) {
      return gl::MozFramebuffer::CreateForBackingWithSharedDepthAndStencil(
          aSize, 0, LOCAL_GL_TEXTURE_RECTANGLE_ARB, aTexture, buffer);
    }
  }

  // No depth buffer needed or we didn't find one. Create a framebuffer with a
  // new depth buffer and store a weak pointer to the new depth buffer in
  // mDepthBuffers.
  UniquePtr<gl::MozFramebuffer> fb = gl::MozFramebuffer::CreateForBacking(
      aGL, aSize, 0, aNeedsDepthBuffer, LOCAL_GL_TEXTURE_RECTANGLE_ARB,
      aTexture);
  if (fb && fb->GetDepthAndStencilBuffer()) {
    mDepthBuffers.AppendElement(
        DepthBufferEntry{aGL, aSize, fb->GetDepthAndStencilBuffer().get()});
  }
  return fb;
}
382 // SurfacePoolHandleCA
// Per-user handle onto the shared pool. Takes ownership of the wrapper
// (rvalue ref — move it instead of copying to avoid refcount churn) and seeds
// the per-handle collection generation with the pool's current one.
SurfacePoolHandleCA::SurfacePoolHandleCA(
    RefPtr<SurfacePoolCAWrapperForGL>&& aPoolWrapper,
    uint64_t aCurrentCollectionGeneration)
    : mPoolWrapper(std::move(aPoolWrapper)),
      mPreviousFrameCollectionGeneration(
          "SurfacePoolHandleCA::mPreviousFrameCollectionGeneration") {
  auto generation = mPreviousFrameCollectionGeneration.Lock();
  *generation = aCurrentCollectionGeneration;
}
// Out-of-line so the destructor of RefPtr members is emitted here; no custom
// logic is needed, so prefer `= default` over an empty body.
SurfacePoolHandleCA::~SurfacePoolHandleCA() = default;
// At frame start, let the pool re-check surfaces the window server might have
// released, skipping ones already checked since our previous frame.
void SurfacePoolHandleCA::OnBeginFrame() {
  auto generation = mPreviousFrameCollectionGeneration.Lock();
  *generation = mPoolWrapper->mPool->CollectPendingSurfaces(*generation);
}
// At frame end, trim the pool's available-surface cache to its size limit.
void SurfacePoolHandleCA::OnEndFrame() {
  mPoolWrapper->mPool->EnforcePoolSizeLimit();
}
// Forward to the pool, supplying this handle's GL context.
CFTypeRefPtr<IOSurfaceRef> SurfacePoolHandleCA::ObtainSurfaceFromPool(
    const IntSize& aSize) {
  return mPoolWrapper->mPool->ObtainSurfaceFromPool(aSize, mPoolWrapper->mGL);
}
// Forward to the pool; the surface must have been obtained via this pool.
void SurfacePoolHandleCA::ReturnSurfaceToPool(
    CFTypeRefPtr<IOSurfaceRef> aSurface) {
  mPoolWrapper->mPool->ReturnSurfaceToPool(aSurface);
}
// Forward to the pool, supplying this handle's GL context.
Maybe<GLuint> SurfacePoolHandleCA::GetFramebufferForSurface(
    CFTypeRefPtr<IOSurfaceRef> aSurface, bool aNeedsDepthBuffer) {
  return mPoolWrapper->mPool->GetFramebufferForSurface(
      aSurface, mPoolWrapper->mGL, aNeedsDepthBuffer);
}
// Wrap the LockedPool state in a named lock ("SurfacePoolCA::mPool");
// all public methods acquire it before touching pool state.
SurfacePoolCA::SurfacePoolCA(size_t aPoolSizeLimit)
    : mPool(LockedPool(aPoolSizeLimit), "SurfacePoolCA::mPool") {}
// No custom teardown; prefer `= default` over an empty body.
SurfacePoolCA::~SurfacePoolCA() = default;
// Create a handle for aGL. The wrapper lookup happens under mPool's lock,
// but the handle construction happens after releasing it (see comment below).
RefPtr<SurfacePoolHandle> SurfacePoolCA::GetHandleForGL(GLContext* aGL) {
  RefPtr<SurfacePoolCAWrapperForGL> wrapper;
  uint64_t collectionGeneration = 0;
  {
    auto pool = mPool.Lock();
    wrapper = pool->GetWrapperForGL(this, aGL);
    collectionGeneration = pool->mCollectionGeneration;
  }

  // Run the SurfacePoolHandleCA constructor outside of the lock so that the
  // mPool lock and the handle's lock are always ordered the same way.
  return new SurfacePoolHandleCA(std::move(wrapper), collectionGeneration);
}
// Lock-acquiring forwarder to LockedPool::DestroyGLResourcesForContext.
void SurfacePoolCA::DestroyGLResourcesForContext(GLContext* aGL) {
  auto pool = mPool.Lock();
  pool->DestroyGLResourcesForContext(aGL);
}
// Lock-acquiring forwarder to LockedPool::ObtainSurfaceFromPool.
CFTypeRefPtr<IOSurfaceRef> SurfacePoolCA::ObtainSurfaceFromPool(
    const IntSize& aSize, GLContext* aGL) {
  auto pool = mPool.Lock();
  return pool->ObtainSurfaceFromPool(aSize, aGL);
}
// Lock-acquiring forwarder to LockedPool::ReturnSurfaceToPool.
void SurfacePoolCA::ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface) {
  auto pool = mPool.Lock();
  pool->ReturnSurfaceToPool(aSurface);
}
// Lock-acquiring forwarder to LockedPool::CollectPendingSurfaces.
uint64_t SurfacePoolCA::CollectPendingSurfaces(uint64_t aCheckGenerationsUpTo) {
  auto pool = mPool.Lock();
  return pool->CollectPendingSurfaces(aCheckGenerationsUpTo);
}
// Lock-acquiring forwarder to LockedPool::EnforcePoolSizeLimit.
void SurfacePoolCA::EnforcePoolSizeLimit() {
  auto pool = mPool.Lock();
  pool->EnforcePoolSizeLimit();
}
// Lock-acquiring forwarder to LockedPool::GetFramebufferForSurface.
Maybe<GLuint> SurfacePoolCA::GetFramebufferForSurface(
    CFTypeRefPtr<IOSurfaceRef> aSurface, GLContext* aGL,
    bool aNeedsDepthBuffer) {
  auto pool = mPool.Lock();
  return pool->GetFramebufferForSurface(aSurface, aGL, aNeedsDepthBuffer);
}
// Lock-acquiring forwarder to LockedPool::OnWrapperDestroyed.
// (Both functions return void, so no `return` keyword is needed.)
void SurfacePoolCA::OnWrapperDestroyed(gl::GLContext* aGL,
                                       SurfacePoolCAWrapperForGL* aWrapper) {
  auto pool = mPool.Lock();
  pool->OnWrapperDestroyed(aGL, aWrapper);
}
480 } // namespace layers
481 } // namespace mozilla