1 /* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 #include "mozilla/layers/SurfacePoolCA.h"
8 #import <CoreVideo/CVPixelBuffer.h>
9 #include <IOSurface/IOSurfaceTypes.h>
12 #include <unordered_set>
15 #include "mozilla/ProfilerLabels.h"
16 #include "mozilla/ProfilerMarkers.h"
17 #include "mozilla/StaticMutex.h"
18 #include "mozilla/StaticPrefs_gfx.h"
21 # include "GLContextCGL.h"
23 # include "GLContextEAGL.h"
26 #include "MozFramebuffer.h"
27 #include "ScopedGLHelpers.h"
38 using gl::GLContextCGL;
40 using gl::GLContextEAGL;
// Factory for the platform surface pool; on this platform it is always a
// SurfacePoolCA. aPoolSizeLimit caps how many unused surfaces the pool
// retains (see LockedPool::EnforcePoolSizeLimit).
43 /* static */ RefPtr<SurfacePool> SurfacePool::Create(size_t aPoolSizeLimit) {
44 return new SurfacePoolCA(aPoolSizeLimit);
47 // SurfacePoolCA::LockedPool
// Construct an empty pool-state object; at most aPoolSizeLimit unused
// surfaces will be retained by EnforcePoolSizeLimit.
SurfacePoolCA::LockedPool::LockedPool(size_t aPoolSizeLimit) : mPoolSizeLimit(aPoolSizeLimit) {}
// Destructor. Asserts that no wrappers or in-use surfaces remain, then
// clears the pending and available entry lists through MutateEntryStorage
// so that the final "Clear" mutation is profiled like any other mutation.
52 SurfacePoolCA::LockedPool::~LockedPool() {
55 "Any outstanding wrappers should have kept the surface pool alive");
56 MOZ_RELEASE_ASSERT(mInUseEntries.empty(),
57 "Leak! No more surfaces should be in use at this point.");
58 // Remove all entries in mPendingEntries and mAvailableEntries.
59 MutateEntryStorage("Clear", {}, [&]() {
60 mPendingEntries.Clear();
61 mAvailableEntries.Clear();
// Returns the SurfacePoolCAWrapperForGL for aGL, creating and caching one
// in mWrappers on the first request for that GL context.
65 RefPtr<SurfacePoolCAWrapperForGL> SurfacePoolCA::LockedPool::GetWrapperForGL(
66 SurfacePoolCA* aPool, GLContext* aGL) {
67 auto& wrapper = mWrappers[aGL];
69 wrapper = new SurfacePoolCAWrapperForGL(aPool, aGL);
// Drops all GL state owned by aGL: releases each entry's framebuffer
// resources that were created for aGL, and forgets the shared depth
// buffers registered for that context.
74 void SurfacePoolCA::LockedPool::DestroyGLResourcesForContext(GLContext* aGL) {
75 ForEachEntry([&](SurfacePoolEntry& entry) {
76 if (entry.mGLResources && entry.mGLResources->mGLContext == aGL) {
77 entry.mGLResources = Nothing();
80 mDepthBuffers.RemoveElementsBy(
81 [&](const DepthBufferEntry& entry) { return entry.mGLContext == aGL; });
// Funnel for every mutation of mInUseEntries / mPendingEntries /
// mAvailableEntries: snapshots the collection sizes, runs the mutation
// callback, and — when markers are being collected — emits a profiler
// marker describing the before/after counts, the mutation type, the
// surface size and the estimated total pool memory.
// The snapshots are [[maybe_unused]] because they are only read when the
// profiler is active.
85 void SurfacePoolCA::LockedPool::MutateEntryStorage(const char* aMutationType,
86 const gfx::IntSize& aSize,
88 [[maybe_unused]] size_t inUseCountBefore = mInUseEntries.size();
89 [[maybe_unused]] size_t pendingCountBefore = mPendingEntries.Length();
90 [[maybe_unused]] size_t availableCountBefore = mAvailableEntries.Length();
91 [[maybe_unused]] TimeStamp before = TimeStamp::Now();
95 if (profiler_thread_is_being_profiled_for_markers()) {
97 "SurfacePool", GRAPHICS, MarkerTiming::IntervalUntilNowFrom(before),
98 nsPrintfCString("%d -> %d in use | %d -> %d waiting for | %d -> %d "
99 "available | %s %dx%d | %dMB total memory",
100 int(inUseCountBefore), int(mInUseEntries.size()),
101 int(pendingCountBefore), int(mPendingEntries.Length()),
102 int(availableCountBefore),
103 int(mAvailableEntries.Length()), aMutationType,
104 aSize.width, aSize.height,
105 int(EstimateTotalMemory() / 1000 / 1000)));
// Invokes aFn on every SurfacePoolEntry across all three collections:
// in-use, pending (waiting on the window server), and available.
109 template <typename F>
110 void SurfacePoolCA::LockedPool::ForEachEntry(F aFn) {
111 for (auto& iter : mInUseEntries) {
114 for (auto& entry : mPendingEntries) {
117 for (auto& entry : mAvailableEntries) {
// Rough estimate, in bytes, of the memory held by all pooled surfaces plus
// their depth/stencil buffers. Depth/stencil buffers can be shared between
// framebuffers, so they are deduplicated through a set before being summed.
122 uint64_t SurfacePoolCA::LockedPool::EstimateTotalMemory() {
123 std::unordered_set<const gl::DepthAndStencilBuffer*> depthAndStencilBuffers;
124 uint64_t memBytes = 0;
126 ForEachEntry([&](const SurfacePoolEntry& entry) {
127 auto size = entry.mSize;
    // 4 bytes per pixel (BGRA). NOTE(review): the multiply happens in int
    // before widening to uint64_t — assumed not to overflow for realistic
    // surface sizes; confirm.
128 memBytes += size.width * 4 * size.height;
129 if (entry.mGLResources) {
130 const auto& fb = *entry.mGLResources->mFramebuffer;
131 if (const auto& buffer = fb.GetDepthAndStencilBuffer()) {
132 depthAndStencilBuffers.insert(buffer.get());
137 for (const auto& buffer : depthAndStencilBuffers) {
138 memBytes += buffer->EstimateMemory();
// True if the available entry can satisfy a request: the size must match
// exactly, and any GL resources already attached to the entry must belong
// to the requesting context.
144 bool SurfacePoolCA::LockedPool::CanRecycleSurfaceForRequest(
145 const SurfacePoolEntry& aEntry, const IntSize& aSize, GLContext* aGL) {
146 if (aEntry.mSize != aSize) {
149 if (aEntry.mGLResources) {
150 return aEntry.mGLResources->mGLContext == aGL;
// Hands out an IOSurface of aSize for use with aGL: recycles a matching
// available entry when possible, otherwise creates a fresh 32BGRA
// IOSurface. Either way the entry ends up in mInUseEntries.
155 CFTypeRefPtr<IOSurfaceRef> SurfacePoolCA::LockedPool::ObtainSurfaceFromPool(
156 const IntSize& aSize, GLContext* aGL) {
157 // Do a linear scan through mAvailableEntries to find an eligible surface,
158 // going from oldest to newest. The size of this array is limited, so the
159 // linear scan is fast.
161 std::find_if(mAvailableEntries.begin(), mAvailableEntries.end(),
162 [&](const SurfacePoolEntry& aEntry) {
163 return CanRecycleSurfaceForRequest(aEntry, aSize, aGL);
165 if (iterToRecycle != mAvailableEntries.end()) {
166 CFTypeRefPtr<IOSurfaceRef> surface = iterToRecycle->mIOSurface;
167 MOZ_RELEASE_ASSERT(surface.get(), "Available surfaces should be non-null.");
168 // Move the entry from mAvailableEntries to mInUseEntries.
169 MutateEntryStorage("Recycle", aSize, [&]() {
170 mInUseEntries.insert({surface, std::move(*iterToRecycle)});
171 mAvailableEntries.RemoveElementAt(iterToRecycle);
  // No recyclable surface; allocate a new one (profiled, since IOSurface
  // creation is worth tracking per the label below).
176 AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(
177 "IOSurface creation", GRAPHICS_TileAllocation,
178 nsPrintfCString("%dx%d", aSize.width, aSize.height));
179 CFTypeRefPtr<IOSurfaceRef> surface =
180 CFTypeRefPtr<IOSurfaceRef>::WrapUnderCreateRule(
181 IOSurfaceCreate((__bridge CFDictionaryRef) @{
182 (__bridge NSString*)kIOSurfaceWidth : @(aSize.width),
183 (__bridge NSString*)kIOSurfaceHeight : @(aSize.height),
185 kIOSurfacePixelFormat : @(kCVPixelFormatType_32BGRA),
186 (__bridge NSString*)kIOSurfaceBytesPerElement : @(4),
  // Tag the surface as sRGB when native sRGB color management is enabled.
189 if (StaticPrefs::gfx_color_management_native_srgb()) {
190 IOSurfaceSetValue(surface.get(), CFSTR("IOSurfaceColorSpace"),
193 // Create a new entry in mInUseEntries.
194 MutateEntryStorage("Create", aSize, [&]() {
195 mInUseEntries.insert({surface, SurfacePoolEntry{aSize, surface, {}}});
// Accepts a surface back from its user. If the window server still holds
// it (IOSurfaceIsInUse), park it in mPendingEntries until a later
// CollectPendingSurfaces observes it unused; otherwise it becomes
// immediately recyclable in mAvailableEntries.
201 void SurfacePoolCA::LockedPool::ReturnSurfaceToPool(
202 CFTypeRefPtr<IOSurfaceRef> aSurface) {
203 auto inUseEntryIter = mInUseEntries.find(aSurface);
204 MOZ_RELEASE_ASSERT(inUseEntryIter != mInUseEntries.end());
205 if (IOSurfaceIsInUse(aSurface.get())) {
206 // Move the entry from mInUseEntries to mPendingEntries.
208 "Start waiting for", IntSize(inUseEntryIter->second.mSize), [&]() {
209 mPendingEntries.AppendElement(PendingSurfaceEntry{
210 std::move(inUseEntryIter->second), mCollectionGeneration, 0});
211 mInUseEntries.erase(inUseEntryIter);
214 // Move the entry from mInUseEntries to mAvailableEntries.
215 MOZ_RELEASE_ASSERT(inUseEntryIter->second.mIOSurface.get(),
216 "In use surfaces should be non-null.");
217 MutateEntryStorage("Retain", IntSize(inUseEntryIter->second.mSize), [&]() {
218 mAvailableEntries.AppendElement(std::move(inUseEntryIter->second));
219 mInUseEntries.erase(inUseEntryIter);
// Evicts from the front (oldest) of mAvailableEntries until at most
// mPoolSizeLimit unused surfaces remain.
224 void SurfacePoolCA::LockedPool::EnforcePoolSizeLimit() {
225 // Enforce the pool size limit, removing least-recently-used entries as
227 while (mAvailableEntries.Length() > mPoolSizeLimit) {
228 MutateEntryStorage("Evict", IntSize(mAvailableEntries[0].mSize),
229 [&]() { mAvailableEntries.RemoveElementAt(0); });
// Re-checks pending surfaces and moves the ones the window server has
// released into mAvailableEntries. Bumps and returns mCollectionGeneration
// so callers can avoid redundant re-checks; entries whose last check is
// newer than aCheckGenerationsUpTo are filtered by the generation
// comparison below.
233 uint64_t SurfacePoolCA::LockedPool::CollectPendingSurfaces(
234 uint64_t aCheckGenerationsUpTo) {
235 mCollectionGeneration++;
237 // Loop from back to front, potentially deleting items as we iterate.
238 // mPendingEntries is used as a set; the order of its items is not meaningful.
239 size_t i = mPendingEntries.Length();
242 auto& pendingSurf = mPendingEntries[i];
243 if (pendingSurf.mPreviousCheckGeneration > aCheckGenerationsUpTo) {
246 // Check if the window server is still using the surface. As long as it is
247 // doing that, we cannot move the surface to mAvailableSurfaces because
248 // anything we draw to it could reach the screen in a place where we don't
250 if (IOSurfaceIsInUse(pendingSurf.mEntry.mIOSurface.get())) {
251 // The surface is still in use. Update mPreviousCheckGeneration and
253 pendingSurf.mPreviousCheckGeneration = mCollectionGeneration;
254 pendingSurf.mCheckCount++;
255 if (pendingSurf.mCheckCount >= 30) {
256 // The window server has been holding on to this surface for an
257 // unreasonably long time. This is known to happen sometimes, for
258 // example in occluded windows or after a GPU switch. In that case,
259 // release our references to the surface so that it's Not Our Problem
260 // anymore. Remove the entry from mPendingEntries.
261 MutateEntryStorage("Eject", IntSize(pendingSurf.mEntry.mSize),
262 [&]() { mPendingEntries.RemoveElementAt(i); });
265 // The surface has become unused!
266 // Move the entry from mPendingEntries to mAvailableEntries.
267 MOZ_RELEASE_ASSERT(pendingSurf.mEntry.mIOSurface.get(),
268 "Pending surfaces should be non-null.");
270 "Stop waiting for", IntSize(pendingSurf.mEntry.mSize), [&]() {
271 mAvailableEntries.AppendElement(std::move(pendingSurf.mEntry));
272 mPendingEntries.RemoveElementAt(i);
276 return mCollectionGeneration;
// Called when aWrapper is destroyed: tears down all GL resources created
// for its context and removes the context's (unique) wrapper entry from
// mWrappers.
279 void SurfacePoolCA::LockedPool::OnWrapperDestroyed(
280 gl::GLContext* aGL, SurfacePoolCAWrapperForGL* aWrapper) {
282 DestroyGLResourcesForContext(aGL);
285 auto iter = mWrappers.find(aGL);
286 MOZ_RELEASE_ASSERT(iter != mWrappers.end());
287 MOZ_RELEASE_ASSERT(iter->second == aWrapper,
288 "Only one SurfacePoolCAWrapperForGL object should "
289 "exist for each GLContext* at any time");
290 mWrappers.erase(iter);
// Returns (by id) a GL framebuffer for drawing to aSurface on aGL. Reuses
// the entry's cached framebuffer when it satisfies the depth-buffer
// requirement; otherwise binds the IOSurface to a rectangle texture and
// wraps it in a new framebuffer, caching the result on the entry.
// A Nothing() return means failure (context lost or framebuffer
// incomplete).
// NOTE(review): both the CGL (macOS) and EAGL (iOS) code paths appear
// below; the preprocessor guards selecting between them are not visible in
// this excerpt — confirm against the full file.
293 Maybe<GLuint> SurfacePoolCA::LockedPool::GetFramebufferForSurface(
294 CFTypeRefPtr<IOSurfaceRef> aSurface, GLContext* aGL,
295 bool aNeedsDepthBuffer) {
296 MOZ_RELEASE_ASSERT(aGL);
298 auto inUseEntryIter = mInUseEntries.find(aSurface);
299 MOZ_RELEASE_ASSERT(inUseEntryIter != mInUseEntries.end());
301 SurfacePoolEntry& entry = inUseEntryIter->second;
302 if (entry.mGLResources) {
303 // We have an existing framebuffer.
304 MOZ_RELEASE_ASSERT(entry.mGLResources->mGLContext == aGL,
305 "Recycled surface that still had GL resources from a "
306 "different GL context. "
307 "This shouldn't happen.");
308 if (!aNeedsDepthBuffer || entry.mGLResources->mFramebuffer->HasDepth()) {
309 return Some(entry.mGLResources->mFramebuffer->mFB);
313 // No usable existing framebuffer, we need to create one.
315 AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(
316 "Framebuffer creation", GRAPHICS_TileAllocation,
317 nsPrintfCString("%dx%d", entry.mSize.width, entry.mSize.height));
320 RefPtr<GLContextCGL> cgl = GLContextCGL::Cast(aGL);
321 MOZ_RELEASE_ASSERT(cgl, "Unexpected GLContext type");
323 RefPtr<GLContextEAGL> eagl = GLContextEAGL::Cast(aGL);
324 MOZ_RELEASE_ASSERT(eagl, "Unexpected GLContext type");
327 if (!aGL->MakeCurrent()) {
328 // Context may have been destroyed.
  // Bind the IOSurface to a rectangle texture so the framebuffer can
  // render directly into the surface's pixels.
332 GLuint tex = aGL->CreateTexture();
334 const gl::ScopedBindTexture bindTex(aGL, tex,
335 LOCAL_GL_TEXTURE_RECTANGLE_ARB);
337 CGLTexImageIOSurface2D(cgl->GetCGLContext(), LOCAL_GL_TEXTURE_RECTANGLE_ARB,
338 LOCAL_GL_RGBA, entry.mSize.width, entry.mSize.height,
339 LOCAL_GL_BGRA, LOCAL_GL_UNSIGNED_INT_8_8_8_8_REV,
340 entry.mIOSurface.get(), 0);
342 MOZ_CRASH("unimplemented");
347 CreateFramebufferForTexture(aGL, entry.mSize, tex, aNeedsDepthBuffer);
349 // Framebuffer completeness check may have failed.
353 GLuint fbo = fb->mFB;
  // Cache the GL resources on the entry for the next request.
354 entry.mGLResources = Some(GLResourcesForSurface{aGL, std::move(fb)});
// Looks for a still-alive depth/stencil buffer previously created for aGL
// at aSize, so a new framebuffer can share it instead of allocating its
// own. Dead weak entries are pruned first.
358 RefPtr<gl::DepthAndStencilBuffer>
359 SurfacePoolCA::LockedPool::GetDepthBufferForSharing(GLContext* aGL,
360 const IntSize& aSize) {
361 // Clean out entries for which the weak pointer has become null.
362 mDepthBuffers.RemoveElementsBy(
363 [&](const DepthBufferEntry& entry) { return !entry.mBuffer; });
365 for (const auto& entry : mDepthBuffers) {
366 if (entry.mGLContext == aGL && entry.mSize == aSize) {
367 return entry.mBuffer.get();
// Wraps aTexture (a rectangle texture backed by the IOSurface) in a
// MozFramebuffer. When a depth buffer is needed, an existing matching
// depth/stencil buffer is shared if one is alive; any newly created one is
// registered (weakly) in mDepthBuffers for future sharing.
373 UniquePtr<gl::MozFramebuffer>
374 SurfacePoolCA::LockedPool::CreateFramebufferForTexture(GLContext* aGL,
375 const IntSize& aSize,
377 bool aNeedsDepthBuffer) {
378 if (aNeedsDepthBuffer) {
379 // Try to find an existing depth buffer of aSize in aGL and create a
380 // framebuffer that shares it.
381 if (auto buffer = GetDepthBufferForSharing(aGL, aSize)) {
382 return gl::MozFramebuffer::CreateForBackingWithSharedDepthAndStencil(
383 aSize, 0, LOCAL_GL_TEXTURE_RECTANGLE_ARB, aTexture, buffer);
387 // No depth buffer needed or we didn't find one. Create a framebuffer with a
388 // new depth buffer and store a weak pointer to the new depth buffer in
390 UniquePtr<gl::MozFramebuffer> fb = gl::MozFramebuffer::CreateForBacking(
391 aGL, aSize, 0, aNeedsDepthBuffer, LOCAL_GL_TEXTURE_RECTANGLE_ARB,
393 if (fb && fb->GetDepthAndStencilBuffer()) {
394 mDepthBuffers.AppendElement(
395 DepthBufferEntry{aGL, aSize, fb->GetDepthAndStencilBuffer().get()});
401 // SurfacePoolHandleCA
// Handle tied to one pool wrapper (and thus one GL context). Seeds the
// lock-protected previous-frame generation with the pool's current
// collection generation so OnBeginFrame starts from the right baseline.
403 SurfacePoolHandleCA::SurfacePoolHandleCA(
404 RefPtr<SurfacePoolCAWrapperForGL>&& aPoolWrapper,
405 uint64_t aCurrentCollectionGeneration)
406 : mPoolWrapper(aPoolWrapper),
407 mPreviousFrameCollectionGeneration(
408 "SurfacePoolHandleCA::mPreviousFrameCollectionGeneration") {
409 auto generation = mPreviousFrameCollectionGeneration.Lock();
410 *generation = aCurrentCollectionGeneration;
// Trivial out-of-line destructor; no explicit teardown is required.
SurfacePoolHandleCA::~SurfacePoolHandleCA() = default;
// At frame start: ask the pool to re-check pending surfaces (passing our
// last-seen generation so already-checked entries are skipped) and record
// the new generation for the next frame.
415 void SurfacePoolHandleCA::OnBeginFrame() {
416 auto generation = mPreviousFrameCollectionGeneration.Lock();
417 *generation = mPoolWrapper->mPool->CollectPendingSurfaces(*generation);
// At frame end: let the pool trim its available-surface list down to the
// configured size limit.
420 void SurfacePoolHandleCA::OnEndFrame() {
421 mPoolWrapper->mPool->EnforcePoolSizeLimit();
// Forwards to the pool, requesting a surface compatible with this handle's
// GL context.
424 CFTypeRefPtr<IOSurfaceRef> SurfacePoolHandleCA::ObtainSurfaceFromPool(
425 const IntSize& aSize) {
426 return mPoolWrapper->mPool->ObtainSurfaceFromPool(aSize, mPoolWrapper->mGL);
// Forwards the returned surface to the pool.
429 void SurfacePoolHandleCA::ReturnSurfaceToPool(
430 CFTypeRefPtr<IOSurfaceRef> aSurface) {
431 mPoolWrapper->mPool->ReturnSurfaceToPool(aSurface);
// Forwards to the pool, using this handle's GL context for the lookup or
// framebuffer creation.
434 Maybe<GLuint> SurfacePoolHandleCA::GetFramebufferForSurface(
435 CFTypeRefPtr<IOSurfaceRef> aSurface, bool aNeedsDepthBuffer) {
436 return mPoolWrapper->mPool->GetFramebufferForSurface(
437 aSurface, mPoolWrapper->mGL, aNeedsDepthBuffer);
// Construct the pool: all mutable state lives in mPool, a lock-protected
// LockedPool; the string names the lock for diagnostics.
SurfacePoolCA::SurfacePoolCA(size_t aPoolSizeLimit) : mPool(LockedPool(aPoolSizeLimit), "SurfacePoolCA::mPool") {}
// Trivial out-of-line destructor; LockedPool's destructor performs the
// actual teardown checks.
SurfacePoolCA::~SurfacePoolCA() = default;
// Creates a handle for aGL. The wrapper and the current collection
// generation are fetched while holding the mPool lock; the handle itself
// is constructed after the lock is released (see comment below).
447 RefPtr<SurfacePoolHandle> SurfacePoolCA::GetHandleForGL(GLContext* aGL) {
448 RefPtr<SurfacePoolCAWrapperForGL> wrapper;
449 uint64_t collectionGeneration = 0;
451 auto pool = mPool.Lock();
452 wrapper = pool->GetWrapperForGL(this, aGL);
453 collectionGeneration = pool->mCollectionGeneration;
456 // Run the SurfacePoolHandleCA constructor outside of the lock so that the
457 // mPool lock and the handle's lock are always ordered the same way.
458 return new SurfacePoolHandleCA(std::move(wrapper), collectionGeneration);
// Thread-safe forwarder: locks mPool and delegates to LockedPool.
461 void SurfacePoolCA::DestroyGLResourcesForContext(GLContext* aGL) {
462 auto pool = mPool.Lock();
463 pool->DestroyGLResourcesForContext(aGL);
// Thread-safe forwarder: locks mPool and delegates to LockedPool.
466 CFTypeRefPtr<IOSurfaceRef> SurfacePoolCA::ObtainSurfaceFromPool(
467 const IntSize& aSize, GLContext* aGL) {
468 auto pool = mPool.Lock();
469 return pool->ObtainSurfaceFromPool(aSize, aGL);
// Thread-safe forwarder: locks mPool and delegates to LockedPool.
472 void SurfacePoolCA::ReturnSurfaceToPool(CFTypeRefPtr<IOSurfaceRef> aSurface) {
473 auto pool = mPool.Lock();
474 pool->ReturnSurfaceToPool(aSurface);
// Thread-safe forwarder: locks mPool and delegates to LockedPool.
477 uint64_t SurfacePoolCA::CollectPendingSurfaces(uint64_t aCheckGenerationsUpTo) {
478 auto pool = mPool.Lock();
479 return pool->CollectPendingSurfaces(aCheckGenerationsUpTo);
// Thread-safe forwarder: locks mPool and delegates to LockedPool.
481 void SurfacePoolCA::EnforcePoolSizeLimit() {
482 auto pool = mPool.Lock();
483 pool->EnforcePoolSizeLimit();
// Thread-safe forwarder: locks mPool and delegates to LockedPool.
486 Maybe<GLuint> SurfacePoolCA::GetFramebufferForSurface(
487 CFTypeRefPtr<IOSurfaceRef> aSurface, GLContext* aGL,
488 bool aNeedsDepthBuffer) {
489 auto pool = mPool.Lock();
490 return pool->GetFramebufferForSurface(aSurface, aGL, aNeedsDepthBuffer);
// Thread-safe forwarder: locks mPool and delegates to LockedPool.
493 void SurfacePoolCA::OnWrapperDestroyed(gl::GLContext* aGL,
494 SurfacePoolCAWrapperForGL* aWrapper) {
495 auto pool = mPool.Lock();
496 return pool->OnWrapperDestroyed(aGL, aWrapper);
499 } // namespace layers
500 } // namespace mozilla