1 /* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 #include "mozilla/layers/NativeLayerCA.h"
8 #import <AppKit/NSAnimationContext.h>
9 #import <AppKit/NSColor.h>
10 #import <AVFoundation/AVFoundation.h>
12 #import <QuartzCore/QuartzCore.h>
21 #include "GLBlitHelper.h"
22 #include "GLContextCGL.h"
23 #include "GLContextProvider.h"
24 #include "MozFramebuffer.h"
25 #include "mozilla/gfx/Swizzle.h"
26 #include "mozilla/layers/ScreenshotGrabber.h"
27 #include "mozilla/layers/SurfacePoolCA.h"
28 #include "mozilla/StaticPrefs_gfx.h"
29 #include "mozilla/Telemetry.h"
30 #include "mozilla/webrender/RenderMacIOSurfaceTextureHost.h"
31 #include "ScopedGLHelpers.h"
// Declares the private -[CALayer setContentsOpaque:] selector so it can be
// messaged without a public SDK header. NOTE(review): private API — verify
// availability on supported macOS versions.
33 @interface CALayer (PrivateSetContentsOpaque)
34 - (void)setContentsOpaque:(BOOL)opaque;
40 using gfx::DataSourceSurface;
46 using gfx::SurfaceFormat;
48 using gl::GLContextCGL;
// Maps a VideoLowPowerType outcome to its telemetry label, or Nothing() for
// outcomes that are not reported (hence the Maybe<> return).
50 static Maybe<Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER>
51 VideoLowPowerTypeToTelemetryType(VideoLowPowerType aVideoLowPower) {
52 switch (aVideoLowPower) {
53 case VideoLowPowerType::LowPower:
54 return Some(Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::LowPower);
56 case VideoLowPowerType::FailMultipleVideo:
58 Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailMultipleVideo);
60 case VideoLowPowerType::FailWindowed:
61 return Some(Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailWindowed);
63 case VideoLowPowerType::FailOverlaid:
64 return Some(Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailOverlaid);
66 case VideoLowPowerType::FailBacking:
67 return Some(Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailBacking);
69 case VideoLowPowerType::FailMacOSVersion:
71 Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailMacOSVersion);
73 case VideoLowPowerType::FailPref:
74 return Some(Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailPref);
76 case VideoLowPowerType::FailSurface:
77 return Some(Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailSurface);
79 case VideoLowPowerType::FailEnqueue:
80 return Some(Telemetry::LABELS_GFX_MACOS_VIDEO_LOW_POWER::FailEnqueue);
// Accumulates the categorical telemetry sample for this commit's video
// low-power outcome, if the outcome maps to a telemetry label.
87 static void EmitTelemetryForVideoLowPower(VideoLowPowerType aVideoLowPower) {
88 auto telemetryValue = VideoLowPowerTypeToTelemetryType(aVideoLowPower);
89 if (telemetryValue.isSome()) {
90 Telemetry::AccumulateCategorical(telemetryValue.value());
94 // Utility classes for NativeLayerRootSnapshotter (NLRS) profiler screenshots.
// Wraps a MozFramebuffer as a profiler-screenshot RenderSource; takes
// ownership of the framebuffer and exposes it via FB().
96 class RenderSourceNLRS : public profiler_screenshots::RenderSource {
98 explicit RenderSourceNLRS(UniquePtr<gl::MozFramebuffer>&& aFramebuffer)
99 : RenderSource(aFramebuffer->mSize),
100 mFramebuffer(std::move(aFramebuffer)) {}
101 auto& FB() { return *mFramebuffer; }
104 UniquePtr<gl::MozFramebuffer> mFramebuffer;
// DownscaleTarget backed by a RenderSourceNLRS; DownscaleFrom() (defined
// elsewhere) blits a source rect into this target's framebuffer using mGL.
107 class DownscaleTargetNLRS : public profiler_screenshots::DownscaleTarget {
109 DownscaleTargetNLRS(gl::GLContext* aGL,
110 UniquePtr<gl::MozFramebuffer>&& aFramebuffer)
111 : profiler_screenshots::DownscaleTarget(aFramebuffer->mSize),
113 mRenderSource(new RenderSourceNLRS(std::move(aFramebuffer))) {}
114 already_AddRefed<profiler_screenshots::RenderSource> AsRenderSource()
116 return do_AddRef(mRenderSource);
118 bool DownscaleFrom(profiler_screenshots::RenderSource* aSource,
119 const IntRect& aSourceRect,
120 const IntRect& aDestRect) override;
123 RefPtr<gl::GLContext> mGL;
124 RefPtr<RenderSourceNLRS> mRenderSource;
// Async readback buffer backed by a GL pixel-pack buffer object; owns the
// buffer handle (released in the out-of-line destructor).
127 class AsyncReadbackBufferNLRS
128 : public profiler_screenshots::AsyncReadbackBuffer {
130 AsyncReadbackBufferNLRS(gl::GLContext* aGL, const IntSize& aSize,
131 GLuint aBufferHandle)
132 : profiler_screenshots::AsyncReadbackBuffer(aSize),
134 mBufferHandle(aBufferHandle) {}
135 void CopyFrom(profiler_screenshots::RenderSource* aSource) override;
136 bool MapAndCopyInto(DataSourceSurface* aSurface,
137 const IntSize& aReadSize) override;
140 virtual ~AsyncReadbackBufferNLRS();
141 RefPtr<gl::GLContext> mGL;
142 GLuint mBufferHandle = 0;
145 // Needs to be on the stack whenever CALayer mutations are performed.
146 // (Mutating CALayers outside of a transaction can result in permanently stuck
147 // rendering, because such mutations create an implicit transaction which never
148 // auto-commits if the current thread does not have a native runloop.) Uses
149 // NSAnimationContext, which wraps CATransaction with additional off-main-thread
150 // protection, see bug 1585523.
// RAII scope: begins an NSAnimationContext grouping on construction and ends
// it on destruction, with implicit animations disabled for the duration.
151 struct MOZ_STACK_CLASS AutoCATransaction final {
152 AutoCATransaction() {
153 [NSAnimationContext beginGrouping];
154 // By default, mutating a CALayer property triggers an animation which
155 // smoothly transitions the property to the new value. We don't need these
156 // animations, and this call turns them off:
157 [CATransaction setDisableActions:YES];
159 ~AutoCATransaction() { [NSAnimationContext endGrouping]; }
// Factory: creates a NativeLayerRootCA whose onscreen representation is
// attached to aLayer, and transfers the reference to the caller.
162 /* static */ already_AddRefed<NativeLayerRootCA>
163 NativeLayerRootCA::CreateForCALayer(CALayer* aLayer) {
164 RefPtr<NativeLayerRootCA> layerRoot = new NativeLayerRootCA(aLayer);
165 return layerRoot.forget();
168 // Returns an autoreleased CALayer* object.
169 static CALayer* MakeOffscreenRootCALayer() {
170 // This layer should behave similarly to the backing layer of a flipped
171 // NSView. It will never be rendered on the screen and it will never be
172 // attached to an NSView's layer; instead, it will be the root layer of a
173 // "local" CAContext. Setting geometryFlipped to YES causes the orientation of
174 // descendant CALayers' contents (such as IOSurfaces) to be consistent with
175 // what happens in a layer subtree that is attached to a flipped NSView.
176 // Setting it to NO would cause the surfaces in individual leaf layers to
177 // render upside down (rather than just flipping the entire layer tree upside
// All CALayer mutations below happen inside a transaction (see
// AutoCATransaction for why this matters off the main thread).
179 AutoCATransaction transaction;
180 CALayer* layer = [CALayer layer];
181 layer.position = NSZeroPoint;
182 layer.bounds = NSZeroRect;
183 layer.anchorPoint = NSZeroPoint;
184 layer.contentsGravity = kCAGravityTopLeft;
185 layer.masksToBounds = YES;
186 layer.geometryFlipped = YES;
// Constructor: the onscreen representation wraps the caller-supplied root
// CALayer; the offscreen one gets its own flipped root layer for snapshots.
190 NativeLayerRootCA::NativeLayerRootCA(CALayer* aLayer)
191 : mMutex("NativeLayerRootCA"),
192 mOnscreenRepresentation(aLayer),
193 mOffscreenRepresentation(MakeOffscreenRootCALayer()) {}
// Destructor: asserts that callers removed every sublayer before destroying
// the root.
195 NativeLayerRootCA::~NativeLayerRootCA() {
197 mSublayers.IsEmpty(),
198 "Please clear all layers before destroying the layer root.");
// Creates a content layer of the given size/opacity that draws via surfaces
// from the supplied surface pool.
201 already_AddRefed<NativeLayer> NativeLayerRootCA::CreateLayer(
202 const IntSize& aSize, bool aIsOpaque,
203 SurfacePoolHandle* aSurfacePoolHandle) {
204 RefPtr<NativeLayer> layer = new NativeLayerCA(
205 aSize, aIsOpaque, aSurfacePoolHandle->AsSurfacePoolHandleCA());
206 return layer.forget();
// Creates a layer whose contents will come from an externally-attached
// texture (see NativeLayerCA::AttachExternalImage).
209 already_AddRefed<NativeLayer> NativeLayerRootCA::CreateLayerForExternalTexture(
211 RefPtr<NativeLayer> layer = new NativeLayerCA(aIsOpaque);
212 return layer.forget();
// Creates a solid-color layer for the given device color.
215 already_AddRefed<NativeLayer> NativeLayerRootCA::CreateLayerForColor(
216 gfx::DeviceColor aColor) {
217 RefPtr<NativeLayer> layer = new NativeLayerCA(aColor);
218 return layer.forget();
// Appends aLayer (must be a NativeLayerCA) to the sublayer list, propagates
// current backing scale and fullscreen state to it, and marks both
// representations as structurally dirty for the next commit.
221 void NativeLayerRootCA::AppendLayer(NativeLayer* aLayer) {
222 MutexAutoLock lock(mMutex);
224 RefPtr<NativeLayerCA> layerCA = aLayer->AsNativeLayerCA();
225 MOZ_RELEASE_ASSERT(layerCA);
227 mSublayers.AppendElement(layerCA);
228 layerCA->SetBackingScale(mBackingScale);
229 layerCA->SetRootWindowIsFullscreen(mWindowIsFullscreen);
230 ForAllRepresentations(
231 [&](Representation& r) { r.mMutatedLayerStructure = true; });
// Removes aLayer from the sublayer list and marks both representations as
// structurally dirty.
234 void NativeLayerRootCA::RemoveLayer(NativeLayer* aLayer) {
235 MutexAutoLock lock(mMutex);
237 RefPtr<NativeLayerCA> layerCA = aLayer->AsNativeLayerCA();
238 MOZ_RELEASE_ASSERT(layerCA);
240 mSublayers.RemoveElement(layerCA);
241 ForAllRepresentations(
242 [&](Representation& r) { r.mMutatedLayerStructure = true; });
// Replaces the whole sublayer list. Downcasts each entry, pushes current
// backing scale / fullscreen state into it, and only dirties the
// representations if the resulting list actually differs from the old one.
245 void NativeLayerRootCA::SetLayers(
246 const nsTArray<RefPtr<NativeLayer>>& aLayers) {
247 MutexAutoLock lock(mMutex);
249 // Ideally, we'd just be able to do mSublayers = std::move(aLayers).
250 // However, aLayers has a different type: it carries NativeLayer objects,
251 // whereas mSublayers carries NativeLayerCA objects, so we have to downcast
252 // all the elements first. There's one other reason to look at all the
253 // elements in aLayers first: We need to make sure any new layers know about
254 // our current backing scale.
256 nsTArray<RefPtr<NativeLayerCA>> layersCA(aLayers.Length());
257 for (auto& layer : aLayers) {
258 RefPtr<NativeLayerCA> layerCA = layer->AsNativeLayerCA();
259 MOZ_RELEASE_ASSERT(layerCA);
260 layerCA->SetBackingScale(mBackingScale);
261 layerCA->SetRootWindowIsFullscreen(mWindowIsFullscreen);
262 layersCA.AppendElement(std::move(layerCA));
265 if (layersCA != mSublayers) {
266 mSublayers = std::move(layersCA);
267 ForAllRepresentations(
268 [&](Representation& r) { r.mMutatedLayerStructure = true; });
// Stores the new backing (HiDPI) scale and propagates it to every sublayer.
272 void NativeLayerRootCA::SetBackingScale(float aBackingScale) {
273 MutexAutoLock lock(mMutex);
275 mBackingScale = aBackingScale;
276 for (auto layer : mSublayers) {
277 layer->SetBackingScale(aBackingScale);
// Returns the current backing scale (thread-safe).
281 float NativeLayerRootCA::BackingScale() {
282 MutexAutoLock lock(mMutex);
283 return mBackingScale;
// Blocks off-main-thread CommitToScreen() calls until unsuspended; such
// commits set mCommitPending instead (see CommitToScreen).
286 void NativeLayerRootCA::SuspendOffMainThreadCommits() {
287 MutexAutoLock lock(mMutex);
288 mOffMainThreadCommitsSuspended = true;
// Re-allows off-main-thread commits. Returns true if a commit was requested
// while suspended, so the caller knows to trigger one now.
291 bool NativeLayerRootCA::UnsuspendOffMainThreadCommits() {
292 MutexAutoLock lock(mMutex);
293 mOffMainThreadCommitsSuspended = false;
294 return mCommitPending;
// Returns whether off-main-thread commits are currently suspended.
297 bool NativeLayerRootCA::AreOffMainThreadCommitsSuspended() {
298 MutexAutoLock lock(mMutex);
299 return mOffMainThreadCommitsSuspended;
// Commits the onscreen representation. Off-main-thread calls while commits
// are suspended only set mCommitPending and bail. Also optionally dumps the
// layer tree to ~/Desktop (debug pref) and periodically emits video
// low-power telemetry. Return value indicates whether a commit happened
// (NOTE(review): exact return paths partially elided in this view).
302 bool NativeLayerRootCA::CommitToScreen() {
304 MutexAutoLock lock(mMutex);
306 if (!NS_IsMainThread() && mOffMainThreadCommitsSuspended) {
307 mCommitPending = true;
311 mOnscreenRepresentation.Commit(WhichRepresentation::ONSCREEN, mSublayers,
312 mWindowIsFullscreen);
314 mCommitPending = false;
317 if (StaticPrefs::gfx_webrender_debug_dump_native_layer_tree_to_file()) {
318 static uint32_t sFrameID = 0;
319 uint32_t frameID = sFrameID++;
322 [NSString stringWithFormat:@"%@/Desktop/nativelayerdumps-%d",
323 NSHomeDirectory(), getpid()];
324 if ([NSFileManager.defaultManager createDirectoryAtPath:dirPath
325 withIntermediateDirectories:YES
329 [NSString stringWithFormat:@"frame-%d.html", frameID];
330 NSString* filePath = [dirPath stringByAppendingPathComponent:filename];
331 DumpLayerTreeToFile([filePath UTF8String]);
333 NSLog(@"Failed to create directory %@", dirPath);
337 // Decide if we are going to emit telemetry about video low power on this
// Telemetry is sampled: only every TELEMETRY_COMMIT_PERIOD-th commit runs
// the (non-trivial) CheckVideoLowPower() inspection.
339 static const int32_t TELEMETRY_COMMIT_PERIOD =
340 StaticPrefs::gfx_core_animation_low_power_telemetry_frames_AtStartup();
341 mTelemetryCommitCount = (mTelemetryCommitCount + 1) % TELEMETRY_COMMIT_PERIOD;
342 if (mTelemetryCommitCount == 0) {
343 // Figure out if we are hitting video low power mode.
344 VideoLowPowerType videoLowPower = CheckVideoLowPower();
345 EmitTelemetryForVideoLowPower(videoLowPower);
// Creates the (single) snapshotter for this root, rendering the offscreen
// representation's root CALayer. Keeps a weak back-pointer so the
// snapshotter can unregister itself on destruction.
351 UniquePtr<NativeLayerRootSnapshotter> NativeLayerRootCA::CreateSnapshotter() {
352 MutexAutoLock lock(mMutex);
353 MOZ_RELEASE_ASSERT(!mWeakSnapshotter,
354 "No NativeLayerRootSnapshotter for this NativeLayerRoot "
355 "should exist when this is called");
357 auto cr = NativeLayerRootSnapshotterCA::Create(
358 this, mOffscreenRepresentation.mRootCALayer);
360 mWeakSnapshotter = cr.get();
// Called from the snapshotter's destructor to clear our weak back-pointer.
365 void NativeLayerRootCA::OnNativeLayerRootSnapshotterDestroyed(
366 NativeLayerRootSnapshotterCA* aNativeLayerRootSnapshotter) {
367 MutexAutoLock lock(mMutex);
368 MOZ_RELEASE_ASSERT(mWeakSnapshotter == aNativeLayerRootSnapshotter);
369 mWeakSnapshotter = nullptr;
// Commits pending mutations to the offscreen representation (used by the
// snapshotter before rendering).
372 void NativeLayerRootCA::CommitOffscreen() {
373 MutexAutoLock lock(mMutex);
374 mOffscreenRepresentation.Commit(WhichRepresentation::OFFSCREEN, mSublayers,
375 mWindowIsFullscreen);
// Applies aFn to both the onscreen and offscreen representations.
378 template <typename F>
379 void NativeLayerRootCA::ForAllRepresentations(F aFn) {
380 aFn(mOnscreenRepresentation);
381 aFn(mOffscreenRepresentation);
// Representation holds a strong (manually retained) reference to its root
// CALayer; released in the destructor below.
384 NativeLayerRootCA::Representation::Representation(CALayer* aRootCALayer)
385 : mRootCALayer([aRootCALayer retain]) {}
// Detaches our sublayers (only if we ever mutated the layer structure) and
// releases the retained root CALayer.
387 NativeLayerRootCA::Representation::~Representation() {
388 if (mMutatedLayerStructure) {
389 // Clear the root layer's sublayers. At this point the window is usually
390 // closed, so this transaction does not cause any screen updates.
391 AutoCATransaction transaction;
392 mRootCALayer.sublayers = @[];
395 [mRootCALayer release];
// Applies all pending sublayer mutations to this representation's CALayer
// tree. Computes the maximal required update across sublayers; exits early
// when nothing is needed, tries a cheap video-only update next, and
// otherwise performs a full transaction, rebuilding the root's sublayers
// array when layer structure/extent changed.
398 void NativeLayerRootCA::Representation::Commit(
399 WhichRepresentation aRepresentation,
400 const nsTArray<RefPtr<NativeLayerCA>>& aSublayers,
401 bool aWindowIsFullscreen) {
402 bool mustRebuild = mMutatedLayerStructure;
404 // Check which type of update we need to do, if any.
405 NativeLayerCA::UpdateType updateRequired = NativeLayerCA::UpdateType::None;
407 for (auto layer : aSublayers) {
408 // Use the ordering of our UpdateType enums to build a maximal update
411 std::max(updateRequired, layer->HasUpdate(aRepresentation));
412 if (updateRequired == NativeLayerCA::UpdateType::All) {
417 if (updateRequired == NativeLayerCA::UpdateType::None) {
418 // Nothing more needed, so early exit.
422 if (updateRequired == NativeLayerCA::UpdateType::OnlyVideo) {
// Attempt the lightweight video-only path; if any layer fails, fall
// through to the full update below.
423 bool allUpdatesSucceeded = std::all_of(
424 aSublayers.begin(), aSublayers.end(),
425 [=](const RefPtr<NativeLayerCA>& layer) {
426 return layer->ApplyChanges(aRepresentation,
427 NativeLayerCA::UpdateType::OnlyVideo);
430 if (allUpdatesSucceeded) {
431 // Nothing more needed, so early exit;
437 // We're going to do a full update now, which requires a transaction. Update
438 // all of the sublayers. Afterwards, only continue processing the sublayers
439 // which have an extent.
440 AutoCATransaction transaction;
441 nsTArray<NativeLayerCA*> sublayersWithExtent;
442 for (auto layer : aSublayers) {
443 mustRebuild |= layer->WillUpdateAffectLayers(aRepresentation);
444 layer->ApplyChanges(aRepresentation, NativeLayerCA::UpdateType::All);
445 CALayer* caLayer = layer->UnderlyingCALayer(aRepresentation);
446 if (!caLayer.masksToBounds || !NSIsEmptyRect(caLayer.bounds)) {
447 // This layer has an extent. If it didn't before, we need to rebuild.
448 mustRebuild |= !layer->HasExtent();
449 layer->SetHasExtent(true);
450 sublayersWithExtent.AppendElement(layer);
452 // This layer has no extent. If it did before, we need to rebuild.
453 mustRebuild |= layer->HasExtent();
454 layer->SetHasExtent(false);
457 // One other reason we may need to rebuild is if the caLayer is not part of
458 // the root layer's sublayers. This might happen if the caLayer was rebuilt.
459 // We construct this check in a way that maximizes the boolean
460 // short-circuit, because we don't want to call containsObject unless
461 // absolutely necessary.
463 mustRebuild || ![mRootCALayer.sublayers containsObject:caLayer];
// Rebuild the root's sublayers array from the layers that have extent.
467 uint32_t sublayersCount = sublayersWithExtent.Length();
468 NSMutableArray<CALayer*>* sublayers =
469 [NSMutableArray arrayWithCapacity:sublayersCount];
470 for (auto layer : sublayersWithExtent) {
471 [sublayers addObject:layer->UnderlyingCALayer(aRepresentation)];
473 mRootCALayer.sublayers = sublayers;
476 mMutatedLayerStructure = false;
// Factory for the snapshotter. Refuses to run on the main thread (see
// comment), then creates a headless compat-profile GL context for CARenderer
// to draw into.
479 /* static */ UniquePtr<NativeLayerRootSnapshotterCA>
480 NativeLayerRootSnapshotterCA::Create(NativeLayerRootCA* aLayerRoot,
481 CALayer* aRootCALayer) {
482 if (NS_IsMainThread()) {
483 // Disallow creating snapshotters on the main thread.
484 // On the main thread, any explicit CATransaction / NSAnimationContext is
485 // nested within a global implicit transaction. This makes it impossible to
486 // apply CALayer mutations synchronously such that they become visible to
487 // CARenderer. As a result, the snapshotter would not capture the right
488 // output on the main thread.
492 nsCString failureUnused;
493 RefPtr<gl::GLContext> gl = gl::GLContextProvider::CreateHeadless(
494 {gl::CreateContextFlags::ALLOW_OFFLINE_RENDERER |
495 gl::CreateContextFlags::REQUIRE_COMPAT_PROFILE},
501 return UniquePtr<NativeLayerRootSnapshotterCA>(
502 new NativeLayerRootSnapshotterCA(aLayerRoot, std::move(gl),
// Writes an HTML dump of every sublayer's state to aPath (debugging aid,
// triggered from CommitToScreen via a pref).
506 void NativeLayerRootCA::DumpLayerTreeToFile(const char* aPath) {
507 MutexAutoLock lock(mMutex);
508 NSLog(@"Dumping NativeLayer contents to %s", aPath);
509 std::ofstream fileOutput(aPath);
510 if (fileOutput.fail()) {
511 NSLog(@"Opening %s for writing failed.", aPath);
514 // Make sure floating point values use a period for the decimal separator.
515 fileOutput.imbue(std::locale("C"));
517 fileOutput << "<html>\n";
518 for (const auto& layer : mSublayers) {
519 layer->DumpLayer(fileOutput);
521 fileOutput << "</html>\n";
// Records whether the root window is fullscreen and propagates the change to
// every sublayer (fullscreen affects video layer specialization).
525 void NativeLayerRootCA::SetWindowIsFullscreen(bool aFullscreen) {
526 MutexAutoLock lock(mMutex);
528 if (mWindowIsFullscreen != aFullscreen) {
529 mWindowIsFullscreen = aFullscreen;
531 for (auto layer : mSublayers) {
532 layer->SetRootWindowIsFullscreen(mWindowIsFullscreen);
// Determines whether aColor is fully-opaque black: either it equals the
// constant black color, or all of its color components are <= 0 while its
// alpha (the last component) is >= 1.
/* static */ bool IsCGColorOpaqueBlack(CGColorRef aColor) {
  if (CGColorEqualToColor(aColor, CGColorGetConstantColor(kCGColorBlack))) {
    return true;
  }

  size_t componentCount = CGColorGetNumberOfComponents(aColor);
  if (componentCount == 0) {
    // This will happen if aColor is kCGColorClear. It's not opaque black.
    return false;
  }

  // Every component except the trailing alpha must be zero (or below).
  const CGFloat* components = CGColorGetComponents(aColor);
  for (size_t c = 0; c < componentCount - 1; ++c) {
    if (components[c] > 0.0f) {
      return false;
    }
  }

  // Opaque requires the alpha component to be at least 1.
  return components[componentCount - 1] >= 1.0f;
}
556 VideoLowPowerType NativeLayerRootCA::CheckVideoLowPower() {
557 // This deteremines whether the current layer contents qualify for the
558 // macOS Core Animation video low power mode. Those requirements are
560 // https://developer.apple.com/documentation/webkit/delivering_video_content_for_safari
561 // and we verify them by checking:
562 // 1) There must be exactly one video showing.
563 // 2) The topmost CALayer must be a AVSampleBufferDisplayLayer.
564 // 3) The video layer must be showing a buffer encoded in one of the
565 // kCVPixelFormatType_420YpCbCr pixel formats.
566 // 4) The layer below that must cover the entire screen and have a black
568 // 5) The window must be fullscreen.
569 // This function checks these requirements empirically. If one of the checks
570 // fail, we either return immediately or do additional processing to
571 // determine more detail.
// Walk sublayers (topmost last), tracking the top two CALayers with extent
// and counting video layers.
573 uint32_t videoLayerCount = 0;
574 NativeLayerCA* topLayer = nullptr;
575 CALayer* topCALayer = nil;
576 CALayer* secondCALayer = nil;
577 bool topLayerIsVideo = false;
579 for (auto layer : mSublayers) {
580 // Only layers with extent are contributing to our sublayers.
581 if (layer->HasExtent()) {
584 secondCALayer = topCALayer;
585 topCALayer = topLayer->UnderlyingCALayer(WhichRepresentation::ONSCREEN);
586 topLayerIsVideo = topLayer->IsVideo();
587 if (topLayerIsVideo) {
593 if (videoLayerCount == 0) {
594 return VideoLowPowerType::NotVideo;
597 // Most importantly, check if the window is fullscreen. If the user is
598 // watching video in a window, then all of the other enums are irrelevant to
599 // achieving the low power mode.
600 if (!mWindowIsFullscreen) {
601 return VideoLowPowerType::FailWindowed;
604 if (videoLayerCount > 1) {
605 return VideoLowPowerType::FailMultipleVideo;
608 if (!topLayerIsVideo) {
609 return VideoLowPowerType::FailOverlaid;
// Requirement 4: the layer beneath the video must be opaque black and cover
// its superlayer entirely.
612 if (!secondCALayer || !IsCGColorOpaqueBlack(secondCALayer.backgroundColor) ||
613 !CGRectContainsRect(secondCALayer.frame,
614 secondCALayer.superlayer.bounds)) {
615 return VideoLowPowerType::FailBacking;
618 CALayer* topContentCALayer = topCALayer.sublayers[0];
619 if (![topContentCALayer isKindOfClass:[AVSampleBufferDisplayLayer class]]) {
620 // We didn't create a AVSampleBufferDisplayLayer for the top video layer.
621 // Try to figure out why by following some of the logic in
622 // NativeLayerCA::ShouldSpecializeVideo.
624 if (!StaticPrefs::gfx_core_animation_specialize_video()) {
625 return VideoLowPowerType::FailPref;
628 // The only remaining reason is that the surface wasn't eligible. We
629 // assert this instead of if-ing it, to ensure that we always have a
630 // return value from this clause.
632 MOZ_ASSERT(topLayer->mTextureHost);
633 MacIOSurface* macIOSurface = topLayer->mTextureHost->GetSurface();
634 CFTypeRefPtr<IOSurfaceRef> surface = macIOSurface->GetIOSurfaceRef();
635 OSType pixelFormat = IOSurfaceGetPixelFormat(surface.get());
637 !(pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange ||
638 pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ||
639 pixelFormat == kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange ||
640 pixelFormat == kCVPixelFormatType_420YpCbCr10BiPlanarFullRange));
642 return VideoLowPowerType::FailSurface;
// Requirement on the AV layer itself: it must actually be rendering
// enqueued sample buffers.
645 AVSampleBufferDisplayLayer* topVideoLayer =
646 (AVSampleBufferDisplayLayer*)topContentCALayer;
647 if (topVideoLayer.status != AVQueuedSampleBufferRenderingStatusRendering) {
648 return VideoLowPowerType::FailEnqueue;
651 // As best we can tell, we're eligible for video low power mode. Hurrah!
652 return VideoLowPowerType::LowPower;
// Constructor: creates a retained CARenderer bound to our GL context's CGL
// context and points it at the offscreen root CALayer.
655 NativeLayerRootSnapshotterCA::NativeLayerRootSnapshotterCA(
656 NativeLayerRootCA* aLayerRoot, RefPtr<GLContext>&& aGL,
657 CALayer* aRootCALayer)
658 : mLayerRoot(aLayerRoot), mGL(aGL) {
659 AutoCATransaction transaction;
660 mRenderer = [[CARenderer
661 rendererWithCGLContext:gl::GLContextCGL::Cast(mGL)->GetCGLContext()
662 options:nil] retain];
663 mRenderer.layer = aRootCALayer;
// Destructor: unregisters from the layer root (clears its weak pointer).
666 NativeLayerRootSnapshotterCA::~NativeLayerRootSnapshotterCA() {
667 mLayerRoot->OnNativeLayerRootSnapshotterDestroyed(this);
// Re-renders the snapshot at the given window size and returns it as a
// profiler RenderSource.
671 already_AddRefed<profiler_screenshots::RenderSource>
672 NativeLayerRootSnapshotterCA::GetWindowContents(const IntSize& aWindowSize) {
673 UpdateSnapshot(aWindowSize);
674 return do_AddRef(mSnapshot);
// Renders the offscreen layer tree into mSnapshot's framebuffer at aSize
// using CARenderer: sizes/scales the renderer, commits pending offscreen
// layer mutations, (re)allocates the snapshot framebuffer if the size
// changed, then clears and redraws the renderer's update bounds.
677 void NativeLayerRootSnapshotterCA::UpdateSnapshot(const IntSize& aSize) {
678 CGRect bounds = CGRectMake(0, 0, aSize.width, aSize.height);
681 // Set the correct bounds and scale on the renderer and its root layer.
682 // CARenderer always renders at unit scale, i.e. the coordinates on the root
683 // layer must map 1:1 to render target pixels. But the coordinates on our
684 // content layers are in "points", where 1 point maps to 2 device pixels on
685 // HiDPI. So in order to render at the full device pixel resolution, we set
686 // a scale transform on the root offscreen layer.
687 AutoCATransaction transaction;
688 mRenderer.layer.bounds = bounds;
689 float scale = mLayerRoot->BackingScale();
690 mRenderer.layer.sublayerTransform = CATransform3DMakeScale(scale, scale, 1);
691 mRenderer.bounds = bounds;
694 mLayerRoot->CommitOffscreen();
// A fresh framebuffer has undefined contents, so everything must be drawn.
698 bool needToRedrawEverything = false;
699 if (!mSnapshot || mSnapshot->Size() != aSize) {
701 auto fb = gl::MozFramebuffer::Create(mGL, aSize, 0, false);
705 mSnapshot = new RenderSourceNLRS(std::move(fb));
706 needToRedrawEverything = true;
709 const gl::ScopedBindFramebuffer bindFB(mGL, mSnapshot->FB().mFB);
710 mGL->fViewport(0.0, 0.0, aSize.width, aSize.height);
712 // These legacy OpenGL function calls are part of CARenderer's API contract,
713 // see CARenderer.h. The size passed to glOrtho must be the device pixel size
714 // of the render target, otherwise CARenderer will produce incorrect results.
715 glMatrixMode(GL_PROJECTION);
717 glOrtho(0.0, aSize.width, 0.0, aSize.height, -1, 1);
719 float mediaTime = CACurrentMediaTime();
720 [mRenderer beginFrameAtTime:mediaTime timeStamp:nullptr];
721 if (needToRedrawEverything) {
722 [mRenderer addUpdateRect:bounds];
724 if (!CGRectIsEmpty([mRenderer updateBounds])) {
725 // CARenderer assumes the layer tree is opaque. It only ever paints over
726 // existing content, it never erases anything. However, our layer tree is
727 // not necessarily opaque. So we manually erase the area that's going to be
728 // redrawn. This ensures correct rendering in the transparent areas.
730 // Since we erase the bounds of the update area, this will erase more than
731 // necessary if the update area is not a single rectangle. Unfortunately we
732 // cannot get the precise update region from CARenderer, we can only get the
734 CGRect updateBounds = [mRenderer updateBounds];
735 gl::ScopedGLState scopedScissorTestState(mGL, LOCAL_GL_SCISSOR_TEST, true);
736 gl::ScopedScissorRect scissor(
737 mGL, updateBounds.origin.x, updateBounds.origin.y,
738 updateBounds.size.width, updateBounds.size.height);
739 mGL->fClearColor(0.0, 0.0, 0.0, 0.0);
740 mGL->fClear(LOCAL_GL_COLOR_BUFFER_BIT);
741 // We erased the update region's bounds. Make sure the entire update bounds
743 [mRenderer addUpdateRect:updateBounds];
746 [mRenderer endFrame];
// Synchronously reads the snapshot back into aReadbackBuffer. Only BGRA8 is
// supported; the snapshot is re-rendered at aReadbackSize first.
749 bool NativeLayerRootSnapshotterCA::ReadbackPixels(
750 const IntSize& aReadbackSize, SurfaceFormat aReadbackFormat,
751 const Range<uint8_t>& aReadbackBuffer) {
752 if (aReadbackFormat != SurfaceFormat::B8G8R8A8) {
756 UpdateSnapshot(aReadbackSize);
761 const gl::ScopedBindFramebuffer bindFB(mGL, mSnapshot->FB().mFB);
762 gl::ScopedPackState safePackState(mGL);
763 mGL->fReadPixels(0.0f, 0.0f, aReadbackSize.width, aReadbackSize.height,
764 LOCAL_GL_BGRA, LOCAL_GL_UNSIGNED_BYTE, &aReadbackBuffer[0]);
// Allocates a framebuffer of the requested size and wraps it in a
// DownscaleTargetNLRS for the profiler.
769 already_AddRefed<profiler_screenshots::DownscaleTarget>
770 NativeLayerRootSnapshotterCA::CreateDownscaleTarget(const IntSize& aSize) {
771 auto fb = gl::MozFramebuffer::Create(mGL, aSize, 0, false);
775 RefPtr<profiler_screenshots::DownscaleTarget> dt =
776 new DownscaleTargetNLRS(mGL, std::move(fb));
// Allocates a GL pixel-pack buffer sized for a BGRA (4 bytes/pixel) image of
// aSize and wraps it for async readback.
780 already_AddRefed<profiler_screenshots::AsyncReadbackBuffer>
781 NativeLayerRootSnapshotterCA::CreateAsyncReadbackBuffer(const IntSize& aSize) {
782 size_t bufferByteCount = aSize.width * aSize.height * 4;
783 GLuint bufferHandle = 0;
784 mGL->fGenBuffers(1, &bufferHandle);
786 gl::ScopedPackState scopedPackState(mGL);
787 mGL->fBindBuffer(LOCAL_GL_PIXEL_PACK_BUFFER, bufferHandle);
788 mGL->fPixelStorei(LOCAL_GL_PACK_ALIGNMENT, 1);
789 mGL->fBufferData(LOCAL_GL_PIXEL_PACK_BUFFER, bufferByteCount, nullptr,
790 LOCAL_GL_STREAM_READ);
791 return MakeAndAddRef<AsyncReadbackBufferNLRS>(mGL, aSize, bufferHandle);
// Constructor for content layers that draw into surfaces from a pool.
794 NativeLayerCA::NativeLayerCA(const IntSize& aSize, bool aIsOpaque,
795 SurfacePoolHandleCA* aSurfacePoolHandle)
796 : mMutex("NativeLayerCA"),
797 mSurfacePoolHandle(aSurfacePoolHandle),
799 mIsOpaque(aIsOpaque) {
800 MOZ_RELEASE_ASSERT(mSurfacePoolHandle,
801 "Need a non-null surface pool handle.");
// Constructor for layers that host external images (video); no surface pool.
804 NativeLayerCA::NativeLayerCA(bool aIsOpaque)
805 : mMutex("NativeLayerCA"),
806 mSurfacePoolHandle(nullptr),
807 mIsOpaque(aIsOpaque) {
// Optional verbose logging for video-layer lifetime, behind a pref.
809 if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
810 NSLog(@"VIDEO_LOG: NativeLayerCA: %p is being created to host video, which "
811 @"will force a video "
// Creates a CGColor for aColor, honoring the gfx.color_management.native_srgb
// pref: sRGB color space when the pref is set, generic RGB otherwise.
// Follows the Create rule — the caller owns the returned CGColorRef.
CGColorRef CGColorCreateForDeviceColor(gfx::DeviceColor aColor) {
  return StaticPrefs::gfx_color_management_native_srgb()
             ? CGColorCreateSRGB(aColor.r, aColor.g, aColor.b, aColor.a)
             : CGColorCreateGenericRGB(aColor.r, aColor.g, aColor.b, aColor.a);
}
// Constructor for solid-color layers; opaque iff alpha >= 1. The CGColor is
// created once here and owned via the Create rule.
826 NativeLayerCA::NativeLayerCA(gfx::DeviceColor aColor)
827 : mMutex("NativeLayerCA"),
828 mSurfacePoolHandle(nullptr),
829 mIsOpaque(aColor.a >= 1.0f) {
830 MOZ_ASSERT(aColor.a > 0.0f, "Can't handle a fully transparent backdrop.");
831 mColor.AssignUnderCreateRule(CGColorCreateForDeviceColor(aColor));
// Destructor: unlocks any in-progress locked IOSurface and returns all owned
// surfaces (in-progress, front, and pooled) to the surface pool.
834 NativeLayerCA::~NativeLayerCA() {
836 if (mHasEverAttachExternalImage &&
837 StaticPrefs::gfx_core_animation_specialize_video_log()) {
838 NSLog(@"VIDEO_LOG: ~NativeLayerCA: %p is being destroyed after hosting "
843 if (mInProgressLockedIOSurface) {
844 mInProgressLockedIOSurface->Unlock(false);
845 mInProgressLockedIOSurface = nullptr;
847 if (mInProgressSurface) {
848 IOSurfaceDecrementUseCount(mInProgressSurface->mSurface.get());
849 mSurfacePoolHandle->ReturnSurfaceToPool(mInProgressSurface->mSurface);
852 mSurfacePoolHandle->ReturnSurfaceToPool(mFrontSurface->mSurface);
854 for (const auto& surf : mSurfaces) {
855 mSurfacePoolHandle->ReturnSurfaceToPool(surf.mEntry.mSurface);
// Attaches an external (video) texture as this layer's contents. Updates
// size/display rect, re-evaluates video specialization and DRM state, and
// marks the affected representation state as mutated.
859 void NativeLayerCA::AttachExternalImage(wr::RenderTextureHost* aExternalImage) {
860 MutexAutoLock lock(mMutex);
863 mHasEverAttachExternalImage = true;
864 MOZ_RELEASE_ASSERT(!mHasEverNotifySurfaceReady,
865 "Shouldn't change layer type to external.");
868 wr::RenderMacIOSurfaceTextureHost* texture =
869 aExternalImage->AsRenderMacIOSurfaceTextureHost();
870 MOZ_ASSERT(texture || aExternalImage->IsWrappingAsyncRemoteTexture());
871 mTextureHost = texture;
873 gfxCriticalNoteOnce << "ExternalImage is not RenderMacIOSurfaceTextureHost";
// Track size changes; the display rect always covers the full texture.
877 gfx::IntSize oldSize = mSize;
878 mSize = texture->GetSize(0);
879 bool changedSizeAndDisplayRect = (mSize != oldSize);
881 mDisplayRect = IntRect(IntPoint{}, mSize);
883 bool oldSpecializeVideo = mSpecializeVideo;
884 mSpecializeVideo = ShouldSpecializeVideo(lock);
885 bool changedSpecializeVideo = (mSpecializeVideo != oldSpecializeVideo);
887 if (changedSpecializeVideo &&
888 StaticPrefs::gfx_core_animation_specialize_video_log()) {
890 @"VIDEO_LOG: AttachExternalImage: %p is forcing a video layer rebuild.",
895 bool oldIsDRM = mIsDRM;
896 mIsDRM = aExternalImage->IsFromDRMSource();
897 bool changedIsDRM = (mIsDRM != oldIsDRM);
899 ForAllRepresentations([&](Representation& r) {
900 r.mMutatedFrontSurface = true;
901 r.mMutatedDisplayRect |= changedSizeAndDisplayRect;
902 r.mMutatedSize |= changedSizeAndDisplayRect;
903 r.mMutatedSpecializeVideo |= changedSpecializeVideo;
904 r.mMutatedIsDRM |= changedIsDRM;
// Public (locking) variant of the video check below.
908 bool NativeLayerCA::IsVideo() {
909 // Anything with a texture host is considered a video source.
// Lock-held variant; the MutexAutoLock parameter is a proof-of-lock token.
913 bool NativeLayerCA::IsVideoAndLocked(const MutexAutoLock& aProofOfLock) {
914 // Anything with a texture host is considered a video source.
// Decides whether this layer should use a specialized (AVSampleBufferDisplay)
// video layer: required for DRM and HDR (BT2020 / 10-bit 420) content, and
// otherwise chosen only when the pref is set, the pixel format qualifies for
// the compositor's detached mode, and the window is fullscreen.
918 bool NativeLayerCA::ShouldSpecializeVideo(const MutexAutoLock& aProofOfLock) {
919 if (!IsVideoAndLocked(aProofOfLock)) {
920 // Only videos are eligible.
924 MOZ_ASSERT(mTextureHost);
926 // DRM video is supported in macOS 10.15 and beyond, and such video must use
927 // a specialized video layer.
928 if (mTextureHost->IsFromDRMSource()) {
932 // Beyond this point, we need to know about the format of the video.
933 MacIOSurface* macIOSurface = mTextureHost->GetSurface();
934 if (macIOSurface->GetYUVColorSpace() == gfx::YUVColorSpace::BT2020) {
935 // BT2020 is a signifier of HDR color space, whether or not the bit depth
936 // is expanded to cover that color space. This video needs a specialized
941 CFTypeRefPtr<IOSurfaceRef> surface = macIOSurface->GetIOSurfaceRef();
942 OSType pixelFormat = IOSurfaceGetPixelFormat(surface.get());
943 if (pixelFormat == kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange ||
944 pixelFormat == kCVPixelFormatType_420YpCbCr10BiPlanarFullRange) {
945 // HDR videos require specialized video layers.
949 // Beyond this point, we return true if-and-only-if we think we can achieve
950 // the power-saving "detached mode" of the macOS compositor.
952 if (!StaticPrefs::gfx_core_animation_specialize_video()) {
957 if (pixelFormat != kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange &&
958 pixelFormat != kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
959 // The video is not in one of the formats that qualifies for detachment.
963 // It will only detach if we're fullscreen.
964 return mRootWindowIsFullscreen;
// Records the root window's fullscreen state; re-evaluates video
// specialization (which depends on fullscreen) and marks representations
// mutated if that decision changed. Early-exits without locking when the
// value is unchanged.
967 void NativeLayerCA::SetRootWindowIsFullscreen(bool aFullscreen) {
968 if (mRootWindowIsFullscreen == aFullscreen) {
972 MutexAutoLock lock(mMutex);
974 mRootWindowIsFullscreen = aFullscreen;
976 bool oldSpecializeVideo = mSpecializeVideo;
977 mSpecializeVideo = ShouldSpecializeVideo(lock);
978 bool changedSpecializeVideo = (mSpecializeVideo != oldSpecializeVideo);
980 if (changedSpecializeVideo) {
982 if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
983 NSLog(@"VIDEO_LOG: SetRootWindowIsFullscreen: %p is forcing a video "
989 ForAllRepresentations(
990 [&](Representation& r) { r.mMutatedSpecializeVideo = true; });
// Sets whether the surface content is vertically flipped; dirties both
// representations on change.
994 void NativeLayerCA::SetSurfaceIsFlipped(bool aIsFlipped) {
995 MutexAutoLock lock(mMutex);
997 if (aIsFlipped != mSurfaceIsFlipped) {
998 mSurfaceIsFlipped = aIsFlipped;
999 ForAllRepresentations(
1000 [&](Representation& r) { r.mMutatedSurfaceIsFlipped = true; });
// Thread-safe getter for the surface-is-flipped state.
bool NativeLayerCA::SurfaceIsFlipped() {
  MutexAutoLock lock(mMutex);
  return mSurfaceIsFlipped;
// Thread-safe getter for the layer's size (return statement elided in this
// view; presumably returns mSize — consistent with GetRect below).
IntSize NativeLayerCA::GetSize() {
  MutexAutoLock lock(mMutex);
// Sets the layer's position. Marks the position as mutated on both
// representations only when the value actually changes.
void NativeLayerCA::SetPosition(const IntPoint& aPosition) {
  MutexAutoLock lock(mMutex);

  if (aPosition != mPosition) {
    mPosition = aPosition;
    ForAllRepresentations(
        [&](Representation& r) { r.mMutatedPosition = true; });
// Thread-safe getter for the layer's position (return statement elided in
// this view; presumably returns mPosition).
IntPoint NativeLayerCA::GetPosition() {
  MutexAutoLock lock(mMutex);
// Sets the layer transform. Only rectilinear transforms are supported (the
// assertion below); CalculateClipGeometry additionally asserts a 2D integer
// translation when a display-rect-derived clip is needed.
void NativeLayerCA::SetTransform(const Matrix4x4& aTransform) {
  MutexAutoLock lock(mMutex);
  MOZ_ASSERT(aTransform.IsRectilinear());

  if (aTransform != mTransform) {
    mTransform = aTransform;
    ForAllRepresentations(
        [&](Representation& r) { r.mMutatedTransform = true; });
// Sets the sampling filter used when the layer is scaled. POINT maps to
// kCAFilterNearest in Representation::ApplyChanges; everything else maps to
// kCAFilterLinear.
void NativeLayerCA::SetSamplingFilter(gfx::SamplingFilter aSamplingFilter) {
  MutexAutoLock lock(mMutex);

  if (aSamplingFilter != mSamplingFilter) {
    mSamplingFilter = aSamplingFilter;
    ForAllRepresentations(
        [&](Representation& r) { r.mMutatedSamplingFilter = true; });
// Thread-safe getter for the layer transform (return statement elided in
// this view; presumably returns mTransform).
Matrix4x4 NativeLayerCA::GetTransform() {
  MutexAutoLock lock(mMutex);
// Thread-safe getter for the layer's rect, combining position and size.
IntRect NativeLayerCA::GetRect() {
  MutexAutoLock lock(mMutex);
  return IntRect(mPosition, mSize);
// Sets the backing scale (device pixels per CoreAnimation unit). ApplyChanges
// divides sizes/offsets by this value when programming the CALayers.
void NativeLayerCA::SetBackingScale(float aBackingScale) {
  MutexAutoLock lock(mMutex);

  if (aBackingScale != mBackingScale) {
    mBackingScale = aBackingScale;
    ForAllRepresentations(
        [&](Representation& r) { r.mMutatedBackingScale = true; });
// Returns whether this layer is opaque (return statement elided in this
// view; presumably returns mIsOpaque).
bool NativeLayerCA::IsOpaque() {
  // mIsOpaque is const, so no need for a lock.
// Sets an optional clip rect (in the same space as the transformed layer;
// Nothing() means unclipped). Implemented via masksToBounds on the wrapping
// CALayer in Representation::ApplyChanges.
void NativeLayerCA::SetClipRect(const Maybe<gfx::IntRect>& aClipRect) {
  MutexAutoLock lock(mMutex);

  if (aClipRect != mClipRect) {
    mClipRect = aClipRect;
    ForAllRepresentations(
        [&](Representation& r) { r.mMutatedClipRect = true; });
// Thread-safe getter for the optional clip rect (return statement elided in
// this view; presumably returns mClipRect).
Maybe<gfx::IntRect> NativeLayerCA::ClipRect() {
  MutexAutoLock lock(mMutex);
// Debugging aid: writes this layer as a fragment of HTML to aOutputStream —
// a positioned <div> reproducing the clip geometry, containing either a
// background color (color layers) or an <img> whose src is a data: URI PNG
// of the current surface (when the format allows Skia to encode it).
void NativeLayerCA::DumpLayer(std::ostream& aOutputStream) {
  MutexAutoLock lock(mMutex);

  // Same geometry computation that ApplyChanges uses for the CALayers.
  Maybe<CGRect> scaledClipRect = CalculateClipGeometry(
      mSize, mPosition, mTransform, mDisplayRect, mClipRect, mBackingScale);

  if (scaledClipRect.isSome()) {
    useClipRect = *scaledClipRect;
    useClipRect = CGRectZero;

  // Outer <div> models the wrapping layer: position plus clip bounds.
  aOutputStream << "<div style=\"";
  aOutputStream << "position: absolute; ";
  aOutputStream << "left: " << useClipRect.origin.x << "px; ";
  aOutputStream << "top: " << useClipRect.origin.y << "px; ";
  aOutputStream << "width: " << useClipRect.size.width << "px; ";
  aOutputStream << "height: " << useClipRect.size.height << "px; ";

  if (scaledClipRect.isSome()) {
    // Mirrors masksToBounds on the wrapping CALayer.
    aOutputStream << "overflow: hidden; ";

    // Color layer: emit the CGColor's RGBA components as CSS.
    const CGFloat* components = CGColorGetComponents(mColor.get());
    aOutputStream << "background: rgb(" << components[0] * 255.0f << " "
                  << components[1] * 255.0f << " " << components[2] * 255.0f
                  << "); opacity: " << components[3] << "; ";

    // That's all we need for color layers. We don't need to specify an image.
    aOutputStream << "\"/></div>\n";

  aOutputStream << "\">";

  // Logical (CSS-pixel) size of the content image.
  auto size = gfx::Size(mSize) / mBackingScale;

  aOutputStream << "<img style=\"";
  aOutputStream << "width: " << size.width << "px; ";
  aOutputStream << "height: " << size.height << "px; ";

  if (mSamplingFilter == gfx::SamplingFilter::POINT) {
    // Matches the kCAFilterNearest choice in ApplyChanges.
    aOutputStream << "image-rendering: crisp-edges; ";

  // Reproduce the content transform exactly as ApplyChanges computes it:
  // position pre-applied, clip origin post-subtracted (in device pixels).
  Matrix4x4 transform = mTransform;
  transform.PreTranslate(mPosition.x, mPosition.y, 0);
  transform.PostTranslate((-useClipRect.origin.x * mBackingScale),
                          (-useClipRect.origin.y * mBackingScale), 0);

  if (mSurfaceIsFlipped) {
    // Flip vertically around the layer's vertical center.
    transform.PreTranslate(0, mSize.height, 0).PreScale(1, -1, 1);

  if (!transform.IsIdentity()) {
    const auto& m = transform;
    aOutputStream << "transform-origin: top left; ";
    aOutputStream << "transform: matrix3d(";
    aOutputStream << m._11 << ", " << m._12 << ", " << m._13 << ", " << m._14
    aOutputStream << m._21 << ", " << m._22 << ", " << m._23 << ", " << m._24
    aOutputStream << m._31 << ", " << m._32 << ", " << m._33 << ", " << m._34
    // Translation components are converted back to CSS pixels.
    aOutputStream << m._41 / mBackingScale << ", " << m._42 / mBackingScale
                  << ", " << m._43 << ", " << m._44;
    aOutputStream << "); ";

  aOutputStream << "\" ";

  // Pick whichever surface this layer currently displays: the drawn front
  // surface, or the external TextureHost surface.
  CFTypeRefPtr<IOSurfaceRef> surface;
  if (mFrontSurface) {
    surface = mFrontSurface->mSurface;
    aOutputStream << "alt=\"regular surface 0x" << std::hex
                  << int(IOSurfaceGetID(surface.get())) << "\" ";
  } else if (mTextureHost) {
    surface = mTextureHost->GetSurface()->GetIOSurfaceRef();
    aOutputStream << "alt=\"TextureHost surface 0x" << std::hex
                  << int(IOSurfaceGetID(surface.get())) << "\" ";
    aOutputStream << "alt=\"no surface 0x\" ";

  aOutputStream << "src=\"";

    // Attempt to render the surface as a PNG. Skia can do this for RGB
    RefPtr<MacIOSurface> surf = new MacIOSurface(surface);
    SurfaceFormat format = surf->GetFormat();
    if (format == SurfaceFormat::B8G8R8A8 ||
        format == SurfaceFormat::B8G8R8X8) {
      RefPtr<gfx::DrawTarget> dt =
          surf->GetAsDrawTargetLocked(gfx::BackendType::SKIA);
      RefPtr<gfx::SourceSurface> sourceSurf = dt->Snapshot();
      // Encode as a data: URI so the dump is a single self-contained file.
      gfxUtils::EncodeSourceSurface(sourceSurf, ImageType::PNG, u""_ns,
                                    gfxUtils::eDataURIEncode, nullptr,
      aOutputStream << dataUrl.get();

  aOutputStream << "\"/></div>\n";
// Thread-safe getter for the display rect of the current front surface (the
// sub-rect of the surface that contains valid, displayable content).
gfx::IntRect NativeLayerCA::CurrentSurfaceDisplayRect() {
  MutexAutoLock lock(mMutex);
  return mDisplayRect;
// Every mutation flag starts out true so that the first ApplyChanges on a
// fresh representation programs the complete layer state.
NativeLayerCA::Representation::Representation()
    : mMutatedPosition(true),
      mMutatedTransform(true),
      mMutatedDisplayRect(true),
      mMutatedClipRect(true),
      mMutatedBackingScale(true),
      mMutatedSurfaceIsFlipped(true),
      mMutatedFrontSurface(true),
      mMutatedSamplingFilter(true),
      mMutatedSpecializeVideo(true),
      mMutatedIsDRM(true) {}
// Releases the manually-retained CALayers (this file uses manual
// retain/release for its CoreAnimation objects; see the [[... layer] retain]
// calls in ApplyChanges). Messaging nil is a safe no-op for any layer that
// was never created.
NativeLayerCA::Representation::~Representation() {
  [mContentCALayer release];
  [mOpaquenessTintLayer release];
  [mWrappingCALayer release];
// Adds aRegion to the invalid region of every surface in the swap chain —
// the in-progress surface, the front surface, and all pooled back surfaces —
// so that no surface ever presents stale pixels from before this update.
void NativeLayerCA::InvalidateRegionThroughoutSwapchain(
    const MutexAutoLock& aProofOfLock, const IntRegion& aRegion) {
  IntRegion r = aRegion;
  if (mInProgressSurface) {
    mInProgressSurface->mInvalidRegion.OrWith(r);
  if (mFrontSurface) {
    mFrontSurface->mInvalidRegion.OrWith(r);
  for (auto& surf : mSurfaces) {
    surf.mEntry.mInvalidRegion.OrWith(r);
// Selects the surface to draw the next frame into and stores it in
// mInProgressSurface. Prefers recycling an unused swap-chain surface; falls
// back to obtaining a fresh one from the surface pool. Returns false when
// mSize is empty (failure path partially elided in this view). The use count
// is incremented here and decremented in NotifySurfaceReady.
bool NativeLayerCA::NextSurface(const MutexAutoLock& aProofOfLock) {
  if (mSize.IsEmpty()) {
        << "NextSurface returning false because of invalid mSize ("
        << mSize.width << ", " << mSize.height << ").";

  // NextSurface/NotifySurfaceReady must strictly alternate.
  MOZ_RELEASE_ASSERT(!mInProgressSurface,
                     "ERROR: Do not call NextSurface twice in sequence. Call "
                     "NotifySurfaceReady before the "
                     "next call to NextSurface.");

  Maybe<SurfaceWithInvalidRegion> surf =
      GetUnusedSurfaceAndCleanUp(aProofOfLock);
    // No recyclable surface: get a new one from the pool; it starts fully
    // invalid.
    CFTypeRefPtr<IOSurfaceRef> newSurf =
        mSurfacePoolHandle->ObtainSurfaceFromPool(mSize);
        newSurf, "NextSurface IOSurfaceCreate failed to create the surface.");
    surf = Some(SurfaceWithInvalidRegion{newSurf, IntRect({}, mSize)});

  mInProgressSurface = std::move(surf);
  // Mark the surface in-use so GetUnusedSurfaceAndCleanUp won't hand it out.
  IOSurfaceIncrementUseCount(mInProgressSurface->mSurface.get());
// Shared bookkeeping for partial updates, used by both NextSurfaceAsDrawTarget
// and NextSurfaceAsFramebuffer. Records the update region and display rect for
// the in-progress frame, copies still-valid content from the front surface
// into the in-progress surface via the caller-provided copy function (the
// trailing template parameter; its declaration line is elided in this view),
// and invalidates aUpdateRegion throughout the swap chain.
template <typename F>
void NativeLayerCA::HandlePartialUpdate(const MutexAutoLock& aProofOfLock,
                                        const IntRect& aDisplayRect,
                                        const IntRegion& aUpdateRegion,
  MOZ_RELEASE_ASSERT(IntRect({}, mSize).Contains(aUpdateRegion.GetBounds()),
                     "The update region should be within the surface bounds.");
  MOZ_RELEASE_ASSERT(IntRect({}, mSize).Contains(aDisplayRect),
                     "The display rect should be within the surface bounds.");

  // Only one partial update may be in flight per NextSurface call.
  MOZ_RELEASE_ASSERT(!mInProgressUpdateRegion);
  MOZ_RELEASE_ASSERT(!mInProgressDisplayRect);

  mInProgressUpdateRegion = Some(aUpdateRegion);
  mInProgressDisplayRect = Some(aDisplayRect);

  if (mFrontSurface) {
    // Copy not-overwritten valid content from mFrontSurface so that valid
    // content never gets lost.
    gfx::IntRegion copyRegion;
    // Region that is invalid in the in-progress surface, won't be redrawn by
    // this update, and is still valid in the front surface.
    copyRegion.Sub(mInProgressSurface->mInvalidRegion, aUpdateRegion);
    copyRegion.SubOut(mFrontSurface->mInvalidRegion);

    if (!copyRegion.IsEmpty()) {
      // Now copy the valid content, using a caller-provided copy function.
      aCopyFn(mFrontSurface->mSurface, copyRegion);
      mInProgressSurface->mInvalidRegion.SubOut(copyRegion);

  // Everything we are about to draw becomes stale in every other surface.
  InvalidateRegionThroughoutSwapchain(aProofOfLock, aUpdateRegion);
// CPU-drawing entry point of the swap chain: obtains the next surface, locks
// it as a MacIOSurface, and returns a DrawTarget for it. Valid content that
// this update won't redraw is copied from the front surface with per-rect
// CopySurface calls. The surface stays locked until NotifySurfaceReady
// unlocks mInProgressLockedIOSurface. (Failure return and trailing lines are
// elided in this view.)
RefPtr<gfx::DrawTarget> NativeLayerCA::NextSurfaceAsDrawTarget(
    const IntRect& aDisplayRect, const IntRegion& aUpdateRegion,
    gfx::BackendType aBackendType) {
  MutexAutoLock lock(mMutex);
  if (!NextSurface(lock)) {

  mInProgressLockedIOSurface = new MacIOSurface(mInProgressSurface->mSurface);
  mInProgressLockedIOSurface->Lock(false);
  RefPtr<gfx::DrawTarget> dt =
      mInProgressLockedIOSurface->GetAsDrawTargetLocked(aBackendType);

  HandlePartialUpdate(
      lock, aDisplayRect, aUpdateRegion,
      [&](CFTypeRefPtr<IOSurfaceRef> validSource,
          const gfx::IntRegion& copyRegion) {
        // Copy each rect of copyRegion from the front surface into dt.
        RefPtr<MacIOSurface> source = new MacIOSurface(validSource);
        RefPtr<gfx::DrawTarget> sourceDT =
            source->GetAsDrawTargetLocked(aBackendType);
        RefPtr<gfx::SourceSurface> sourceSurface = sourceDT->Snapshot();

        for (auto iter = copyRegion.RectIter(); !iter.Done(); iter.Next()) {
          const gfx::IntRect& r = iter.Get();
          dt->CopySurface(sourceSurface, r, r.TopLeft());
        source->Unlock(true);
// GPU-drawing entry point of the swap chain: obtains the next surface and
// returns a GL framebuffer name bound to it (with a depth buffer when
// aNeedsDepth is set — that parameter's declaration line is elided in this
// view). Valid content that this update won't redraw is blitted from the
// front surface's framebuffer, flipping rects vertically when the surface
// is flipped.
Maybe<GLuint> NativeLayerCA::NextSurfaceAsFramebuffer(
    const IntRect& aDisplayRect, const IntRegion& aUpdateRegion,
  MutexAutoLock lock(mMutex);
  MOZ_RELEASE_ASSERT(NextSurface(lock),
                     "NextSurfaceAsFramebuffer needs a surface.");

  Maybe<GLuint> fbo = mSurfacePoolHandle->GetFramebufferForSurface(
      mInProgressSurface->mSurface, aNeedsDepth);
  MOZ_RELEASE_ASSERT(fbo, "GetFramebufferForSurface failed.");

  HandlePartialUpdate(
      lock, aDisplayRect, aUpdateRegion,
      [&](CFTypeRefPtr<IOSurfaceRef> validSource,
          const gfx::IntRegion& copyRegion) {
        // Copy copyRegion from validSource to fbo.
        MOZ_RELEASE_ASSERT(mSurfacePoolHandle->gl());
        mSurfacePoolHandle->gl()->MakeCurrent();
        Maybe<GLuint> sourceFBO =
            mSurfacePoolHandle->GetFramebufferForSurface(validSource, false);
            "GetFramebufferForSurface failed during HandlePartialUpdate.");
        for (auto iter = copyRegion.RectIter(); !iter.Done(); iter.Next()) {
          gfx::IntRect r = iter.Get();
          if (mSurfaceIsFlipped) {
            // GL framebuffers are bottom-up; mirror the rect vertically.
            r.y = mSize.height - r.YMost();
          // Same source and dest rect: 1:1 copy, NEAREST is exact.
          mSurfacePoolHandle->gl()->BlitHelper()->BlitFramebufferToFramebuffer(
              *sourceFBO, *fbo, r, r, LOCAL_GL_NEAREST);
// Completes the frame started by NextSurface*: unlocks any CPU-locked
// surface, recycles the previous front surface into the back-surface list,
// promotes the in-progress surface to front (subtracting the freshly-drawn
// region from its invalid region), and marks the front surface — and, if it
// changed, the display rect — as mutated on every representation.
void NativeLayerCA::NotifySurfaceReady() {
  MutexAutoLock lock(mMutex);

#ifdef NIGHTLY_BUILD
  // A layer must stay either "drawn" or "external image"; mixing the two
  // kinds on one layer is a bug.
  mHasEverNotifySurfaceReady = true;
  MOZ_RELEASE_ASSERT(!mHasEverAttachExternalImage,
                     "Shouldn't change layer type to drawn.");
      "NotifySurfaceReady called without preceding call to NextSurface");

  if (mInProgressLockedIOSurface) {
    // Release the CPU lock taken in NextSurfaceAsDrawTarget.
    mInProgressLockedIOSurface->Unlock(false);
    mInProgressLockedIOSurface = nullptr;

  if (mFrontSurface) {
    // The old front surface goes back into the swap chain with a fresh
    // in-use check count.
    mSurfaces.push_back({*mFrontSurface, 0});
    mFrontSurface = Nothing();

  MOZ_RELEASE_ASSERT(mInProgressUpdateRegion);
  // Balance the IOSurfaceIncrementUseCount from NextSurface.
  IOSurfaceDecrementUseCount(mInProgressSurface->mSurface.get());
  mFrontSurface = std::move(mInProgressSurface);
  // Everything drawn this frame is now valid on the new front surface.
  mFrontSurface->mInvalidRegion.SubOut(mInProgressUpdateRegion.extract());
  ForAllRepresentations(
      [&](Representation& r) { r.mMutatedFrontSurface = true; });

  MOZ_RELEASE_ASSERT(mInProgressDisplayRect);
  if (!mDisplayRect.IsEqualInterior(*mInProgressDisplayRect)) {
    mDisplayRect = *mInProgressDisplayRect;
    ForAllRepresentations(
        [&](Representation& r) { r.mMutatedDisplayRect = true; });
  mInProgressDisplayRect = Nothing();
// Returns all back surfaces to the surface pool, e.g. to free memory when
// this layer is unlikely to be drawn again soon. (Clearing of mSurfaces is
// elided in this view.)
void NativeLayerCA::DiscardBackbuffers() {
  MutexAutoLock lock(mMutex);

  for (const auto& surf : mSurfaces) {
    mSurfacePoolHandle->ReturnSurfaceToPool(surf.mEntry.mSurface);
// Maps a WhichRepresentation tag to the corresponding member representation.
// Each NativeLayerCA maintains two parallel CALayer trees: one for onscreen
// presentation and one for offscreen (e.g. screenshot) use.
NativeLayerCA::Representation& NativeLayerCA::GetRepresentation(
    WhichRepresentation aRepresentation) {
  switch (aRepresentation) {
    case WhichRepresentation::ONSCREEN:
      return mOnscreenRepresentation;
    case WhichRepresentation::OFFSCREEN:
      return mOffscreenRepresentation;
// Applies aFn to both representations. Used to set the same mutation flag on
// the onscreen and offscreen layer trees in one call.
template <typename F>
void NativeLayerCA::ForAllRepresentations(F aFn) {
  aFn(mOnscreenRepresentation);
  aFn(mOffscreenRepresentation);
// Thread-safe wrapper: asks the chosen representation what kind of update it
// needs (None / OnlyVideo / All), passing along whether this layer currently
// holds video content.
NativeLayerCA::UpdateType NativeLayerCA::HasUpdate(
    WhichRepresentation aRepresentation) {
  MutexAutoLock lock(mMutex);
  return GetRepresentation(aRepresentation).HasUpdate(IsVideoAndLocked(lock));
// Computes the effective clip rect for the wrapping CALayer, in CoreAnimation
// (backing-scale-divided) units. The clip is the intersection of the explicit
// aClipRect and, when the display rect does not cover the whole layer, the
// transformed display rect. Returns Nothing() when no clipping is needed
// (that return path is elided in this view).
Maybe<CGRect> NativeLayerCA::CalculateClipGeometry(
    const gfx::IntSize& aSize, const gfx::IntPoint& aPosition,
    const gfx::Matrix4x4& aTransform, const gfx::IntRect& aDisplayRect,
    const Maybe<gfx::IntRect>& aClipRect, float aBackingScale) {
  Maybe<IntRect> clipFromDisplayRect;
  if (!aDisplayRect.IsEqualInterior(IntRect({}, aSize))) {
    // When the display rect is a subset of the layer, then we want to guarantee
    // that no pixels outside that rect are sampled, since they might be
    // uninitialized. Transforming the display rect into a post-transform clip
    // only maintains this if it's an integer translation, which is all we
    // support for this case currently.
    MOZ_ASSERT(aTransform.Is2DIntegerTranslation());
    clipFromDisplayRect = Some(RoundedToInt(
        aTransform.TransformBounds(IntRectToRect(aDisplayRect + aPosition))));

  Maybe<gfx::IntRect> effectiveClip =
      IntersectMaybeRects(aClipRect, clipFromDisplayRect);
  if (!effectiveClip) {

  // Convert device pixels to CoreAnimation units.
  return Some(CGRectMake(effectiveClip->X() / aBackingScale,
                         effectiveClip->Y() / aBackingScale,
                         effectiveClip->Width() / aBackingScale,
                         effectiveClip->Height() / aBackingScale));
// Pushes all pending (mutated) layer state into the chosen representation's
// CALayers. The displayed surface is the drawn front surface if there is one,
// otherwise the external TextureHost's IOSurface. Returns the result of the
// representation's ApplyChanges (false means the update failed and should be
// retried as a full update).
bool NativeLayerCA::ApplyChanges(WhichRepresentation aRepresentation,
                                 NativeLayerCA::UpdateType aUpdate) {
  MutexAutoLock lock(mMutex);
  CFTypeRefPtr<IOSurfaceRef> surface;
  if (mFrontSurface) {
    surface = mFrontSurface->mSurface;
  } else if (mTextureHost) {
    surface = mTextureHost->GetSurface()->GetIOSurfaceRef();
  return GetRepresentation(aRepresentation)
      .ApplyChanges(aUpdate, mSize, mIsOpaque, mPosition, mTransform,
                    mDisplayRect, mClipRect, mBackingScale, mSurfaceIsFlipped,
                    mSamplingFilter, mSpecializeVideo, surface, mColor, mIsDRM,
// Thread-safe accessor for the representation's underlying CALayer, used by
// the compositor to mount this layer into the window's layer tree.
CALayer* NativeLayerCA::UnderlyingCALayer(WhichRepresentation aRepresentation) {
  MutexAutoLock lock(mMutex);
  return GetRepresentation(aRepresentation).UnderlyingCALayer();
// Renders a four-character OSType code (e.g. a pixel format or codec code)
// as a 4-character NSString, big-endian byte order, for logging. The returned
// string is autoreleased. (Buffer declaration and return are elided in this
// view.)
static NSString* NSStringForOSType(OSType type) {
  c[0] = (type >> 24) & 0xFF;
  c[1] = (type >> 16) & 0xFF;
  c[2] = (type >> 8) & 0xFF;
  c[3] = (type >> 0) & 0xFF;
  NSString* string = [[NSString stringWithCharacters:c length:4] autorelease];
/* static */
// Debug logging helper for the VIDEO_LOG pref: dumps the IOSurface's
// properties, the pixel buffer's color space and propagated attachments, and
// the format description's codec and extensions. aBuffer/aFormat handling for
// null arguments is elided in this view.
/* static */ void LogSurface(IOSurfaceRef aSurfaceRef, CVPixelBufferRef aBuffer,
                             CMVideoFormatDescriptionRef aFormat) {
  NSLog(@"VIDEO_LOG: LogSurface...\n");

  CFDictionaryRef surfaceValues = IOSurfaceCopyAllValues(aSurfaceRef);
  NSLog(@"Surface values are %@.\n", surfaceValues);
  // IOSurfaceCopyAllValues follows the Create/Copy rule; release it.
  CFRelease(surfaceValues);

  CGColorSpaceRef colorSpace = CVImageBufferGetColorSpace(aBuffer);
  NSLog(@"ColorSpace is %@.\n", colorSpace);

  CFDictionaryRef bufferAttachments =
      CVBufferGetAttachments(aBuffer, kCVAttachmentMode_ShouldPropagate);
  NSLog(@"Buffer attachments are %@.\n", bufferAttachments);

  OSType codec = CMFormatDescriptionGetMediaSubType(aFormat);
  NSLog(@"Codec is %@.\n", NSStringForOSType(codec));

  CFDictionaryRef extensions = CMFormatDescriptionGetExtensions(aFormat);
  NSLog(@"Format extensions are %@.\n", extensions);
// Submits an IOSurface as a video frame to this representation's
// AVSampleBufferDisplayLayer. The surface is wrapped in a CVPixelBuffer,
// given a CMVideoFormatDescription, packaged into a CMSampleBuffer tagged to
// display immediately (unless timing is spoofed via pref), and enqueued.
// Returns false on any failure so the caller can fall back to rebuilding a
// regular layer. (Several early-return and closing lines are elided in this
// view.)
bool NativeLayerCA::Representation::EnqueueSurface(IOSurfaceRef aSurfaceRef) {
      [mContentCALayer isKindOfClass:[AVSampleBufferDisplayLayer class]]);
  AVSampleBufferDisplayLayer* videoLayer =
      (AVSampleBufferDisplayLayer*)mContentCALayer;

  // A layer that requires a flush can't accept frames until it is rebuilt.
  if (@available(macOS 11.0, iOS 14.0, *)) {
    if (videoLayer.requiresFlushToResumeDecoding) {

  // If the layer can't handle a new sample, early exit.
  if (!videoLayer.readyForMoreMediaData) {
#ifdef NIGHTLY_BUILD
    if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
      NSLog(@"VIDEO_LOG: EnqueueSurface failed on readyForMoreMediaData.");

  // Convert the IOSurfaceRef into a CMSampleBuffer, so we can enqueue it in
  CVPixelBufferRef pixelBuffer = nullptr;
  CVReturn cvValue = CVPixelBufferCreateWithIOSurface(
      kCFAllocatorDefault, aSurfaceRef, nullptr, &pixelBuffer);
  if (cvValue != kCVReturnSuccess) {
    MOZ_ASSERT(pixelBuffer == nullptr,
               "Failed call shouldn't allocate memory.");
#ifdef NIGHTLY_BUILD
    if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
      NSLog(@"VIDEO_LOG: EnqueueSurface failed on allocating pixel buffer.");

#ifdef NIGHTLY_BUILD
  if (StaticPrefs::gfx_core_animation_specialize_video_check_color_space()) {
    // Ensure the resulting pixel buffer has a color space. If it doesn't, then
    // modify the surface and create the buffer again.
    CFTypeRefPtr<CGColorSpaceRef> colorSpace =
        CFTypeRefPtr<CGColorSpaceRef>::WrapUnderGetRule(
            CVImageBufferGetColorSpace(pixelBuffer));
      // Use our main display color space.
      colorSpace = CFTypeRefPtr<CGColorSpaceRef>::WrapUnderCreateRule(
          CGDisplayCopyColorSpace(CGMainDisplayID()));
      // Stamp the ICC data onto the surface itself so the re-created pixel
      // buffer picks it up.
      auto colorData = CFTypeRefPtr<CFDataRef>::WrapUnderCreateRule(
          CGColorSpaceCopyICCData(colorSpace.get()));
      IOSurfaceSetValue(aSurfaceRef, CFSTR("IOSurfaceColorSpace"),

      // Get rid of our old pixel buffer and create a new one.
      CFRelease(pixelBuffer);
      cvValue = CVPixelBufferCreateWithIOSurface(
          kCFAllocatorDefault, aSurfaceRef, nullptr, &pixelBuffer);
      if (cvValue != kCVReturnSuccess) {
        MOZ_ASSERT(pixelBuffer == nullptr,
                   "Failed call shouldn't allocate memory.");

    MOZ_ASSERT(CVImageBufferGetColorSpace(pixelBuffer),
               "Pixel buffer should have a color space.");

  // From here on, pixelBuffer is owned by this RAII wrapper (Create rule).
  CFTypeRefPtr<CVPixelBufferRef> pixelBufferDeallocator =
      CFTypeRefPtr<CVPixelBufferRef>::WrapUnderCreateRule(pixelBuffer);

  CMVideoFormatDescriptionRef formatDescription = nullptr;
  OSStatus osValue = CMVideoFormatDescriptionCreateForImageBuffer(
      kCFAllocatorDefault, pixelBuffer, &formatDescription);
  if (osValue != noErr) {
    MOZ_ASSERT(formatDescription == nullptr,
               "Failed call shouldn't allocate memory.");
#ifdef NIGHTLY_BUILD
    if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
      NSLog(@"VIDEO_LOG: EnqueueSurface failed on allocating format "

  CFTypeRefPtr<CMVideoFormatDescriptionRef> formatDescriptionDeallocator =
      CFTypeRefPtr<CMVideoFormatDescriptionRef>::WrapUnderCreateRule(

#ifdef NIGHTLY_BUILD
  // One-shot logging of the first frame after a layer rebuild.
  if (mLogNextVideoSurface &&
      StaticPrefs::gfx_core_animation_specialize_video_log()) {
    LogSurface(aSurfaceRef, pixelBuffer, formatDescription);
    mLogNextVideoSurface = false;

  CMSampleTimingInfo timingInfo = kCMTimingInfoInvalid;

  bool spoofTiming = false;
#ifdef NIGHTLY_BUILD
  spoofTiming = StaticPrefs::gfx_core_animation_specialize_video_spoof_timing();
    // Since we don't have timing information for the sample, set the sample to
    // play at the current timestamp.
    CMTimebaseRef timebase =
        [(AVSampleBufferDisplayLayer*)mContentCALayer controlTimebase];
    CMTime nowTime = CMTimebaseGetTime(timebase);
    timingInfo = {.presentationTimeStamp = nowTime};

  CMSampleBufferRef sampleBuffer = nullptr;
  osValue = CMSampleBufferCreateReadyWithImageBuffer(
      kCFAllocatorDefault, pixelBuffer, formatDescription, &timingInfo,
  if (osValue != noErr) {
    MOZ_ASSERT(sampleBuffer == nullptr,
               "Failed call shouldn't allocate memory.");
#ifdef NIGHTLY_BUILD
    if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
      NSLog(@"VIDEO_LOG: EnqueueSurface failed on allocating sample buffer.");

  CFTypeRefPtr<CMSampleBufferRef> sampleBufferDeallocator =
      CFTypeRefPtr<CMSampleBufferRef>::WrapUnderCreateRule(sampleBuffer);

    // Since we don't have timing information for the sample, before we enqueue
    // it, we attach an attribute that specifies that the sample should be
    // played immediately.
    CFArrayRef attachmentsArray =
        CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, YES);
    if (!attachmentsArray || CFArrayGetCount(attachmentsArray) == 0) {
      // No dictionary to alter.
    CFMutableDictionaryRef sample0Dictionary =
        (__bridge CFMutableDictionaryRef)CFArrayGetValueAtIndex(
            attachmentsArray, 0);
    CFDictionarySetValue(sample0Dictionary,
                         kCMSampleAttachmentKey_DisplayImmediately,

  [videoLayer enqueueSampleBuffer:sampleBuffer];
// Applies all mutated state to this representation's CALayer tree. Two modes:
//  - UpdateType::OnlyVideo: only the front surface changed on a video layer;
//    enqueue it on the AVSampleBufferDisplayLayer and return. On failure,
//    mMutatedSpecializeVideo is set so the next All update rebuilds the layer.
//  - UpdateType::All: (re)build the wrapping/content/tint layers as needed,
//    then program geometry, transform, sampling filter and contents, clearing
//    each mutation flag once applied. The front surface is handled last so a
//    failed video enqueue leaves the flags set for a retry.
// Returns false only when a video enqueue fails. (Several early-return,
// else, and closing lines are elided in this view.)
bool NativeLayerCA::Representation::ApplyChanges(
    NativeLayerCA::UpdateType aUpdate, const IntSize& aSize, bool aIsOpaque,
    const IntPoint& aPosition, const Matrix4x4& aTransform,
    const IntRect& aDisplayRect, const Maybe<IntRect>& aClipRect,
    float aBackingScale, bool aSurfaceIsFlipped,
    gfx::SamplingFilter aSamplingFilter, bool aSpecializeVideo,
    CFTypeRefPtr<IOSurfaceRef> aFrontSurface, CFTypeRefPtr<CGColorRef> aColor,
    bool aIsDRM, bool aIsVideo) {
  // If we have an OnlyVideo update, handle it and early exit.
  if (aUpdate == UpdateType::OnlyVideo) {
    // If we don't have any updates to do, exit early with success. This is
    // important to do so that the overall OnlyVideo pass will succeed as long
    // as the video layers are successful.
    if (HasUpdate(true) == UpdateType::None) {

    MOZ_ASSERT(!mMutatedSpecializeVideo && mMutatedFrontSurface,
               "Shouldn't attempt a OnlyVideo update in this case.");

    bool updateSucceeded = false;
    if (aSpecializeVideo) {
      IOSurfaceRef surface = aFrontSurface.get();
      updateSucceeded = EnqueueSurface(surface);

      if (updateSucceeded) {
        mMutatedFrontSurface = false;
        // Set mMutatedSpecializeVideo, which will ensure that the next update
        // will rebuild the video layer.
        mMutatedSpecializeVideo = true;
#ifdef NIGHTLY_BUILD
        if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
          NSLog(@"VIDEO_LOG: EnqueueSurface failed in OnlyVideo update.");

    return updateSucceeded;

  MOZ_ASSERT(aUpdate == UpdateType::All);

  if (mWrappingCALayer && mMutatedSpecializeVideo) {
    // Since specialize video changes the way we construct our wrapping and
    // content layers, we have to scrap them if this value has changed.
#ifdef NIGHTLY_BUILD
    if (aIsVideo && StaticPrefs::gfx_core_animation_specialize_video_log()) {
      NSLog(@"VIDEO_LOG: Scrapping existing video layer.");

    [mContentCALayer release];
    mContentCALayer = nil;
    [mOpaquenessTintLayer release];
    mOpaquenessTintLayer = nil;
    [mWrappingCALayer removeFromSuperlayer];
    [mWrappingCALayer release];
    mWrappingCALayer = nil;

  bool layerNeedsInitialization = false;
  if (!mWrappingCALayer) {
    layerNeedsInitialization = true;
    // The wrapping layer carries position and (via masksToBounds) the clip;
    // the content layer inside it carries the surface and transform.
    mWrappingCALayer = [[CALayer layer] retain];
    mWrappingCALayer.position = CGPointZero;
    mWrappingCALayer.bounds = CGRectZero;
    mWrappingCALayer.anchorPoint = CGPointZero;
    mWrappingCALayer.contentsGravity = kCAGravityTopLeft;
    mWrappingCALayer.edgeAntialiasingMask = 0;

      // Color layers set a color on the wrapping layer and don't get a content
      mWrappingCALayer.backgroundColor = aColor.get();

      if (aSpecializeVideo) {
#ifdef NIGHTLY_BUILD
            StaticPrefs::gfx_core_animation_specialize_video_log()) {
          NSLog(@"VIDEO_LOG: Rebuilding video layer with "
                @"AVSampleBufferDisplayLayer.");
          mLogNextVideoSurface = true;

        // Video layers get an AVSampleBufferDisplayLayer driven by a
        // host-clock timebase running at rate 1.
        mContentCALayer = [[AVSampleBufferDisplayLayer layer] retain];
        CMTimebaseRef timebase;
#ifdef CMTIMEBASE_USE_SOURCE_TERMINOLOGY
        CMTimebaseCreateWithSourceClock(kCFAllocatorDefault,
                                        CMClockGetHostTimeClock(), &timebase);
        CMTimebaseCreateWithMasterClock(kCFAllocatorDefault,
                                        CMClockGetHostTimeClock(), &timebase);
        CMTimebaseSetRate(timebase, 1.0f);
        [(AVSampleBufferDisplayLayer*)mContentCALayer
            setControlTimebase:timebase];
        CFRelease(timebase);
#ifdef NIGHTLY_BUILD
            StaticPrefs::gfx_core_animation_specialize_video_log()) {
          NSLog(@"VIDEO_LOG: Rebuilding video layer with CALayer.");
          mLogNextVideoSurface = true;

        mContentCALayer = [[CALayer layer] retain];

      mContentCALayer.position = CGPointZero;
      mContentCALayer.anchorPoint = CGPointZero;
      mContentCALayer.contentsGravity = kCAGravityTopLeft;
      mContentCALayer.contentsScale = 1;
      mContentCALayer.bounds = CGRectMake(0, 0, aSize.width, aSize.height);
      mContentCALayer.edgeAntialiasingMask = 0;
      mContentCALayer.opaque = aIsOpaque;
      if ([mContentCALayer respondsToSelector:@selector(setContentsOpaque:)]) {
        // The opaque property seems to not be enough when using IOSurface
        // contents. Additionally, call the private method setContentsOpaque.
        [mContentCALayer setContentsOpaque:aIsOpaque];

      [mWrappingCALayer addSublayer:mContentCALayer];

  if (aSpecializeVideo && mMutatedIsDRM) {
    // DRM content must be excluded from screen capture.
    ((AVSampleBufferDisplayLayer*)mContentCALayer).preventsCapture = aIsDRM;

  // Debugging pref: tint the layer green/red to visualize opaqueness.
  bool shouldTintOpaqueness = StaticPrefs::gfx_core_animation_tint_opaque();
  if (shouldTintOpaqueness && !mOpaquenessTintLayer) {
    mOpaquenessTintLayer = [[CALayer layer] retain];
    mOpaquenessTintLayer.position = CGPointZero;
    mOpaquenessTintLayer.bounds = mContentCALayer.bounds;
    mOpaquenessTintLayer.anchorPoint = CGPointZero;
    mOpaquenessTintLayer.contentsGravity = kCAGravityTopLeft;
      mOpaquenessTintLayer.backgroundColor =
          [[[NSColor greenColor] colorWithAlphaComponent:0.5] CGColor];
      mOpaquenessTintLayer.backgroundColor =
          [[[NSColor redColor] colorWithAlphaComponent:0.5] CGColor];
    [mWrappingCALayer addSublayer:mOpaquenessTintLayer];
  } else if (!shouldTintOpaqueness && mOpaquenessTintLayer) {
    [mOpaquenessTintLayer removeFromSuperlayer];
    [mOpaquenessTintLayer release];
    mOpaquenessTintLayer = nullptr;

  // CALayers have a position and a size, specified through the position and the
  // bounds properties. layer.bounds.origin must always be (0, 0). A layer's
  // position affects the layer's entire layer subtree. In other words, each
  // layer's position is relative to its superlayer's position. We implement the
  // clip rect using masksToBounds on mWrappingCALayer. So mContentCALayer's
  // position is relative to the clip rect position. Note: The Core Animation
  // docs on "Positioning and Sizing Sublayers" say:
  //  Important: Always use integral numbers for the width and height of your
  // We hope that this refers to integral physical pixels, and not to integral
  // logical coordinates.

  if (mContentCALayer &&
      (mMutatedBackingScale || mMutatedSize || layerNeedsInitialization)) {
    // Bounds are in CoreAnimation units; contentsScale maps them back to
    // device pixels.
    mContentCALayer.bounds = CGRectMake(0, 0, aSize.width / aBackingScale,
                                        aSize.height / aBackingScale);
    if (mOpaquenessTintLayer) {
      mOpaquenessTintLayer.bounds = mContentCALayer.bounds;
    mContentCALayer.contentsScale = aBackingScale;

  if (mMutatedBackingScale || mMutatedPosition || mMutatedDisplayRect ||
      mMutatedClipRect || mMutatedTransform || mMutatedSurfaceIsFlipped ||
      mMutatedSize || layerNeedsInitialization) {
    Maybe<CGRect> scaledClipRect = CalculateClipGeometry(
        aSize, aPosition, aTransform, aDisplayRect, aClipRect, aBackingScale);

    if (scaledClipRect.isSome()) {
      useClipRect = *scaledClipRect;
      useClipRect = CGRectZero;

    mWrappingCALayer.position = useClipRect.origin;
    mWrappingCALayer.bounds =
        CGRectMake(0, 0, useClipRect.size.width, useClipRect.size.height);
    mWrappingCALayer.masksToBounds = scaledClipRect.isSome();

    if (mContentCALayer) {
      // Position the content relative to the clip rect origin (the wrapping
      // layer already carries the clip position).
      Matrix4x4 transform = aTransform;
      transform.PreTranslate(aPosition.x, aPosition.y, 0);
      transform.PostTranslate((-useClipRect.origin.x * aBackingScale),
                              (-useClipRect.origin.y * aBackingScale), 0);

      if (aSurfaceIsFlipped) {
        transform.PreTranslate(0, aSize.height, 0).PreScale(1, -1, 1);

      // Translation entries are converted to CoreAnimation units; the
      // remaining matrix entries are elided in this view.
      CATransform3D transformCA{transform._11,
                                transform._41 / aBackingScale,
                                transform._42 / aBackingScale,

      mContentCALayer.transform = transformCA;
      if (mOpaquenessTintLayer) {
        mOpaquenessTintLayer.transform = mContentCALayer.transform;

  if (mContentCALayer && (mMutatedSamplingFilter || layerNeedsInitialization)) {
    if (aSamplingFilter == gfx::SamplingFilter::POINT) {
      mContentCALayer.minificationFilter = kCAFilterNearest;
      mContentCALayer.magnificationFilter = kCAFilterNearest;
      mContentCALayer.minificationFilter = kCAFilterLinear;
      mContentCALayer.magnificationFilter = kCAFilterLinear;

  if (mMutatedFrontSurface) {
    // This is handled last because a video update could fail, causing us to
    // early exit, leaving the mutation bits untouched. We do this so that the
    // *next* update will clear the video layer and setup a regular layer.
    IOSurfaceRef surface = aFrontSurface.get();
    if (aSpecializeVideo) {
      // Attempt to enqueue this as a video frame. If we fail, we'll rebuild
      // our video layer in the next update.
      bool isEnqueued = EnqueueSurface(surface);
        // Set mMutatedSpecializeVideo, which will ensure that the next update
        // will rebuild the video layer.
        mMutatedSpecializeVideo = true;
#ifdef NIGHTLY_BUILD
        if (StaticPrefs::gfx_core_animation_specialize_video_log()) {
          NSLog(@"VIDEO_LOG: EnqueueSurface failed in All update.");
#ifdef NIGHTLY_BUILD
      if (mLogNextVideoSurface &&
          StaticPrefs::gfx_core_animation_specialize_video_log()) {
        LogSurface(surface, nullptr, nullptr);
        mLogNextVideoSurface = false;

      // Non-specialized path: show the IOSurface directly as layer contents.
      mContentCALayer.contents = (id)surface;

  // All pending state has been applied; clear every mutation flag.
  mMutatedPosition = false;
  mMutatedTransform = false;
  mMutatedBackingScale = false;
  mMutatedSize = false;
  mMutatedSurfaceIsFlipped = false;
  mMutatedDisplayRect = false;
  mMutatedClipRect = false;
  mMutatedFrontSurface = false;
  mMutatedSamplingFilter = false;
  mMutatedSpecializeVideo = false;
  mMutatedIsDRM = false;
// Classifies the pending work for this representation: All when no layer
// exists yet or any structural property changed; OnlyVideo when only the
// front surface changed on a video layer; None otherwise. (The trailing
// aIsVideo parameter's declaration line is elided in this view.)
NativeLayerCA::UpdateType NativeLayerCA::Representation::HasUpdate(
  if (!mWrappingCALayer) {
    return UpdateType::All;

  // This check intentionally skips mMutatedFrontSurface. We'll check it later
  // to see if we can attempt an OnlyVideo update.
  if (mMutatedPosition || mMutatedTransform || mMutatedDisplayRect ||
      mMutatedClipRect || mMutatedBackingScale || mMutatedSize ||
      mMutatedSurfaceIsFlipped || mMutatedSamplingFilter ||
      mMutatedSpecializeVideo || mMutatedIsDRM) {
    return UpdateType::All;

  // Check if we should try an OnlyVideo update. We know from the above check
  // that our specialize video is stable (we don't know what value we'll
  // receive, though), so we just have to check that we have a surface to
  if (mMutatedFrontSurface) {
    return (aIsVideo ? UpdateType::OnlyVideo : UpdateType::All);

  return UpdateType::None;
// Returns true when the next update will alter the CALayer tree structure
// itself (no layer built yet, or the specialize-video mode changed), as
// opposed to merely updating properties of existing layers.
bool NativeLayerCA::WillUpdateAffectLayers(
    WhichRepresentation aRepresentation) {
  MutexAutoLock lock(mMutex);
  auto& r = GetRepresentation(aRepresentation);
  return r.mMutatedSpecializeVideo || !r.UnderlyingCALayer();
1996 // Called when mMutex is already being held by the current thread.
1997 Maybe<NativeLayerCA::SurfaceWithInvalidRegion>
1998 NativeLayerCA::GetUnusedSurfaceAndCleanUp(const MutexAutoLock& aProofOfLock) {
1999 std::vector<SurfaceWithInvalidRegionAndCheckCount> usedSurfaces;
2000 Maybe<SurfaceWithInvalidRegion> unusedSurface;
2002 // Separate mSurfaces into used and unused surfaces.
2003 for (auto& surf : mSurfaces) {
2004 if (IOSurfaceIsInUse(surf.mEntry.mSurface.get())) {
2006 if (surf.mCheckCount < 10) {
2007 usedSurfaces.push_back(std::move(surf));
2009 // The window server has been holding on to this surface for an
2010 // unreasonably long time. This is known to happen sometimes, for
2011 // example in occluded windows or after a GPU switch. In that case,
2012 // release our references to the surface so that it doesn't look like
2013 // we're trying to keep it alive.
2014 mSurfacePoolHandle->ReturnSurfaceToPool(
2015 std::move(surf.mEntry.mSurface));
2018 if (unusedSurface) {
2019 // Multiple surfaces are unused. Keep the most recent one and release
2020 // any earlier ones. The most recent one requires the least amount of
2021 // copying during partial repaints.
2022 mSurfacePoolHandle->ReturnSurfaceToPool(
2023 std::move(unusedSurface->mSurface));
2025 unusedSurface = Some(std::move(surf.mEntry));
2029 // Put the used surfaces back into mSurfaces.
2030 mSurfaces = std::move(usedSurfaces);
2032 return unusedSurface;
2035 bool DownscaleTargetNLRS::DownscaleFrom(
2036 profiler_screenshots::RenderSource* aSource, const IntRect& aSourceRect,
2037 const IntRect& aDestRect) {
2038 mGL->BlitHelper()->BlitFramebufferToFramebuffer(
2039 static_cast<RenderSourceNLRS*>(aSource)->FB().mFB,
2040 mRenderSource->FB().mFB, aSourceRect, aDestRect, LOCAL_GL_LINEAR);
2045 void AsyncReadbackBufferNLRS::CopyFrom(
2046 profiler_screenshots::RenderSource* aSource) {
2047 IntSize size = aSource->Size();
2048 MOZ_RELEASE_ASSERT(Size() == size);
2050 gl::ScopedPackState scopedPackState(mGL);
2051 mGL->fBindBuffer(LOCAL_GL_PIXEL_PACK_BUFFER, mBufferHandle);
2052 mGL->fPixelStorei(LOCAL_GL_PACK_ALIGNMENT, 1);
2053 const gl::ScopedBindFramebuffer bindFB(
2054 mGL, static_cast<RenderSourceNLRS*>(aSource)->FB().mFB);
2055 mGL->fReadPixels(0, 0, size.width, size.height, LOCAL_GL_RGBA,
2056 LOCAL_GL_UNSIGNED_BYTE, 0);
2059 bool AsyncReadbackBufferNLRS::MapAndCopyInto(DataSourceSurface* aSurface,
2060 const IntSize& aReadSize) {
2061 MOZ_RELEASE_ASSERT(aReadSize <= aSurface->GetSize());
2063 if (!mGL || !mGL->MakeCurrent()) {
2067 gl::ScopedPackState scopedPackState(mGL);
2068 mGL->fBindBuffer(LOCAL_GL_PIXEL_PACK_BUFFER, mBufferHandle);
2069 mGL->fPixelStorei(LOCAL_GL_PACK_ALIGNMENT, 1);
2071 const uint8_t* srcData = nullptr;
2072 if (mGL->IsSupported(gl::GLFeature::map_buffer_range)) {
2073 srcData = static_cast<uint8_t*>(mGL->fMapBufferRange(
2074 LOCAL_GL_PIXEL_PACK_BUFFER, 0, aReadSize.height * aReadSize.width * 4,
2075 LOCAL_GL_MAP_READ_BIT));
2077 srcData = static_cast<uint8_t*>(
2078 mGL->fMapBuffer(LOCAL_GL_PIXEL_PACK_BUFFER, LOCAL_GL_READ_ONLY));
2085 int32_t srcStride = mSize.width * 4; // Bind() sets an alignment of 1
2086 DataSourceSurface::ScopedMap map(aSurface, DataSourceSurface::WRITE);
2087 uint8_t* destData = map.GetData();
2088 int32_t destStride = map.GetStride();
2089 SurfaceFormat destFormat = aSurface->GetFormat();
2090 for (int32_t destRow = 0; destRow < aReadSize.height; destRow++) {
2091 // Turn srcData upside down during the copy.
2092 int32_t srcRow = aReadSize.height - 1 - destRow;
2093 const uint8_t* src = &srcData[srcRow * srcStride];
2094 uint8_t* dest = &destData[destRow * destStride];
2095 SwizzleData(src, srcStride, SurfaceFormat::R8G8B8A8, dest, destStride,
2096 destFormat, IntSize(aReadSize.width, 1));
2099 mGL->fUnmapBuffer(LOCAL_GL_PIXEL_PACK_BUFFER);
2104 AsyncReadbackBufferNLRS::~AsyncReadbackBufferNLRS() {
2105 if (mGL && mGL->MakeCurrent()) {
2106 mGL->fDeleteBuffers(1, &mBufferHandle);
2110 } // namespace layers
2111 } // namespace mozilla