1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
8 #include "ImageRegion.h"
9 #include "ShutdownTracker.h"
10 #include "SurfaceCache.h"
14 #include "gfx2DGlue.h"
15 #include "gfxPlatform.h"
19 #include "GeckoProfiler.h"
20 #include "MainThreadUtils.h"
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/gfx/gfxVars.h"
23 #include "mozilla/gfx/Tools.h"
24 #include "mozilla/gfx/SourceSurfaceRawData.h"
25 #include "mozilla/image/RecyclingSourceSurface.h"
26 #include "mozilla/layers/SourceSurfaceSharedData.h"
27 #include "mozilla/layers/SourceSurfaceVolatileData.h"
28 #include "mozilla/Likely.h"
29 #include "mozilla/MemoryReporting.h"
31 #include "nsRefreshDriver.h"
32 #include "nsThreadUtils.h"
40 static int32_t VolatileSurfaceStride(const IntSize
& size
,
41 SurfaceFormat format
) {
42 // Stride must be a multiple of four or cairo will complain.
43 return (size
.width
* BytesPerPixel(format
) + 0x3) & ~0x3;
// Wraps aSurface so that its pixel data stays accessible ("locked") for the
// caller. Shared/aligned surfaces are returned as-is (their memory is never
// released before the surface itself); volatile surfaces must be mapped
// first, which can fail.
46 static already_AddRefed
<DataSourceSurface
> CreateLockedSurface(
47 DataSourceSurface
* aSurface
, const IntSize
& size
, SurfaceFormat format
) {
48 switch (aSurface
->GetType()) {
49 case SurfaceType::DATA_SHARED
:
50 case SurfaceType::DATA_ALIGNED
: {
51 // Shared memory is never released until the surface itself is released.
52 // Similar for aligned/heap surfaces.
53 RefPtr
<DataSourceSurface
> surf(aSurface
);
// NOTE(review): lines appear to be missing from this excerpt here (the
// return for the shared/aligned case and the start of the default case).
57 // Volatile memory requires us to map it first, and it is fallible.
58 DataSourceSurface::ScopedMap
smap(aSurface
,
59 DataSourceSurface::READ_WRITE
);
60 if (smap
.IsMapped()) {
// On a successful map, hand back a surface type that owns the mapping.
61 return MakeAndAddRef
<SourceSurfaceMappedData
>(std::move(smap
), size
,
// Decides whether an image buffer should be allocated on the heap instead
// of in volatile memory; AllocateBufferForImage (below) heap-allocates a
// SourceSurfaceAlignedRawData when this returns true.
71 static bool ShouldUseHeap(const IntSize
& aSize
, int32_t aStride
,
73 // On some platforms (i.e. Android), a volatile buffer actually keeps a file
74 // handle active. We would like to avoid too many since we could easily
75 // exhaust the pool. However, other platforms we do not have the file handle
76 // problem, and additionally we may avoid a superfluous memset since the
77 // volatile memory starts out as zero-filled. Hence the knobs below.
79 // For as long as an animated image is retained, its frames will never be
80 // released to let the OS purge volatile buffers.
// Animated frames past the first may prefer the heap (pref-controlled).
81 if (aIsAnimated
&& StaticPrefs::image_mem_animated_use_heap()) {
85 // Lets us avoid too many small images consuming all of the handles. The
86 // actual allocation checks for overflow.
// Buffer size in KiB, compared against the volatile-allocation threshold.
87 int32_t bufferSize
= (aStride
* aSize
.height
) / 1024;
88 if (bufferSize
< StaticPrefs::image_mem_volatile_min_threshold_kb()) {
// Allocates the backing pixel buffer for an image frame, choosing among
// shared memory (WebRender + pref), the heap (per ShouldUseHeap), or
// volatile memory as a fallback. Stride comes from VolatileSurfaceStride.
95 static already_AddRefed
<DataSourceSurface
> AllocateBufferForImage(
96 const IntSize
& size
, SurfaceFormat format
, bool aIsAnimated
= false) {
97 int32_t stride
= VolatileSurfaceStride(size
, format
);
// Preferred: shared-memory surface (can be handed to the compositor).
99 if (gfxVars::GetUseWebRenderOrDefault() && StaticPrefs::image_mem_shared()) {
100 RefPtr
<SourceSurfaceSharedData
> newSurf
= new SourceSurfaceSharedData();
101 if (newSurf
->Init(size
, stride
, format
)) {
102 return newSurf
.forget();
// Heap allocation when volatile buffers would be wasteful (see ShouldUseHeap).
104 } else if (ShouldUseHeap(size
, stride
, aIsAnimated
)) {
105 RefPtr
<SourceSurfaceAlignedRawData
> newSurf
=
106 new SourceSurfaceAlignedRawData();
107 if (newSurf
->Init(size
, format
, false, 0, stride
)) {
108 return newSurf
.forget();
// Fallback: volatile memory the OS may purge while unlocked.
111 RefPtr
<SourceSurfaceVolatileData
> newSurf
= new SourceSurfaceVolatileData();
112 if (newSurf
->Init(size
, stride
, format
)) {
113 return newSurf
.forget();
// Fills aSurface with solid green (used for the "blank"/debug surface in
// InitForDecoder). Writes a 32-bit pattern whose G and A channels are 0xFF,
// adjusted for formats that store alpha in the leading byte.
119 static bool GreenSurface(DataSourceSurface
* aSurface
, const IntSize
& aSize
,
120 SurfaceFormat aFormat
) {
121 int32_t stride
= aSurface
->Stride();
122 uint32_t* surfaceData
= reinterpret_cast<uint32_t*>(aSurface
->GetData());
123 uint32_t surfaceDataLength
= (stride
* aSize
.height
) / sizeof(uint32_t);
125 // Start by assuming that GG is in the second byte and
126 // AA is in the final byte -- the most common case.
127 uint32_t color
= mozilla::NativeEndian::swapFromBigEndian(0x00FF00FF);
129 // We are only going to handle this type of test under
130 // certain circumstances.
131 MOZ_ASSERT(surfaceData
);
132 MOZ_ASSERT(aFormat
== SurfaceFormat::B8G8R8A8
||
133 aFormat
== SurfaceFormat::B8G8R8X8
||
134 aFormat
== SurfaceFormat::R8G8B8A8
||
135 aFormat
== SurfaceFormat::R8G8B8X8
||
136 aFormat
== SurfaceFormat::A8R8G8B8
||
137 aFormat
== SurfaceFormat::X8R8G8B8
);
// NOTE(review): this assert looks inverted — as written it only passes when
// the buffer size is NOT a multiple of sizeof(uint32_t). Presumably
// "... % sizeof(uint32_t) == 0" was intended; verify against upstream.
138 MOZ_ASSERT((stride
* aSize
.height
) % sizeof(uint32_t));
// ARGB/XRGB store alpha first, so shift the pattern accordingly.
140 if (aFormat
== SurfaceFormat::A8R8G8B8
||
141 aFormat
== SurfaceFormat::X8R8G8B8
) {
142 color
= mozilla::NativeEndian::swapFromBigEndian(0xFF00FF00);
145 for (uint32_t i
= 0; i
< surfaceDataLength
; i
++) {
146 surfaceData
[i
] = color
;
// Initializes a freshly allocated surface's pixels: opaque (0xFF) fill for
// RGBX formats so the alpha bytes read as opaque, zero fill for heap
// buffers, and nothing for mmap'd buffers (already zeroed, COW on write).
152 static bool ClearSurface(DataSourceSurface
* aSurface
, const IntSize
& aSize
,
153 SurfaceFormat aFormat
) {
154 int32_t stride
= aSurface
->Stride();
155 uint8_t* data
= aSurface
->GetData();
158 if (aFormat
== SurfaceFormat::OS_RGBX
) {
159 // Skia doesn't support RGBX surfaces, so ensure the alpha value is set
160 // to opaque white. While it would be nice to only do this for Skia,
161 // imgFrame can run off main thread and past shutdown where
162 // we might not have gfxPlatform, so just memset every time instead.
163 memset(data
, 0xFF, stride
* aSize
.height
);
164 } else if (aSurface
->OnHeap()) {
165 // We only need to memset it if the buffer was allocated on the heap.
166 // Otherwise, it's allocated via mmap and refers to a zeroed page and will
167 // be COW once it's written to.
168 memset(data
, 0, stride
* aSize
.height
);
// imgFrame constructor member-init list (the signature line is not visible
// in this excerpt). Defaults: empty decoded rect, 100ms frame timeout,
// unspecified disposal, OVER blending, unknown surface format.
175 : mMonitor("imgFrame"),
176 mDecoded(0, 0, 0, 0),
178 mRecycleLockCount(0),
182 mShouldRecycle(false),
183 mTimeout(FrameTimeout::FromRawMilliseconds(100)),
184 mDisposalMethod(DisposalMethod::NOT_SPECIFIED
),
185 mBlendMethod(BlendMethod::OVER
),
186 mFormat(SurfaceFormat::UNKNOWN
),
187 mNonPremult(false) {}
// Destructor: sanity-checks that the frame was either aborted or fully
// decoded and finished before being destroyed.
189 imgFrame::~imgFrame() {
191 MonitorAutoLock
lock(mMonitor
);
192 MOZ_ASSERT(mAborted
|| AreAllPixelsWritten());
193 MOZ_ASSERT(mAborted
|| mFinished
);
// Initializes this frame for use by a decoder: validates the size, records
// animation parameters, allocates the raw pixel buffer (and optionally a
// solid "blank" debug buffer), creates locked views of them, and clears the
// pixels. Returns NS_ERROR_FAILURE on illegal size and
// NS_ERROR_OUT_OF_MEMORY on any allocation failure.
197 nsresult
imgFrame::InitForDecoder(const nsIntSize
& aImageSize
,
198 SurfaceFormat aFormat
, bool aNonPremult
,
199 const Maybe
<AnimationParams
>& aAnimParams
,
200 bool aShouldRecycle
) {
201 // Assert for properties that should be verified by decoders,
202 // warn for properties related to bad content.
203 if (!SurfaceCache::IsLegalSize(aImageSize
)) {
204 NS_WARNING("Should have legal image size");
206 return NS_ERROR_FAILURE
;
209 mImageSize
= aImageSize
;
211 // May be updated shortly after InitForDecoder by BlendAnimationFilter
212 // because it needs to take into consideration the previous frames to
213 // properly calculate. We start with the whole frame as dirty.
214 mDirtyRect
= GetRect();
// Copy animation parameters when provided (branch head not visible here).
217 mBlendRect
= aAnimParams
->mBlendRect
;
218 mTimeout
= aAnimParams
->mTimeout
;
219 mBlendMethod
= aAnimParams
->mBlendMethod
;
220 mDisposalMethod
= aAnimParams
->mDisposalMethod
;
// Non-animated fallback: blend over the whole frame.
222 mBlendRect
= GetRect();
225 if (aShouldRecycle
) {
226 // If we are recycling then we should always use BGRA for the underlying
227 // surface because if we use BGRX, the next frame composited into the
228 // surface could be BGRA and cause rendering problems.
229 MOZ_ASSERT(aAnimParams
);
230 mFormat
= SurfaceFormat::OS_RGBA
;
235 mNonPremult
= aNonPremult
;
236 mShouldRecycle
= aShouldRecycle
;
238 MOZ_ASSERT(!mLockedSurface
, "Called imgFrame::InitForDecoder() twice?");
// Frames after the first in an animation influence buffer placement
// (see ShouldUseHeap via AllocateBufferForImage).
240 bool postFirstFrame
= aAnimParams
&& aAnimParams
->mFrameNum
> 0;
241 mRawSurface
= AllocateBufferForImage(mImageSize
, mFormat
, postFirstFrame
);
244 return NS_ERROR_OUT_OF_MEMORY
;
// Optional pref-gated solid ("blank") surface used for measurement builds.
247 if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
249 mBlankRawSurface
= AllocateBufferForImage(mImageSize
, mFormat
);
250 if (!mBlankRawSurface
) {
252 return NS_ERROR_OUT_OF_MEMORY
;
256 mLockedSurface
= CreateLockedSurface(mRawSurface
, mImageSize
, mFormat
);
257 if (!mLockedSurface
) {
258 NS_WARNING("Failed to create LockedSurface");
260 return NS_ERROR_OUT_OF_MEMORY
;
263 if (mBlankRawSurface
) {
264 mBlankLockedSurface
=
265 CreateLockedSurface(mBlankRawSurface
, mImageSize
, mFormat
);
266 if (!mBlankLockedSurface
) {
267 NS_WARNING("Failed to create BlankLockedSurface");
269 return NS_ERROR_OUT_OF_MEMORY
;
273 if (!ClearSurface(mRawSurface
, mImageSize
, mFormat
)) {
274 NS_WARNING("Could not clear allocated buffer");
276 return NS_ERROR_OUT_OF_MEMORY
;
279 if (mBlankRawSurface
) {
280 if (!GreenSurface(mBlankRawSurface
, mImageSize
, mFormat
)) {
281 NS_WARNING("Could not clear allocated blank buffer");
283 return NS_ERROR_OUT_OF_MEMORY
;
// Prepares this frame's buffer for reuse by the decoder for a new animation
// frame. Requires the frame to be locked and flagged recyclable; if a
// consumer still holds a recycle lock off the main thread, waits up to one
// refresh-driver interval for it to be released before giving up with
// NS_ERROR_NOT_AVAILABLE.
290 nsresult
imgFrame::InitForDecoderRecycle(const AnimationParams
& aAnimParams
) {
291 // We want to recycle this frame, but there is no guarantee that consumers are
292 // done with it in a timely manner. Let's ensure they are done with it first.
293 MonitorAutoLock
lock(mMonitor
);
295 MOZ_ASSERT(mLockCount
> 0);
296 MOZ_ASSERT(mLockedSurface
);
298 if (!mShouldRecycle
) {
299 // This frame either was never marked as recyclable, or the flag was cleared
300 // for a caller which does not support recycling.
301 return NS_ERROR_NOT_AVAILABLE
;
304 if (mRecycleLockCount
> 0) {
305 if (NS_IsMainThread()) {
306 // We should never be both decoding and recycling on the main thread. Sync
307 // decoding can only be used to produce the first set of frames. Those
308 // either never use recycling because advancing was blocked (main thread
309 // is busy) or we were auto-advancing (to seek to a frame) and the frames
310 // were never accessed (and thus cannot have recycle locks).
311 MOZ_ASSERT_UNREACHABLE("Recycling/decoding on the main thread?");
312 return NS_ERROR_NOT_AVAILABLE
;
315 // We don't want to wait forever to reclaim the frame because we have no
316 // idea why it is still held. It is possibly due to OMTP. Since we are off
317 // the main thread, and we generally have frames already buffered for the
318 // animation, we can afford to wait a short period of time to hopefully
319 // complete the transaction and reclaim the buffer.
321 // We choose to wait for, at most, the refresh driver interval, so that we
322 // won't skip more than one frame. If the frame is still in use due to
323 // outstanding transactions, we are already skipping frames. If the frame
324 // is still in use for some other purpose, it won't be returned to the pool
325 // and its owner can hold onto it forever without additional impact here.
326 TimeDuration timeout
=
327 TimeDuration::FromMilliseconds(nsRefreshDriver::DefaultInterval());
// Wait on the monitor; ~RecyclingSourceSurface notifies when the last
// recycle lock is dropped.
329 TimeStamp start
= TimeStamp::Now();
330 mMonitor
.Wait(timeout
);
331 if (mRecycleLockCount
== 0) {
335 TimeDuration delta
= TimeStamp::Now() - start
;
336 if (delta
>= timeout
) {
337 // We couldn't secure the frame for recycling. It will allocate a new
339 return NS_ERROR_NOT_AVAILABLE
;
// Adopt the new frame's animation parameters and mark everything dirty.
346 mBlendRect
= aAnimParams
.mBlendRect
;
347 mTimeout
= aAnimParams
.mTimeout
;
348 mBlendMethod
= aAnimParams
.mBlendMethod
;
349 mDisposalMethod
= aAnimParams
.mDisposalMethod
;
350 mDirtyRect
= GetRect();
// Initializes this frame by rasterizing aDrawable into it. Uses a data
// surface (volatile buffer) when the backend supports it; otherwise draws
// into an offscreen draw target and keeps the snapshot as an "optimized"
// surface, which may force an expensive readback on later RawAccessRef().
355 nsresult
imgFrame::InitWithDrawable(gfxDrawable
* aDrawable
,
356 const nsIntSize
& aSize
,
357 const SurfaceFormat aFormat
,
358 SamplingFilter aSamplingFilter
,
359 uint32_t aImageFlags
,
360 gfx::BackendType aBackend
) {
361 // Assert for properties that should be verified by decoders,
362 // warn for properties related to bad content.
363 if (!SurfaceCache::IsLegalSize(aSize
)) {
364 NS_WARNING("Should have legal image size");
366 return NS_ERROR_FAILURE
;
372 RefPtr
<DrawTarget
> target
;
374 bool canUseDataSurface
= Factory::DoesBackendSupportDataDrawtarget(aBackend
);
375 if (canUseDataSurface
) {
376 // It's safe to use data surfaces for content on this platform, so we can
377 // get away with using volatile buffers.
378 MOZ_ASSERT(!mLockedSurface
, "Called imgFrame::InitWithDrawable() twice?");
380 mRawSurface
= AllocateBufferForImage(mImageSize
, mFormat
);
383 return NS_ERROR_OUT_OF_MEMORY
;
386 mLockedSurface
= CreateLockedSurface(mRawSurface
, mImageSize
, mFormat
);
387 if (!mLockedSurface
) {
388 NS_WARNING("Failed to create LockedSurface");
390 return NS_ERROR_OUT_OF_MEMORY
;
393 if (!ClearSurface(mRawSurface
, mImageSize
, mFormat
)) {
394 NS_WARNING("Could not clear allocated buffer");
396 return NS_ERROR_OUT_OF_MEMORY
;
// Draw directly into the locked data buffer.
399 target
= gfxPlatform::CreateDrawTargetForData(
400 mLockedSurface
->GetData(), mImageSize
, mLockedSurface
->Stride(),
403 // We can't use data surfaces for content, so we'll create an offscreen
404 // surface instead. This means if someone later calls RawAccessRef(), we
405 // may have to do an expensive readback, but we warned callers about that in
406 // the documentation for this method.
407 MOZ_ASSERT(!mOptSurface
, "Called imgFrame::InitWithDrawable() twice?");
409 if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend
)) {
410 target
= gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
411 aBackend
, mImageSize
, mFormat
);
413 target
= gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
414 mImageSize
, mFormat
);
418 if (!target
|| !target
->IsValid()) {
420 return NS_ERROR_OUT_OF_MEMORY
;
423 // Draw using the drawable the caller provided.
424 RefPtr
<gfxContext
> ctx
= gfxContext::CreateOrNull(target
);
425 MOZ_ASSERT(ctx
); // Already checked the draw target above.
426 gfxUtils::DrawPixelSnapped(ctx
, aDrawable
, SizeDouble(mImageSize
),
427 ImageRegion::Create(ThebesRect(GetRect())),
428 mFormat
, aSamplingFilter
, aImageFlags
);
430 if (canUseDataSurface
&& !mLockedSurface
) {
431 NS_WARNING("Failed to create VolatileDataSourceSurface");
433 return NS_ERROR_OUT_OF_MEMORY
;
436 if (!canUseDataSurface
) {
437 // We used an offscreen surface, which is an "optimized" surface from
438 // imgFrame's perspective.
439 mOptSurface
= target
->Snapshot();
444 // If we reach this point, we should regard ourselves as complete.
445 mDecoded
= GetRect();
449 MonitorAutoLock
lock(mMonitor
);
450 MOZ_ASSERT(AreAllPixelsWritten());
// Main-thread-only: converts the frame's surface into a platform-optimized
// surface (possibly a GPU texture) and drops strong references to the raw
// and locked surfaces so volatile memory can be reclaimed. No-op while the
// frame is locked, when not optimizable, during shutdown, or when disabled
// via MOZ_DISABLE_IMAGE_OPTIMIZE. Caller must hold mMonitor.
456 nsresult
imgFrame::Optimize(DrawTarget
* aTarget
) {
457 MOZ_ASSERT(NS_IsMainThread());
458 mMonitor
.AssertCurrentThreadOwns();
460 if (mLockCount
> 0 || !mOptimizable
) {
461 // Don't optimize right now.
465 // Check whether image optimization is disabled -- not thread safe!
466 static bool gDisableOptimize
= false;
467 static bool hasCheckedOptimize
= false;
468 if (!hasCheckedOptimize
) {
469 if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
470 gDisableOptimize
= true;
472 hasCheckedOptimize
= true;
475 // Don't optimize during shutdown because gfxPlatform may not be available.
476 if (ShutdownTracker::ShutdownHasStarted()) {
480 if (gDisableOptimize
) {
488 // XXX(seth): It's currently unclear if there's any reason why we can't
489 // optimize non-premult surfaces. We should look into removing this.
// Non-WebRender: let the caller's draw target optimize; WebRender: use the
// screen reference draw target instead.
493 if (!gfxVars::UseWebRender()) {
494 mOptSurface
= aTarget
->OptimizeSourceSurface(mLockedSurface
);
496 mOptSurface
= gfxPlatform::GetPlatform()
497 ->ScreenReferenceDrawTarget()
498 ->OptimizeSourceSurface(mLockedSurface
);
// OptimizeSourceSurface may return its input unchanged; treat that as
// "no optimized surface".
500 if (mOptSurface
== mLockedSurface
) {
501 mOptSurface
= nullptr;
505 // There's no reason to keep our original surface around if we have an
506 // optimized surface. Release our reference to it. This will leave
507 // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
509 mRawSurface
= nullptr;
512 // Release all strong references to the surface's memory. If the underlying
513 // surface is volatile, this will allow the operating system to free the
514 // memory if it needs to.
515 mLockedSurface
= nullptr;
516 mOptimizable
= false;
521 DrawableFrameRef
imgFrame::DrawableRef() { return DrawableFrameRef(this); }
523 RawAccessFrameRef
imgFrame::RawAccessRef(bool aOnlyFinished
/*= false*/) {
524 return RawAccessFrameRef(this, aOnlyFinished
);
527 void imgFrame::SetRawAccessOnly() {
528 AssertImageDataLocked();
530 // Lock our data and throw away the key.
531 LockImageData(false);
// Main-thread-only: produces the drawable + format to use for painting this
// frame. Fully decoded frames are drawn directly; partially decoded frames
// are either composited into a temporary RGBA surface (when tiling) or drawn
// with the region clamped to the decoded area. Caller must hold mMonitor.
534 imgFrame::SurfaceWithFormat
imgFrame::SurfaceForDrawing(
535 bool aDoPartialDecode
, bool aDoTile
, ImageRegion
& aRegion
,
536 SourceSurface
* aSurface
) {
537 MOZ_ASSERT(NS_IsMainThread());
538 mMonitor
.AssertCurrentThreadOwns();
540 if (!aDoPartialDecode
) {
541 return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface
, mImageSize
),
// 'available' is the portion of the frame decoded so far.
546 gfxRect(mDecoded
.X(), mDecoded
.Y(), mDecoded
.Width(), mDecoded
.Height());
549 // Create a temporary surface.
550 // Give this surface an alpha channel because there are
551 // transparent pixels in the padding or undecoded area
552 RefPtr
<DrawTarget
> target
=
553 gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
554 mImageSize
, SurfaceFormat::OS_RGBA
);
// Allocation failure: signal "nothing to draw".
556 return SurfaceWithFormat();
559 SurfacePattern
pattern(aSurface
, aRegion
.GetExtendMode(),
560 Matrix::Translation(mDecoded
.X(), mDecoded
.Y()));
561 target
->FillRect(ToRect(aRegion
.Intersect(available
).Rect()), pattern
);
563 RefPtr
<SourceSurface
> newsurf
= target
->Snapshot();
564 return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf
, mImageSize
),
565 target
->GetFormat());
568 // Not tiling, and we have a surface, so we can account for
569 // a partial decode just by twiddling parameters.
570 aRegion
= aRegion
.Intersect(available
);
571 IntSize
availableSize(mDecoded
.Width(), mDecoded
.Height());
573 return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface
, availableSize
),
// Main-thread-only: paints this frame into aContext over aRegion. Takes the
// monitor only long enough to optimize and obtain a surface; the actual
// draw happens outside the lock to avoid contending with the decoder.
577 bool imgFrame::Draw(gfxContext
* aContext
, const ImageRegion
& aRegion
,
578 SamplingFilter aSamplingFilter
, uint32_t aImageFlags
,
580 AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS
);
582 MOZ_ASSERT(NS_IsMainThread());
583 NS_ASSERTION(!aRegion
.Rect().IsEmpty(), "Drawing empty region!");
584 NS_ASSERTION(!aRegion
.IsRestricted() ||
585 !aRegion
.Rect().Intersect(aRegion
.Restriction()).IsEmpty(),
586 "We must be allowed to sample *some* source pixels!");
588 // Perform the draw and freeing of the surface outside the lock. We want to
589 // avoid contention with the decoder if we can. The surface may also attempt
590 // to relock the monitor if it is freed (e.g. RecyclingSourceSurface).
591 RefPtr
<SourceSurface
> surf
;
592 SurfaceWithFormat surfaceResult
;
593 ImageRegion
region(aRegion
);
594 gfxRect
imageRect(0, 0, mImageSize
.width
, mImageSize
.height
);
// Scoped lock: everything below up to the draw itself runs under mMonitor.
597 MonitorAutoLock
lock(mMonitor
);
599 // Possibly convert this image into a GPU texture, this may also cause our
600 // mLockedSurface to be released and the OS to release the underlying
602 Optimize(aContext
->GetDrawTarget());
604 bool doPartialDecode
= !AreAllPixelsWritten();
606 // Most draw targets will just use the surface only during DrawPixelSnapped
607 // but captures/recordings will retain a reference outside this stack
608 // context. While in theory a decoder thread could be trying to recycle this
609 // frame at this very moment, in practice the only way we can get here is if
610 // this frame is the current frame of the animation. Since we can only
611 // advance on the main thread, we know nothing else will try to use it.
612 DrawTarget
* drawTarget
= aContext
->GetDrawTarget();
613 bool recording
= drawTarget
->GetBackendType() == BackendType::RECORDING
;
614 bool temporary
= !drawTarget
->IsCaptureDT() && !recording
;
// NOTE(review): |surf| appears declared twice in this excerpt (also above
// at the function top); presumably an inner-scope assignment whose braces
// were lost when this file was excerpted — verify against upstream.
615 RefPtr
<SourceSurface
> surf
= GetSourceSurfaceInternal(temporary
);
620 bool doTile
= !imageRect
.Contains(aRegion
.Rect()) &&
621 !(aImageFlags
& imgIContainer::FLAG_CLAMP
);
623 surfaceResult
= SurfaceForDrawing(doPartialDecode
, doTile
, region
, surf
);
625 // If we are recording, then we cannot recycle the surface. The blob
626 // rasterizer is not properly synchronized for recycling in the compositor
627 // process. The easiest thing to do is just mark the frames it consumes as
629 if (recording
&& surfaceResult
.IsValid()) {
630 mShouldRecycle
= false;
// Outside the lock: perform the actual draw.
634 if (surfaceResult
.IsValid()) {
635 gfxUtils::DrawPixelSnapped(aContext
, surfaceResult
.mDrawable
,
636 imageRect
.Size(), region
, surfaceResult
.mFormat
,
637 aSamplingFilter
, aImageFlags
, aOpacity
);
643 nsresult
imgFrame::ImageUpdated(const nsIntRect
& aUpdateRect
) {
644 MonitorAutoLock
lock(mMonitor
);
645 return ImageUpdatedInternal(aUpdateRect
);
// Records that aUpdateRect has been decoded: clamps it to the frame rect,
// grows mDecoded, and invalidates the affected region on the underlying
// surfaces. Caller must hold mMonitor.
648 nsresult
imgFrame::ImageUpdatedInternal(const nsIntRect
& aUpdateRect
) {
649 mMonitor
.AssertCurrentThreadOwns();
651 // Clamp to the frame rect to ensure that decoder bugs don't result in a
652 // decoded rect that extends outside the bounds of the frame rect.
653 IntRect updateRect
= aUpdateRect
.Intersect(GetRect());
654 if (updateRect
.IsEmpty()) {
658 mDecoded
.UnionRect(mDecoded
, updateRect
);
660 // Update our invalidation counters for any consumers watching for changes
663 mRawSurface
->Invalidate(updateRect
);
665 if (mLockedSurface
&& mRawSurface
!= mLockedSurface
) {
666 mLockedSurface
->Invalidate(updateRect
);
// Marks decoding of this frame as complete. If the decoder did not cover
// the whole frame, synthesizes an update for the missing top or bottom band
// (middle-only updates are a decoder bug), optionally finalizes the shared
// surface, and wakes any thread blocked in WaitUntilFinished().
671 void imgFrame::Finish(Opacity aFrameOpacity
/* = Opacity::SOME_TRANSPARENCY */,
672 bool aFinalize
/* = true */) {
673 MonitorAutoLock
lock(mMonitor
);
674 MOZ_ASSERT(mLockCount
> 0, "Image data should be locked");
676 IntRect
frameRect(GetRect());
677 if (!mDecoded
.IsEqualEdges(frameRect
)) {
678 // The decoder should have produced rows starting from either the bottom or
679 // the top of the image. We need to calculate the region for which we have
680 // not yet invalidated.
681 IntRect
delta(0, 0, frameRect
.width
, 0);
682 if (mDecoded
.y
== 0) {
683 delta
.y
= mDecoded
.height
;
684 delta
.height
= frameRect
.height
- mDecoded
.height
;
685 } else if (mDecoded
.y
+ mDecoded
.height
== frameRect
.height
) {
686 delta
.height
= frameRect
.height
- mDecoded
.y
;
688 MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
692 ImageUpdatedInternal(delta
);
695 MOZ_ASSERT(mDecoded
.IsEqualEdges(frameRect
));
698 FinalizeSurfaceInternal();
703 // The image is now complete, wake up anyone who's waiting.
704 mMonitor
.NotifyAll();
// Bytes per row of the frame's pixel data (width * bytes-per-pixel).
// Caller must hold mMonitor.
707 uint32_t imgFrame::GetImageBytesPerRow() const {
708 mMonitor
.AssertCurrentThreadOwns();
730 return mImageSize
// NOTE(review): a guard appears to be missing from this excerpt between the
// assert and the return (original lines 709-710) — verify against upstream.
.width
* BytesPerPixel(mFormat
);
717 uint32_t imgFrame::GetImageDataLength() const {
718 return GetImageBytesPerRow() * mImageSize
.height
;
721 void imgFrame::GetImageData(uint8_t** aData
, uint32_t* aLength
) const {
722 MonitorAutoLock
lock(mMonitor
);
723 GetImageDataInternal(aData
, aLength
);
// Returns the frame's pixel pointer and length. Requires the image data to
// be locked and a locked surface to exist. Caller must hold mMonitor.
726 void imgFrame::GetImageDataInternal(uint8_t** aData
, uint32_t* aLength
) const {
727 mMonitor
.AssertCurrentThreadOwns();
728 MOZ_ASSERT(mLockCount
> 0, "Image data should be locked");
729 MOZ_ASSERT(mLockedSurface
);
731 if (mLockedSurface
) {
732 // TODO: This is okay for now because we only realloc shared surfaces on
733 // the main thread after decoding has finished, but if animations want to
734 // read frame data off the main thread, we will need to reconsider this.
735 *aData
= mLockedSurface
->GetData();
// Trailing argument of an assertion whose head is not visible in this excerpt.
738 "mLockedSurface is non-null, but GetData is null in GetImageData");
743 *aLength
= GetImageDataLength();
// Convenience overload: fetches the data pointer via the two-argument
// GetImageData and discards the length.
746 uint8_t* imgFrame::GetImageData() const {
749 GetImageData(&data
, &length
);
// Locks the frame's image data and returns a pointer to the pixels, or
// fails when locks are unbalanced or (with aOnlyFinished) the frame is not
// finished. Re-locking an already-optimized frame is illegal.
753 uint8_t* imgFrame::LockImageData(bool aOnlyFinished
) {
754 MonitorAutoLock
lock(mMonitor
);
756 MOZ_ASSERT(mLockCount
>= 0, "Unbalanced locks and unlocks");
757 if (mLockCount
< 0 || (aOnlyFinished
&& !mFinished
)) {
762 if (mLockedSurface
) {
763 data
= mLockedSurface
->GetData();
768 // If the raw data is still available, we should get a valid pointer for it.
770 MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
// Debug helper: asserts that the image data currently holds at least one
// lock.
778 void imgFrame::AssertImageDataLocked() const {
780 MonitorAutoLock
lock(mMonitor
);
781 MOZ_ASSERT(mLockCount
> 0, "Image data should be locked");
// Releases one lock on the frame's image data. Fails (NS_ERROR_FAILURE) on
// unbalanced unlocks; the final unlock must happen only after the frame was
// finished or aborted.
785 nsresult
imgFrame::UnlockImageData() {
786 MonitorAutoLock
lock(mMonitor
);
788 MOZ_ASSERT(mLockCount
> 0, "Unlocking an unlocked image!");
789 if (mLockCount
<= 0) {
790 return NS_ERROR_FAILURE
;
793 MOZ_ASSERT(mLockCount
> 1 || mFinished
|| mAborted
,
794 "Should have Finish()'d or aborted before unlocking");
// Marks this frame as eligible for Optimize(). Requires the image data to
// be locked by the caller.
801 void imgFrame::SetOptimizable() {
802 AssertImageDataLocked();
803 MonitorAutoLock
lock(mMonitor
);
807 void imgFrame::FinalizeSurface() {
808 MonitorAutoLock
lock(mMonitor
);
809 FinalizeSurfaceInternal();
// Finalizes the shared-memory surface backing this frame (no-op for
// recyclable frames, frames without a raw surface, or non-shared surface
// types). Caller must hold mMonitor.
812 void imgFrame::FinalizeSurfaceInternal() {
813 mMonitor
.AssertCurrentThreadOwns();
815 // Not all images will have mRawSurface to finalize (i.e. paletted images).
816 if (mShouldRecycle
|| !mRawSurface
||
817 mRawSurface
->GetType() != SurfaceType::DATA_SHARED
) {
// Safe downcast: the type check above guarantees DATA_SHARED here.
821 auto sharedSurf
= static_cast<SourceSurfaceSharedData
*>(mRawSurface
.get());
822 sharedSurf
->Finalize();
825 already_AddRefed
<SourceSurface
> imgFrame::GetSourceSurface() {
826 MonitorAutoLock
lock(mMonitor
);
827 return GetSourceSurfaceInternal(/* aTemporary */ false);
// Returns the best available surface for this frame: a still-valid
// optimized surface, else the blank debug surface, else the locked surface
// (wrapping either in a RecyclingSourceSurface when the frame is recyclable
// and the caller is not temporary), else a freshly locked view of the raw
// surface. Caller must hold mMonitor.
830 already_AddRefed
<SourceSurface
> imgFrame::GetSourceSurfaceInternal(
832 mMonitor
.AssertCurrentThreadOwns();
835 if (mOptSurface
->IsValid()) {
836 RefPtr
<SourceSurface
> surf(mOptSurface
);
837 return surf
.forget();
// The optimized surface became invalid; drop it and fall through.
839 mOptSurface
= nullptr;
843 if (mBlankLockedSurface
) {
844 // We are going to return the blank surface because of the flags.
845 // We are including comments here that are copied from below
846 // just so that we are on the same page!
848 // We don't need to create recycling wrapper for some callers because they
849 // promise to release the surface immediately after.
850 if (!aTemporary
&& mShouldRecycle
) {
851 RefPtr
<SourceSurface
> surf
=
852 new RecyclingSourceSurface(this, mBlankLockedSurface
);
853 return surf
.forget();
856 RefPtr
<SourceSurface
> surf(mBlankLockedSurface
);
857 return surf
.forget();
860 if (mLockedSurface
) {
861 // We don't need to create recycling wrapper for some callers because they
862 // promise to release the surface immediately after.
863 if (!aTemporary
&& mShouldRecycle
) {
864 RefPtr
<SourceSurface
> surf
=
865 new RecyclingSourceSurface(this, mLockedSurface
);
866 return surf
.forget();
869 RefPtr
<SourceSurface
> surf(mLockedSurface
);
870 return surf
.forget();
873 MOZ_ASSERT(!mShouldRecycle
, "Should recycle but no locked surface!");
// Last resort: create a fresh locked view of the raw surface.
879 return CreateLockedSurface(mRawSurface
, mImageSize
, mFormat
);
// Aborts this frame and wakes any thread blocked in WaitUntilFinished().
882 void imgFrame::Abort() {
883 MonitorAutoLock
lock(mMonitor
);
887 // Wake up anyone who's waiting.
888 mMonitor
.NotifyAll();
// Thread-safe query of the aborted flag.
891 bool imgFrame::IsAborted() const {
892 MonitorAutoLock
lock(mMonitor
);
// Thread-safe query of the finished flag.
896 bool imgFrame::IsFinished() const {
897 MonitorAutoLock
lock(mMonitor
);
// Blocks the calling thread until this frame is finished or aborted
// (Finish() and Abort() both notify the monitor).
901 void imgFrame::WaitUntilFinished() const {
902 MonitorAutoLock
lock(mMonitor
);
905 // Return if we're aborted or complete.
906 if (mAborted
|| mFinished
) {
910 // Not complete yet, so we'll have to wait.
915 bool imgFrame::AreAllPixelsWritten() const {
916 mMonitor
.AssertCurrentThreadOwns();
917 return mDecoded
.IsEqualInterior(GetRect());
// Memory reporting: accumulates the heap/non-heap/handle sizes of this
// frame's surfaces into an AddSizeOfCbData record for aCallback.
920 void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf
,
921 const AddSizeOfCb
& aCallback
) const {
922 MonitorAutoLock
lock(mMonitor
);
924 AddSizeOfCbData metadata
;
926 metadata
.finished
= mFinished
;
927 if (mLockedSurface
) {
928 metadata
.heap
+= aMallocSizeOf(mLockedSurface
);
931 metadata
.heap
+= aMallocSizeOf(mOptSurface
);
// The raw surface reports its own internal allocations as well.
934 metadata
.heap
+= aMallocSizeOf(mRawSurface
);
935 mRawSurface
->AddSizeOfExcludingThis(aMallocSizeOf
, metadata
.heap
,
936 metadata
.nonHeap
, metadata
.handles
,
937 metadata
.externalId
);
943 RecyclingSourceSurface::RecyclingSourceSurface(imgFrame
* aParent
,
944 DataSourceSurface
* aSurface
)
945 : mParent(aParent
), mSurface(aSurface
), mType(SurfaceType::DATA
) {
946 mParent
->mMonitor
.AssertCurrentThreadOwns();
947 ++mParent
->mRecycleLockCount
;
949 if (aSurface
->GetType() == SurfaceType::DATA_SHARED
) {
950 mType
= SurfaceType::DATA_RECYCLING_SHARED
;
954 RecyclingSourceSurface::~RecyclingSourceSurface() {
955 MonitorAutoLock
lock(mParent
->mMonitor
);
956 MOZ_ASSERT(mParent
->mRecycleLockCount
> 0);
957 if (--mParent
->mRecycleLockCount
== 0) {
958 mParent
->mMonitor
.NotifyAll();
963 } // namespace mozilla