/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "imgFrame.h"

#include "ImageRegion.h"
#include "ShutdownTracker.h"

#include "gfx2DGlue.h"
#include "gfxPlatform.h"
#include "gfxPrefs.h"
#include "gfxUtils.h"

#include "GeckoProfiler.h"
#include "MainThreadUtils.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/gfx/gfxVars.h"
#include "mozilla/gfx/Tools.h"
#include "mozilla/gfx/SourceSurfaceRawData.h"
#include "mozilla/layers/SourceSurfaceSharedData.h"
#include "mozilla/layers/SourceSurfaceVolatileData.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "nsThreadUtils.h"
#include "prenv.h"

namespace mozilla {

using namespace gfx;

namespace image {
static void
ScopedMapRelease(void* aMap)
{
  delete static_cast<DataSourceSurface::ScopedMap*>(aMap);
}
static int32_t
VolatileSurfaceStride(const IntSize& size, SurfaceFormat format)
{
  // Stride must be a multiple of four or cairo will complain.
  return (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
}
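
// Wraps aSurface in a DataSourceSurface whose pixel data stays mapped for the
// lifetime of the returned surface. Shared-memory surfaces are returned as-is
// because their data is never unmapped until the surface itself goes away;
// other surface types are wrapped around a ScopedMap that is torn down via
// ScopedMapRelease when the wrapper is destroyed.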
static already_AddRefed<DataSourceSurface>
CreateLockedSurface(DataSourceSurface* aSurface,
                    const IntSize& size,
                    SurfaceFormat format)
{
  // Shared memory is never released until the surface itself is released.
  if (aSurface->GetType() == SurfaceType::DATA_SHARED) {
    RefPtr<DataSourceSurface> surf(aSurface);
    return surf.forget();
  }

  DataSourceSurface::ScopedMap* smap =
    new DataSourceSurface::ScopedMap(aSurface, DataSourceSurface::READ_WRITE);
  if (smap->IsMapped()) {
    // The ScopedMap is held by this DataSourceSurface.
    RefPtr<DataSourceSurface> surf =
      Factory::CreateWrappingDataSourceSurface(smap->GetData(),
                                               aSurface->Stride(),
                                               size, format,
                                               &ScopedMapRelease,
                                               static_cast<void*>(smap));
    if (surf) {
      return surf.forget();
    }
  }

  delete smap;
  return nullptr;
}
static bool
ShouldUseHeap(const IntSize& aSize,
              int32_t aStride,
              bool aIsAnimated)
{
  // On some platforms (i.e. Android), a volatile buffer actually keeps a file
  // handle active. We would like to avoid too many since we could easily
  // exhaust the pool. However, on other platforms we do not have the file
  // handle problem, and additionally we may avoid a superfluous memset since
  // the volatile memory starts out as zero-filled. Hence the knobs below.

  // For as long as an animated image is retained, its frames will never be
  // released to let the OS purge volatile buffers.
  if (aIsAnimated && gfxPrefs::ImageMemAnimatedUseHeap()) {
    return true;
  }

  // Lets us avoid too many small images consuming all of the handles. The
  // actual allocation checks for overflow.
  int32_t bufferSize = (aStride * aSize.width) / 1024;
  if (bufferSize < gfxPrefs::ImageMemVolatileMinThresholdKB()) {
    return true;
  }

  return false;
}
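
// Allocates the backing buffer for a frame. Preference order: a heap-backed
// aligned buffer when ShouldUseHeap() says so, a shared-memory surface for
// non-animated frames when WebRender/shared image memory is enabled, and a
// volatile-memory surface otherwise.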
static already_AddRefed<DataSourceSurface>
AllocateBufferForImage(const IntSize& size,
                       SurfaceFormat format,
                       bool aIsAnimated = false)
{
  int32_t stride = VolatileSurfaceStride(size, format);

  if (ShouldUseHeap(size, stride, aIsAnimated)) {
    RefPtr<SourceSurfaceAlignedRawData> newSurf =
      new SourceSurfaceAlignedRawData();
    if (newSurf->Init(size, format, false, 0, stride)) {
      return newSurf.forget();
    }
  }

  if (!aIsAnimated && gfxVars::GetUseWebRenderOrDefault()
                   && gfxPrefs::ImageMemShared()) {
    RefPtr<SourceSurfaceSharedData> newSurf = new SourceSurfaceSharedData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  } else {
    RefPtr<SourceSurfaceVolatileData> newSurf = new SourceSurfaceVolatileData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  }

  return nullptr;
}
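
// Initializes a freshly allocated frame buffer: B8G8R8X8 frames are memset to
// 0xFF so the unused alpha channel reads as opaque, while heap-backed buffers
// of other formats are zeroed (volatile and shared buffers already start out
// zero-filled).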
static bool
ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize, SurfaceFormat aFormat)
{
  int32_t stride = aSurface->Stride();
  uint8_t* data = aSurface->GetData();

  if (aFormat == SurfaceFormat::B8G8R8X8) {
    // Skia doesn't support RGBX surfaces, so ensure the alpha value is set
    // to opaque white. While it would be nice to only do this for Skia,
    // imgFrame can run off main thread and past shutdown where
    // we might not have gfxPlatform, so just memset every time instead.
    memset(data, 0xFF, stride * aSize.height);
  } else if (aSurface->OnHeap()) {
    // We only need to memset it if the buffer was allocated on the heap.
    // Otherwise, it's allocated via mmap and refers to a zeroed page and will
    // be COW once it's written to.
    memset(data, 0, stride * aSize.height);
  }

  return true;
}
// Returns true if an image of aWidth x aHeight is allowed and legal.
static bool
AllowedImageSize(int32_t aWidth, int32_t aHeight)
{
  // Reject over-wide or over-tall images.
  const int32_t k64KLimit = 0x0000FFFF;
  if (MOZ_UNLIKELY(aWidth > k64KLimit || aHeight > k64KLimit)) {
    NS_WARNING("image too big");
    return false;
  }

  // Protect against invalid sizes.
  if (MOZ_UNLIKELY(aHeight <= 0 || aWidth <= 0)) {
    return false;
  }

  // Check to make sure we don't overflow a 32-bit size.
  CheckedInt32 requiredBytes = CheckedInt32(aWidth) * CheckedInt32(aHeight) * 4;
  if (MOZ_UNLIKELY(!requiredBytes.isValid())) {
    NS_WARNING("width or height too large");
    return false;
  }

  return true;
}
static bool AllowedImageAndFrameDimensions(const nsIntSize& aImageSize,
                                           const nsIntRect& aFrameRect)
{
  if (!AllowedImageSize(aImageSize.width, aImageSize.height)) {
    return false;
  }
  if (!AllowedImageSize(aFrameRect.Width(), aFrameRect.Height())) {
    return false;
  }
  nsIntRect imageRect(0, 0, aImageSize.width, aImageSize.height);
  if (!imageRect.Contains(aFrameRect)) {
    NS_WARNING("Animated image frame does not fit inside bounds of image");
  }
  return true;
}
imgFrame::imgFrame()
  : mMonitor("imgFrame")
  , mDecoded(0, 0, 0, 0)
  , mTimeout(FrameTimeout::FromRawMilliseconds(100))
  , mDisposalMethod(DisposalMethod::NOT_SPECIFIED)
  , mBlendMethod(BlendMethod::OVER)
  , mOptimizable(false)
  , mPalettedImageData(nullptr)
  , mCompositingFailed(false)
{
}
imgFrame::~imgFrame()
{
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);

  free(mPalettedImageData);
  mPalettedImageData = nullptr;
}
nsresult
imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                         const nsIntRect& aRect,
                         SurfaceFormat aFormat,
                         uint8_t aPaletteDepth /* = 0 */,
                         bool aNonPremult /* = false */,
                         bool aIsAnimated /* = false */)
{
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!AllowedImageAndFrameDimensions(aImageSize, aRect)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;
  mFrameRect = aRect;

  // We only allow a non-trivial frame rect (i.e., a frame rect that doesn't
  // cover the entire image) for paletted animation frames. We never draw those
  // frames directly; we just use FrameAnimator to composite them and produce a
  // BGRA surface that we actually draw. We enforce this here to make sure that
  // imgFrame::Draw(), which is responsible for drawing all other kinds of
  // frames, never has to deal with a non-trivial frame rect.
  if (aPaletteDepth == 0 &&
      !mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize))) {
    MOZ_ASSERT_UNREACHABLE("Creating a non-paletted imgFrame with a "
                           "non-trivial frame rect");
    return NS_ERROR_FAILURE;
  }

  mFormat = aFormat;
  mPaletteDepth = aPaletteDepth;
  mNonPremult = aNonPremult;

  if (aPaletteDepth != 0) {
    // We're creating for a paletted image.
    if (aPaletteDepth > 8) {
      NS_WARNING("Should have legal palette depth");
      NS_ERROR("This Depth is not supported");
      mAborted = true;
      return NS_ERROR_FAILURE;
    }

    // Use the fallible allocator here. Paletted images always use 1 byte per
    // pixel, so calculating the amount of memory we need is straightforward.
    size_t dataSize = PaletteDataLength() + mFrameRect.Area();
    mPalettedImageData = static_cast<uint8_t*>(calloc(dataSize, sizeof(uint8_t)));
    if (!mPalettedImageData) {
      NS_WARNING("Call to calloc for paletted image data should succeed");
    }
    NS_ENSURE_TRUE(mPalettedImageData, NS_ERROR_OUT_OF_MEMORY);
  } else {
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat, aIsAnimated);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface = CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  return NS_OK;
}
nsresult
imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                           const nsIntSize& aSize,
                           const SurfaceFormat aFormat,
                           SamplingFilter aSamplingFilter,
                           uint32_t aImageFlags,
                           gfx::BackendType aBackend)
{
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!AllowedImageSize(aSize.width, aSize.height)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFrameRect = IntRect(IntPoint(0, 0), aSize);
  mFormat = aFormat;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface = CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    target = gfxPlatform::CreateDrawTargetForData(
                            mLockedSurface->GetData(),
                            mFrameRect.Size(),
                            mLockedSurface->Stride(),
                            mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead. This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that
    // in the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
      target = gfxPlatform::GetPlatform()->
        CreateDrawTargetForBackend(aBackend, mFrameRect.Size(), mFormat);
    } else {
      target = gfxPlatform::GetPlatform()->
        CreateOffscreenContentDrawTarget(mFrameRect.Size(), mFormat);
    }
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx); // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mFrameRect.Size()),
                             ImageRegion::Create(ThebesRect(mFrameRect)),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());

  return NS_OK;
}
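
// Attempts to replace the locked data surface with a platform-optimized
// surface (possibly a GPU texture) and then drops the raw and locked surface
// references, which lets the OS reclaim volatile memory backing the frame.
// Optimization is skipped while the frame is locked or not marked optimizable,
// during shutdown, for paletted or already-optimized frames, and when
// MOZ_DISABLE_IMAGE_OPTIMIZE is set in the environment.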
nsresult
imgFrame::Optimize(DrawTarget* aTarget)
{
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (mLockCount > 0 || !mOptimizable) {
    // Don't optimize right now.
    return NS_OK;
  }

  // Check whether image optimization is disabled -- not thread safe!
  static bool gDisableOptimize = false;
  static bool hasCheckedOptimize = false;
  if (!hasCheckedOptimize) {
    if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
      gDisableOptimize = true;
    }
    hasCheckedOptimize = true;
  }

  // Don't optimize during shutdown because gfxPlatform may not be available.
  if (ShutdownTracker::ShutdownHasStarted()) {
    return NS_OK;
  }

  if (gDisableOptimize) {
    return NS_OK;
  }

  if (mPalettedImageData || mOptSurface) {
    return NS_OK;
  }

  // XXX(seth): It's currently unclear if there's any reason why we can't
  // optimize non-premult surfaces. We should look into removing this.
  if (mNonPremult) {
    return NS_OK;
  }

  mOptSurface = gfxPlatform::GetPlatform()
    ->ScreenReferenceDrawTarget()->OptimizeSourceSurface(mLockedSurface);
  if (mOptSurface == mLockedSurface) {
    mOptSurface = nullptr;
  }

  if (mOptSurface) {
    // There's no reason to keep our original surface around if we have an
    // optimized surface. Release our reference to it. This will leave
    // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
    // below.
    mRawSurface = nullptr;
  }

  // Release all strong references to the surface's memory. If the underlying
  // surface is volatile, this will allow the operating system to free the
  // memory if it needs to.
  mLockedSurface = nullptr;
  mOptimizable = false;

  return NS_OK;
}
DrawableFrameRef
imgFrame::DrawableRef()
{
  return DrawableFrameRef(this);
}

RawAccessFrameRef
imgFrame::RawAccessRef()
{
  return RawAccessFrameRef(this);
}

void
imgFrame::SetRawAccessOnly()
{
  AssertImageDataLocked();

  // Lock our data and throw away the key.
  mLockCount++;
}
imgFrame::SurfaceWithFormat
imgFrame::SurfaceForDrawing(bool aDoPartialDecode,
                            bool aDoTile,
                            ImageRegion& aRegion,
                            SourceSurface* aSurface)
{
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (!aDoPartialDecode) {
    return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, mImageSize),
                             mFormat);
  }

  gfxRect available = gfxRect(mDecoded.X(), mDecoded.Y(), mDecoded.Width(),
                              mDecoded.Height());

  if (aDoTile) {
    // Create a temporary surface.
    // Give this surface an alpha channel because there are
    // transparent pixels in the padding or undecoded area.
    RefPtr<DrawTarget> target =
      gfxPlatform::GetPlatform()->
        CreateOffscreenContentDrawTarget(mImageSize, SurfaceFormat::B8G8R8A8);
    if (!target) {
      return SurfaceWithFormat();
    }

    SurfacePattern pattern(aSurface,
                           aRegion.GetExtendMode(),
                           Matrix::Translation(mDecoded.X(), mDecoded.Y()));
    target->FillRect(ToRect(aRegion.Intersect(available).Rect()), pattern);

    RefPtr<SourceSurface> newsurf = target->Snapshot();
    return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf, mImageSize),
                             target->GetFormat());
  }

  // Not tiling, and we have a surface, so we can account for
  // a partial decode just by twiddling parameters.
  aRegion = aRegion.Intersect(available);
  IntSize availableSize(mDecoded.Width(), mDecoded.Height());

  return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, availableSize),
                           mFormat);
}
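
// Draws this frame into aContext. Main-thread only: the frame may be
// optimized into a GPU-backed surface as a side effect, and partially decoded
// frames are routed through SurfaceForDrawing() so that only the decoded
// region is sampled.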
bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
                    SamplingFilter aSamplingFilter, uint32_t aImageFlags,
                    float aOpacity)
{
  AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);

  MOZ_ASSERT(NS_IsMainThread());
  NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
  NS_ASSERTION(!aRegion.IsRestricted() ||
               !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");
  MOZ_ASSERT(mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize)),
             "Directly drawing an image with a non-trivial frame rect!");

  if (mPalettedImageData) {
    MOZ_ASSERT_UNREACHABLE("Directly drawing a paletted image!");
    return false;
  }

  MonitorAutoLock lock(mMonitor);

  // Possibly convert this image into a GPU texture; this may also cause our
  // mLockedSurface to be released and the OS to release the underlying memory.
  Optimize(aContext->GetDrawTarget());

  bool doPartialDecode = !AreAllPixelsWritten();

  RefPtr<SourceSurface> surf = GetSourceSurfaceInternal();
  if (!surf) {
    return false;
  }

  gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);
  bool doTile = !imageRect.Contains(aRegion.Rect()) &&
                !(aImageFlags & imgIContainer::FLAG_CLAMP);

  ImageRegion region(aRegion);
  SurfaceWithFormat surfaceResult =
    SurfaceForDrawing(doPartialDecode, doTile, region, surf);

  if (surfaceResult.IsValid()) {
    gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
                               imageRect.Size(), region, surfaceResult.mFormat,
                               aSamplingFilter, aImageFlags, aOpacity);
  }

  return true;
}
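
// Records that aUpdateRect has been decoded. The decoded rect is clamped to
// the frame rect, and the backing surfaces are invalidated so consumers
// watching for changes notice the update.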
nsresult
imgFrame::ImageUpdated(const nsIntRect& aUpdateRect)
{
  MonitorAutoLock lock(mMonitor);
  return ImageUpdatedInternal(aUpdateRect);
}

nsresult
imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect)
{
  mMonitor.AssertCurrentThreadOwns();

  mDecoded.UnionRect(mDecoded, aUpdateRect);

  // Clamp to the frame rect to ensure that decoder bugs don't result in a
  // decoded rect that extends outside the bounds of the frame rect.
  mDecoded.IntersectRect(mDecoded, mFrameRect);

  // Update our invalidation counters for any consumers watching for changes
  // in the surface.
  if (mRawSurface) {
    mRawSurface->Invalidate();
  }
  if (mLockedSurface && mRawSurface != mLockedSurface) {
    mLockedSurface->Invalidate();
  }

  return NS_OK;
}
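
// Marks the frame as complete: records the animation parameters for this
// frame, marks the whole frame rect as decoded, optionally finalizes the
// underlying shared surface, and wakes any thread blocked in
// WaitUntilFinished().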
void
imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                 DisposalMethod aDisposalMethod /* = DisposalMethod::KEEP */,
                 FrameTimeout aTimeout
                   /* = FrameTimeout::FromRawMilliseconds(0) */,
                 BlendMethod aBlendMethod /* = BlendMethod::OVER */,
                 const Maybe<IntRect>& aBlendRect /* = Nothing() */,
                 bool aFinalize /* = true */)
{
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  mDisposalMethod = aDisposalMethod;
  mTimeout = aTimeout;
  mBlendMethod = aBlendMethod;
  mBlendRect = aBlendRect;
  ImageUpdatedInternal(GetRect());

  if (aFinalize) {
    FinalizeSurfaceInternal();
  }

  mFinished = true;

  // The image is now complete, wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
uint32_t
imgFrame::GetImageBytesPerRow() const
{
  mMonitor.AssertCurrentThreadOwns();

  if (mRawSurface) {
    return mFrameRect.Width() * BytesPerPixel(mFormat);
  }

  if (mPaletteDepth) {
    // Paletted images use 1 byte per pixel.
    return mFrameRect.Width();
  }

  return 0;
}

uint32_t
imgFrame::GetImageDataLength() const
{
  return GetImageBytesPerRow() * mFrameRect.Height();
}
void
imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const
{
  MonitorAutoLock lock(mMonitor);
  GetImageDataInternal(aData, aLength);
}

void
imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const
{
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on
    // the main thread after decoding has finished, but if animations want to
    // read frame data off the main thread, we will need to reconsider this.
    *aData = mLockedSurface->GetData();
    MOZ_ASSERT(*aData,
      "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else if (mPalettedImageData) {
    *aData = mPalettedImageData + PaletteDataLength();
    MOZ_ASSERT(*aData,
      "mPalettedImageData is non-null, but result is null in GetImageData");
  } else {
    MOZ_ASSERT(false,
      "Have neither mLockedSurface nor mPalettedImageData in GetImageData");
    *aData = nullptr;
  }

  *aLength = GetImageDataLength();
}

uint8_t*
imgFrame::GetImageData() const
{
  uint8_t* data;
  uint32_t length;
  GetImageData(&data, &length);
  return data;
}
bool
imgFrame::GetIsPaletted() const
{
  return mPalettedImageData != nullptr;
}

void
imgFrame::GetPaletteData(uint32_t** aPalette, uint32_t* length) const
{
  AssertImageDataLocked();

  if (!mPalettedImageData) {
    *aPalette = nullptr;
    *length = 0;
  } else {
    *aPalette = (uint32_t*) mPalettedImageData;
    *length = PaletteDataLength();
  }
}

uint32_t*
imgFrame::GetPaletteData() const
{
  uint32_t* data;
  uint32_t length;
  GetPaletteData(&data, &length);
  return data;
}
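
// Increments the lock count, which pins the frame's data in memory. Only the
// first lock needs verification: at that point the frame must still have its
// locked surface or be paletted; re-locking an already optimized frame is an
// error.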
nsresult
imgFrame::LockImageData()
{
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
  if (mLockCount < 0) {
    return NS_ERROR_FAILURE;
  }

  mLockCount++;

  // If we are not the first lock, there's nothing to do.
  if (mLockCount != 1) {
    return NS_OK;
  }

  // If we're the first lock, but have the locked surface, we're OK.
  if (mLockedSurface) {
    return NS_OK;
  }

  // Paletted images don't have surfaces, so there's nothing to do.
  if (mPalettedImageData) {
    return NS_OK;
  }

  MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
  return NS_ERROR_FAILURE;
}
void
imgFrame::AssertImageDataLocked() const
{
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}
nsresult
imgFrame::UnlockImageData()
{
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
  if (mLockCount <= 0) {
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
             "Should have Finish()'d or aborted before unlocking");

  mLockCount--;

  return NS_OK;
}
void
imgFrame::SetOptimizable()
{
  AssertImageDataLocked();
  MonitorAutoLock lock(mMonitor);
  mOptimizable = true;
}
void
imgFrame::FinalizeSurface()
{
  MonitorAutoLock lock(mMonitor);
  FinalizeSurfaceInternal();
}

void
imgFrame::FinalizeSurfaceInternal()
{
  mMonitor.AssertCurrentThreadOwns();

  // Not all images will have mRawSurface to finalize (i.e. paletted images).
  if (!mRawSurface || mRawSurface->GetType() != SurfaceType::DATA_SHARED) {
    return;
  }

  auto sharedSurf = static_cast<SourceSurfaceSharedData*>(mRawSurface.get());
  sharedSurf->Finalize();
}
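
// Returns the best available surface for this frame: the optimized surface if
// it is still valid, otherwise the locked data surface, otherwise a freshly
// locked view of the raw surface.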
already_AddRefed<SourceSurface>
imgFrame::GetSourceSurface()
{
  MonitorAutoLock lock(mMonitor);
  return GetSourceSurfaceInternal();
}

already_AddRefed<SourceSurface>
imgFrame::GetSourceSurfaceInternal()
{
  mMonitor.AssertCurrentThreadOwns();

  if (mOptSurface) {
    if (mOptSurface->IsValid()) {
      RefPtr<SourceSurface> surf(mOptSurface);
      return surf.forget();
    }
    mOptSurface = nullptr;
  }

  if (mLockedSurface) {
    RefPtr<SourceSurface> surf(mLockedSurface);
    return surf.forget();
  }

  if (!mRawSurface) {
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
}
AnimationData
imgFrame::GetAnimationData() const
{
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  uint8_t* data;
  if (mPalettedImageData) {
    data = mPalettedImageData;
  } else {
    uint32_t length;
    GetImageDataInternal(&data, &length);
  }

  bool hasAlpha = mFormat == SurfaceFormat::B8G8R8A8;

  return AnimationData(data, PaletteDataLength(), mTimeout, GetRect(),
                       mBlendMethod, mBlendRect, mDisposalMethod, hasAlpha);
}
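
// Marks the frame as aborted and wakes any thread blocked in
// WaitUntilFinished().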
void
imgFrame::Abort()
{
  MonitorAutoLock lock(mMonitor);

  mAborted = true;

  // Wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
bool
imgFrame::IsAborted() const
{
  MonitorAutoLock lock(mMonitor);
  return mAborted;
}

bool
imgFrame::IsFinished() const
{
  MonitorAutoLock lock(mMonitor);
  return mFinished;
}

void
imgFrame::WaitUntilFinished() const
{
  MonitorAutoLock lock(mMonitor);

  while (true) {
    // Return if we're aborted or complete.
    if (mAborted || mFinished) {
      return;
    }

    // Not complete yet, so we'll have to wait.
    mMonitor.Wait();
  }
}
bool
imgFrame::AreAllPixelsWritten() const
{
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(mFrameRect);
}

bool imgFrame::GetCompositingFailed() const
{
  MOZ_ASSERT(NS_IsMainThread());
  return mCompositingFailed;
}

void
imgFrame::SetCompositingFailed(bool val)
{
  MOZ_ASSERT(NS_IsMainThread());
  mCompositingFailed = val;
}
void
imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                 size_t& aHeapSizeOut,
                                 size_t& aNonHeapSizeOut,
                                 size_t& aExtHandlesOut) const
{
  MonitorAutoLock lock(mMonitor);

  if (mPalettedImageData) {
    aHeapSizeOut += aMallocSizeOf(mPalettedImageData);
  }
  if (mLockedSurface) {
    aHeapSizeOut += aMallocSizeOf(mLockedSurface);
  }
  if (mOptSurface) {
    aHeapSizeOut += aMallocSizeOf(mOptSurface);
  }
  if (mRawSurface) {
    aHeapSizeOut += aMallocSizeOf(mRawSurface);
    mRawSurface->AddSizeOfExcludingThis(aMallocSizeOf, aHeapSizeOut,
                                        aNonHeapSizeOut, aExtHandlesOut);
  }
}

} // namespace image
} // namespace mozilla