/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "imgFrame.h"
#include "ImageRegion.h"
#include "ShutdownTracker.h"
#include "SurfaceCache.h"

#include "prenv.h"

#include "gfx2DGlue.h"
#include "gfxContext.h"
#include "gfxPlatform.h"

#include "gfxUtils.h"

#include "MainThreadUtils.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/gfx/gfxVars.h"
#include "mozilla/gfx/Tools.h"
#include "mozilla/gfx/SourceSurfaceRawData.h"
#include "mozilla/layers/SourceSurfaceSharedData.h"
#include "mozilla/layers/SourceSurfaceVolatileData.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/StaticPrefs_browser.h"
#include "mozilla/StaticPrefs_image.h"
#include "nsMargin.h"
#include "nsRefreshDriver.h"
#include "nsThreadUtils.h"

#include <algorithm>  // for min, max

namespace mozilla {

using namespace gfx;

namespace image {
/**
 * This class is identical to SourceSurfaceSharedData but returns a different
 * type so that SharedSurfacesChild is aware imagelib wants to recycle this
 * surface for future animation frames.
 */
class RecyclingSourceSurfaceSharedData final : public SourceSurfaceSharedData {
 public:
  MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(RecyclingSourceSurfaceSharedData,
                                          override)

  SurfaceType GetType() const override {
    return SurfaceType::DATA_RECYCLING_SHARED;
  }
};
static int32_t VolatileSurfaceStride(const IntSize& size,
                                     SurfaceFormat format) {
  // Stride must be a multiple of four or cairo will complain.
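  // Illustrative arithmetic: a 10-pixel-wide row in a 1-byte-per-pixel format
  // is rounded from 10 bytes up to 12, while any 32bpp format (e.g. B8G8R8A8)
  // already yields a multiple of four and is left unchanged.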
  return (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
}
static already_AddRefed<DataSourceSurface> CreateLockedSurface(
    DataSourceSurface* aSurface, const IntSize& size, SurfaceFormat format) {
  switch (aSurface->GetType()) {
    case SurfaceType::DATA_SHARED:
    case SurfaceType::DATA_RECYCLING_SHARED:
    case SurfaceType::DATA_ALIGNED: {
      // Shared memory is never released until the surface itself is released.
      // Similar for aligned/heap surfaces.
      RefPtr<DataSourceSurface> surf(aSurface);
      return surf.forget();
    }
    default: {
      // Volatile memory requires us to map it first, and it is fallible.
      DataSourceSurface::ScopedMap smap(aSurface,
                                        DataSourceSurface::READ_WRITE);
      if (smap.IsMapped()) {
        return MakeAndAddRef<SourceSurfaceMappedData>(std::move(smap), size,
                                                      format);
      }
      break;
    }
  }

  return nullptr;
}
static bool ShouldUseHeap(const IntSize& aSize, int32_t aStride,
                          bool aIsAnimated) {
  // On some platforms (e.g. Android), a volatile buffer actually keeps a file
  // handle active. We would like to avoid too many since we could easily
  // exhaust the pool. However, on other platforms we do not have the file
  // handle problem, and additionally we may avoid a superfluous memset since
  // the volatile memory starts out as zero-filled. Hence the knobs below.

  // For as long as an animated image is retained, its frames will never be
  // released to let the OS purge volatile buffers.
  if (aIsAnimated && StaticPrefs::image_mem_animated_use_heap()) {
    return true;
  }

  // This lets us avoid too many small images consuming all of the handles.
  // The actual allocation checks for overflow.
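  // As an illustration (regardless of the pref's default): a 64x64 BGRA frame
  // is 64 * 4 * 64 = 16384 bytes, i.e. 16 KiB, so it is allocated on the heap
  // whenever image_mem_volatile_min_threshold_kb() is above 16.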
  int32_t bufferSize = (aStride * aSize.height) / 1024;
  return bufferSize < StaticPrefs::image_mem_volatile_min_threshold_kb();
}
static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
    const IntSize& size, SurfaceFormat format, bool aShouldRecycle = false,
    bool aIsAnimated = false) {
  int32_t stride = VolatileSurfaceStride(size, format);
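  // In effect the allocation strategy is, in order of preference: shared
  // memory surfaces when WebRender and the shared-surface pref allow it (so
  // the pixels can be handed to the compositor via SharedSurfacesChild), then
  // heap buffers when ShouldUseHeap() says the frame is small enough or
  // animated frames should stay on the heap, and volatile memory otherwise.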
  if (gfxVars::GetUseWebRenderOrDefault() && StaticPrefs::image_mem_shared()) {
    RefPtr<SourceSurfaceSharedData> newSurf;
    if (aShouldRecycle) {
      newSurf = new RecyclingSourceSurfaceSharedData();
    } else {
      newSurf = new SourceSurfaceSharedData();
    }
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  } else if (ShouldUseHeap(size, stride, aIsAnimated)) {
    RefPtr<SourceSurfaceAlignedRawData> newSurf =
        new SourceSurfaceAlignedRawData();
    if (newSurf->Init(size, format, false, 0, stride)) {
      return newSurf.forget();
    }
  } else {
    RefPtr<SourceSurfaceVolatileData> newSurf = new SourceSurfaceVolatileData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  }
  return nullptr;
}
static bool GreenSurface(DataSourceSurface* aSurface, const IntSize& aSize,
                         SurfaceFormat aFormat) {
  int32_t stride = aSurface->Stride();
  uint32_t* surfaceData = reinterpret_cast<uint32_t*>(aSurface->GetData());
  uint32_t surfaceDataLength = (stride * aSize.height) / sizeof(uint32_t);

  // Start by assuming that GG is in the second byte and
  // AA is in the final byte -- the most common case.
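  // Written out: swapFromBigEndian(0x00FF00FF) stores the bytes 00 FF 00 FF
  // in memory, which a B8G8R8A8 surface reads as B=0x00, G=0xFF, R=0x00,
  // A=0xFF, i.e. fully opaque green.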
  uint32_t color = mozilla::NativeEndian::swapFromBigEndian(0x00FF00FF);

  // We are only going to handle this type of test under
  // certain circumstances.
  MOZ_ASSERT(surfaceData);
  MOZ_ASSERT(aFormat == SurfaceFormat::B8G8R8A8 ||
             aFormat == SurfaceFormat::B8G8R8X8 ||
             aFormat == SurfaceFormat::R8G8B8A8 ||
             aFormat == SurfaceFormat::R8G8B8X8 ||
             aFormat == SurfaceFormat::A8R8G8B8 ||
             aFormat == SurfaceFormat::X8R8G8B8);
  MOZ_ASSERT((stride * aSize.height) % sizeof(uint32_t) == 0);

  if (aFormat == SurfaceFormat::A8R8G8B8 ||
      aFormat == SurfaceFormat::X8R8G8B8) {
    color = mozilla::NativeEndian::swapFromBigEndian(0xFF00FF00);
  }

  for (uint32_t i = 0; i < surfaceDataLength; i++) {
    surfaceData[i] = color;
  }

  return true;
}
static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
                         SurfaceFormat aFormat) {
  int32_t stride = aSurface->Stride();
  uint8_t* data = aSurface->GetData();
  MOZ_ASSERT(data);

  if (aFormat == SurfaceFormat::OS_RGBX) {
    // Skia doesn't support RGBX surfaces, so ensure the alpha value is set
    // to opaque white. While it would be nice to only do this for Skia,
    // imgFrame can run off main thread and past shutdown where
    // we might not have gfxPlatform, so just memset every time instead.
    memset(data, 0xFF, stride * aSize.height);
  } else if (aSurface->OnHeap()) {
    // We only need to memset it if the buffer was allocated on the heap.
    // Otherwise, it's allocated via mmap and refers to a zeroed page and will
    // be COW once it's written to.
    memset(data, 0, stride * aSize.height);
  }

  return true;
}
imgFrame::imgFrame()
    : mMonitor("imgFrame"),
      mDecoded(0, 0, 0, 0),
      mLockCount(0),
      mAborted(false),
      mFinished(false),
      mOptimizable(false),
      mShouldRecycle(false),
      mTimeout(FrameTimeout::FromRawMilliseconds(100)),
      mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
      mBlendMethod(BlendMethod::OVER),
      mFormat(SurfaceFormat::UNKNOWN),
      mNonPremult(false) {}

imgFrame::~imgFrame() {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);
#endif
}
nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                                  SurfaceFormat aFormat, bool aNonPremult,
                                  const Maybe<AnimationParams>& aAnimParams,
                                  bool aShouldRecycle) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aImageSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;

  // May be updated shortly after InitForDecoder by BlendAnimationFilter,
  // which needs to take the previous frames into consideration to calculate
  // it properly. We start with the whole frame as dirty.
  mDirtyRect = GetRect();

  if (aAnimParams) {
    mBlendRect = aAnimParams->mBlendRect;
    mTimeout = aAnimParams->mTimeout;
    mBlendMethod = aAnimParams->mBlendMethod;
    mDisposalMethod = aAnimParams->mDisposalMethod;
  } else {
    mBlendRect = GetRect();
  }

  if (aShouldRecycle) {
    // If we are recycling then we should always use BGRA for the underlying
    // surface because if we use BGRX, the next frame composited into the
    // surface could be BGRA and cause rendering problems.
    MOZ_ASSERT(aAnimParams);
    mFormat = SurfaceFormat::OS_RGBA;
  } else {
    mFormat = aFormat;
  }

  mNonPremult = aNonPremult;
  mShouldRecycle = aShouldRecycle;

  MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

  bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
  mRawSurface = AllocateBufferForImage(mImageSize, mFormat, mShouldRecycle,
                                       postFirstFrame);
  if (!mRawSurface) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
      aAnimParams) {
    mBlankRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mBlankRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
  if (!mLockedSurface) {
    NS_WARNING("Failed to create LockedSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    mBlankLockedSurface =
        CreateLockedSurface(mBlankRawSurface, mImageSize, mFormat);
    if (!mBlankLockedSurface) {
      NS_WARNING("Failed to create BlankLockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
    NS_WARNING("Could not clear allocated buffer");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    if (!GreenSurface(mBlankRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated blank buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  return NS_OK;
}
nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
  // We want to recycle this frame, but there is no guarantee that consumers
  // are done with it in a timely manner. Let's ensure they are done with it
  // first.
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0);
  MOZ_ASSERT(mLockedSurface);

  if (!mShouldRecycle) {
    // This frame either was never marked as recyclable, or the flag was
    // cleared for a caller which does not support recycling.
    return NS_ERROR_NOT_AVAILABLE;
  }

  // Ensure we account for all internal references to the surface.
  MozRefCountType internalRefs = 1;
  if (mRawSurface == mLockedSurface) {
    ++internalRefs;
  }
  if (mOptSurface == mLockedSurface) {
    ++internalRefs;
  }
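  // To illustrate: for a shared-memory animation frame, mRawSurface and
  // mLockedSurface are typically the same object, so internalRefs is 2; any
  // count above that means an external consumer still holds the surface.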
  if (mLockedSurface->refCount() > internalRefs) {
    if (NS_IsMainThread()) {
      // We should never be both decoding and recycling on the main thread.
      // Sync decoding can only be used to produce the first set of frames.
      // Those either never use recycling because advancing was blocked (main
      // thread is busy) or we were auto-advancing (to seek to a frame) and
      // the frames were never accessed (and thus cannot have recycle locks).
      MOZ_ASSERT_UNREACHABLE("Recycling/decoding on the main thread?");
      return NS_ERROR_NOT_AVAILABLE;
    }

    // We don't want to wait forever to reclaim the frame because we have no
    // idea why it is still held. It is possibly due to OMTP. Since we are off
    // the main thread, and we generally have frames already buffered for the
    // animation, we can afford to wait a short period of time to hopefully
    // complete the transaction and reclaim the buffer.
    //
    // We choose to wait for, at most, the refresh driver interval, so that we
    // won't skip more than one frame. If the frame is still in use due to
    // outstanding transactions, we are already skipping frames. If the frame
    // is still in use for some other purpose, it won't be returned to the pool
    // and its owner can hold onto it forever without additional impact here.
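    // Concretely, at the usual 60 Hz default interval of ~16 ms (clamped to
    // the [4, 20] ms range below), we poll roughly every 4 ms and give up
    // after ~16 ms.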
    int32_t refreshInterval =
        std::max(std::min(nsRefreshDriver::DefaultInterval(), 20), 4);
    TimeDuration waitInterval =
        TimeDuration::FromMilliseconds(refreshInterval >> 2);
    TimeStamp timeout =
        TimeStamp::Now() + TimeDuration::FromMilliseconds(refreshInterval);
    while (true) {
      mMonitor.Wait(waitInterval);
      if (mLockedSurface->refCount() <= internalRefs) {
        break;
      }

      if (timeout <= TimeStamp::Now()) {
        // We couldn't secure the frame for recycling. It will allocate a new
        // frame instead.
        return NS_ERROR_NOT_AVAILABLE;
      }
    }
  }

  mBlendRect = aAnimParams.mBlendRect;
  mTimeout = aAnimParams.mTimeout;
  mBlendMethod = aAnimParams.mBlendMethod;
  mDisposalMethod = aAnimParams.mDisposalMethod;
  mDirtyRect = GetRect();

  return NS_OK;
}
nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                                    const nsIntSize& aSize,
                                    const SurfaceFormat aFormat,
                                    SamplingFilter aSamplingFilter,
                                    uint32_t aImageFlags,
                                    gfx::BackendType aBackend) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFormat = aFormat;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    target = gfxPlatform::CreateDrawTargetForData(
        mLockedSurface->GetData(), mImageSize, mLockedSurface->Stride(),
        mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead. This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that
    // in the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
      target = gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
          aBackend, mImageSize, mFormat);
    } else {
      target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
          mImageSize, mFormat);
    }
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx);  // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mImageSize),
                             ImageRegion::Create(ThebesRect(GetRect())),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  } else {
    FinalizeSurface();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());
#endif

  return NS_OK;
}
nsresult imgFrame::Optimize(DrawTarget* aTarget) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (mLockCount > 0 || !mOptimizable) {
    // Don't optimize right now.
    return NS_OK;
  }

  // Check whether image optimization is disabled -- not thread safe!
  static bool gDisableOptimize = false;
  static bool hasCheckedOptimize = false;
  if (!hasCheckedOptimize) {
    if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
      gDisableOptimize = true;
    }
    hasCheckedOptimize = true;
  }

  // Don't optimize during shutdown because gfxPlatform may not be available.
  if (ShutdownTracker::ShutdownHasStarted()) {
    return NS_OK;
  }

  if (gDisableOptimize) {
    return NS_OK;
  }

  if (mOptSurface) {
    return NS_OK;
  }

  // XXX(seth): It's currently unclear if there's any reason why we can't
  // optimize non-premult surfaces. We should look into removing this.
  if (mNonPremult) {
    return NS_OK;
  }
  if (!gfxVars::UseWebRender()) {
    mOptSurface = aTarget->OptimizeSourceSurface(mLockedSurface);
  } else {
    mOptSurface = gfxPlatform::GetPlatform()
                      ->ScreenReferenceDrawTarget()
                      ->OptimizeSourceSurface(mLockedSurface);
  }
  if (mOptSurface == mLockedSurface) {
    mOptSurface = nullptr;
  }

  if (mOptSurface) {
    // There's no reason to keep our original surface around if we have an
    // optimized surface. Release our reference to it. This will leave
    // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
    // below.
    mRawSurface = nullptr;
  }

  // Release all strong references to the surface's memory. If the underlying
  // surface is volatile, this will allow the operating system to free the
  // memory if it needs to.
  mLockedSurface = nullptr;
  mOptimizable = false;

  return NS_OK;
}
DrawableFrameRef imgFrame::DrawableRef() { return DrawableFrameRef(this); }

RawAccessFrameRef imgFrame::RawAccessRef(bool aOnlyFinished /*= false*/) {
  return RawAccessFrameRef(this, aOnlyFinished);
}

void imgFrame::SetRawAccessOnly() {
  AssertImageDataLocked();

  // Lock our data and throw away the key.
  LockImageData(false);
}
imgFrame::SurfaceWithFormat imgFrame::SurfaceForDrawing(
    bool aDoPartialDecode, bool aDoTile, ImageRegion& aRegion,
    SourceSurface* aSurface) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (!aDoPartialDecode) {
    return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, mImageSize),
                             mFormat);
  }

  gfxRect available =
      gfxRect(mDecoded.X(), mDecoded.Y(), mDecoded.Width(), mDecoded.Height());

  if (aDoTile) {
    // Create a temporary surface.
    // Give this surface an alpha channel because there are
    // transparent pixels in the padding or undecoded area.
    RefPtr<DrawTarget> target =
        gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
            mImageSize, SurfaceFormat::OS_RGBA);
    if (!target) {
      return SurfaceWithFormat();
    }

    SurfacePattern pattern(aSurface, aRegion.GetExtendMode(),
                           Matrix::Translation(mDecoded.X(), mDecoded.Y()));
    target->FillRect(ToRect(aRegion.Intersect(available).Rect()), pattern);

    RefPtr<SourceSurface> newsurf = target->Snapshot();
    return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf, mImageSize),
                             target->GetFormat());
  }

  // Not tiling, and we have a surface, so we can account for
  // a partial decode just by twiddling parameters.
  aRegion = aRegion.Intersect(available);
  IntSize availableSize(mDecoded.Width(), mDecoded.Height());

  return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, availableSize),
                           mFormat);
}
bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
                    SamplingFilter aSamplingFilter, uint32_t aImageFlags,
                    float aOpacity) {
  AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);

  MOZ_ASSERT(NS_IsMainThread());
  NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
  NS_ASSERTION(!aRegion.IsRestricted() ||
                   !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");

  // Perform the draw and freeing of the surface outside the lock. We want to
  // avoid contention with the decoder if we can. The surface may also attempt
  // to relock the monitor if it is freed (e.g. RecyclingSourceSurface).
  RefPtr<SourceSurface> surf;
  SurfaceWithFormat surfaceResult;
  ImageRegion region(aRegion);
  gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);

  {
    MonitorAutoLock lock(mMonitor);

    // Possibly convert this image into a GPU texture. This may also cause our
    // mLockedSurface to be released and the OS to release the underlying
    // memory.
    Optimize(aContext->GetDrawTarget());

    bool doPartialDecode = !AreAllPixelsWritten();

    // Most draw targets will just use the surface only during DrawPixelSnapped
    // but captures/recordings will retain a reference outside this stack
    // context. While in theory a decoder thread could be trying to recycle
    // this frame at this very moment, in practice the only way we can get here
    // is if this frame is the current frame of the animation. Since we can
    // only advance on the main thread, we know nothing else will try to use
    // it.
    DrawTarget* drawTarget = aContext->GetDrawTarget();
    bool recording = drawTarget->GetBackendType() == BackendType::RECORDING;
    surf = GetSourceSurfaceInternal();
    if (!surf) {
      return false;
    }

    bool doTile = !imageRect.Contains(aRegion.Rect()) &&
                  !(aImageFlags & imgIContainer::FLAG_CLAMP);

    surfaceResult = SurfaceForDrawing(doPartialDecode, doTile, region, surf);

    // If we are recording, then we cannot recycle the surface. The blob
    // rasterizer is not properly synchronized for recycling in the compositor
    // process. The easiest thing to do is just mark the frames it consumes as
    // non-recyclable.
    if (recording && surfaceResult.IsValid()) {
      mShouldRecycle = false;
    }
  }

  if (surfaceResult.IsValid()) {
    gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
                               imageRect.Size(), region, surfaceResult.mFormat,
                               aSamplingFilter, aImageFlags, aOpacity);
  }

  return true;
}
nsresult imgFrame::ImageUpdated(const nsIntRect& aUpdateRect) {
  MonitorAutoLock lock(mMonitor);
  return ImageUpdatedInternal(aUpdateRect);
}

nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {
  mMonitor.AssertCurrentThreadOwns();

  // Clamp to the frame rect to ensure that decoder bugs don't result in a
  // decoded rect that extends outside the bounds of the frame rect.
  IntRect updateRect = aUpdateRect.Intersect(GetRect());
  if (updateRect.IsEmpty()) {
    return NS_OK;
  }

  mDecoded.UnionRect(mDecoded, updateRect);

  // Update our invalidation counters for any consumers watching for changes
  // in the surface.
  if (mRawSurface) {
    mRawSurface->Invalidate(updateRect);
  }
  if (mLockedSurface && mRawSurface != mLockedSurface) {
    mLockedSurface->Invalidate(updateRect);
  }
  return NS_OK;
}
void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                      bool aFinalize /* = true */) {
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  IntRect frameRect(GetRect());
  if (!mDecoded.IsEqualEdges(frameRect)) {
    // The decoder should have produced rows starting from either the bottom or
    // the top of the image. We need to calculate the region for which we have
    // not yet invalidated.
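    // For example, in a 100-row frame decoded top-down where only the first
    // 80 rows were reported, the missing region is (0, 80, width, 20); when
    // decoded bottom-up, it is the strip of mDecoded.y rows at the top.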
    IntRect delta(0, 0, frameRect.width, 0);
    if (mDecoded.y == 0) {
      delta.y = mDecoded.height;
      delta.height = frameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == frameRect.height) {
      delta.height = frameRect.height - mDecoded.height;
    } else {
      MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
      delta = frameRect;
    }

    ImageUpdatedInternal(delta);
  }

  MOZ_ASSERT(mDecoded.IsEqualEdges(frameRect));

  if (aFinalize) {
    FinalizeSurfaceInternal();
  }

  mFinished = true;

  // The image is now complete, wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
uint32_t imgFrame::GetImageBytesPerRow() const {
  mMonitor.AssertCurrentThreadOwns();

  if (mRawSurface) {
    return mImageSize.width * BytesPerPixel(mFormat);
  }

  return 0;
}

uint32_t imgFrame::GetImageDataLength() const {
  return GetImageBytesPerRow() * mImageSize.height;
}

void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
  MonitorAutoLock lock(mMonitor);
  GetImageDataInternal(aData, aLength);
}

void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
  MOZ_ASSERT(mLockedSurface);

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on
    // the main thread after decoding has finished, but if animations want to
    // read frame data off the main thread, we will need to reconsider this.
    *aData = mLockedSurface->GetData();
    MOZ_ASSERT(
        *aData,
        "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else {
    *aData = nullptr;
  }

  *aLength = GetImageDataLength();
}

uint8_t* imgFrame::GetImageData() const {
  uint8_t* data;
  uint32_t length;
  GetImageData(&data, &length);
  return data;
}
uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
  if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
    return nullptr;
  }

  uint8_t* data;
  if (mLockedSurface) {
    data = mLockedSurface->GetData();
  } else {
    data = nullptr;
  }

  // If the raw data is still available, we should get a valid pointer for it.
  if (!data) {
    MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
    return nullptr;
  }

  ++mLockCount;
  return data;
}

void imgFrame::AssertImageDataLocked() const {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}

nsresult imgFrame::UnlockImageData() {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
  if (mLockCount <= 0) {
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
             "Should have Finish()'d or aborted before unlocking");

  mLockCount--;

  return NS_OK;
}

void imgFrame::SetOptimizable() {
  AssertImageDataLocked();
  MonitorAutoLock lock(mMonitor);
  mOptimizable = true;
}
void imgFrame::FinalizeSurface() {
  MonitorAutoLock lock(mMonitor);
  FinalizeSurfaceInternal();
}

void imgFrame::FinalizeSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  // Not all images will have mRawSurface to finalize (e.g. paletted images).
  if (mShouldRecycle || !mRawSurface ||
      mRawSurface->GetType() != SurfaceType::DATA_SHARED) {
    return;
  }

  auto* sharedSurf = static_cast<SourceSurfaceSharedData*>(mRawSurface.get());
  sharedSurf->Finalize();
}
already_AddRefed<SourceSurface> imgFrame::GetSourceSurface() {
  MonitorAutoLock lock(mMonitor);
  return GetSourceSurfaceInternal();
}

already_AddRefed<SourceSurface> imgFrame::GetSourceSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  if (mOptSurface) {
    if (mOptSurface->IsValid()) {
      RefPtr<SourceSurface> surf(mOptSurface);
      return surf.forget();
    }
    mOptSurface = nullptr;
  }

  if (mBlankLockedSurface) {
    // Return the blank (solid green) surface; it only exists when the frame
    // was initialized with the
    // browser_measurement_render_anims_and_video_solid pref set.
    RefPtr<SourceSurface> surf(mBlankLockedSurface);
    return surf.forget();
  }

  if (mLockedSurface) {
    RefPtr<SourceSurface> surf(mLockedSurface);
    return surf.forget();
  }

  MOZ_ASSERT(!mShouldRecycle, "Should recycle but no locked surface!");

  if (!mRawSurface) {
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mImageSize, mFormat);
}
void imgFrame::Abort() {
  MonitorAutoLock lock(mMonitor);

  mAborted = true;

  // Wake up anyone who's waiting.
  mMonitor.NotifyAll();
}

bool imgFrame::IsAborted() const {
  MonitorAutoLock lock(mMonitor);
  return mAborted;
}

bool imgFrame::IsFinished() const {
  MonitorAutoLock lock(mMonitor);
  return mFinished;
}

void imgFrame::WaitUntilFinished() const {
  MonitorAutoLock lock(mMonitor);

  while (true) {
    // Return if we're aborted or complete.
    if (mAborted || mFinished) {
      return;
    }

    // Not complete yet, so we'll have to wait.
    mMonitor.Wait();
  }
}

bool imgFrame::AreAllPixelsWritten() const {
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(GetRect());
}
void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                      const AddSizeOfCb& aCallback) const {
  MonitorAutoLock lock(mMonitor);

  AddSizeOfCbData metadata;

  metadata.mFinished = mFinished;
  if (mLockedSurface) {
    // The locked surface should only be present if we have mRawSurface. Hence
    // we only need to get its allocation size to avoid double counting.
    metadata.mHeapBytes += aMallocSizeOf(mLockedSurface);
    metadata.AddType(mLockedSurface->GetType());
  }
  if (mOptSurface) {
    metadata.mHeapBytes += aMallocSizeOf(mOptSurface);

    SourceSurface::SizeOfInfo info;
    mOptSurface->SizeOfExcludingThis(aMallocSizeOf, info);
    metadata.Accumulate(info);
  }
  if (mRawSurface) {
    metadata.mHeapBytes += aMallocSizeOf(mRawSurface);

    SourceSurface::SizeOfInfo info;
    mRawSurface->SizeOfExcludingThis(aMallocSizeOf, info);
    metadata.Accumulate(info);
  }

  aCallback(metadata);
}

}  // namespace image
}  // namespace mozilla