Bug 1669129 - [devtools] Enable devtools.overflow.debugging.enabled. r=jdescottes
[gecko.git] / image / imgFrame.cpp
blob6003154ed04f1a63009f99808ebaf1270a4a4c37
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "imgFrame.h"
8 #include "ImageRegion.h"
9 #include "ShutdownTracker.h"
10 #include "SurfaceCache.h"
12 #include "prenv.h"
14 #include "gfx2DGlue.h"
15 #include "gfxPlatform.h"
17 #include "gfxUtils.h"
19 #include "GeckoProfiler.h"
20 #include "MainThreadUtils.h"
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/gfx/gfxVars.h"
23 #include "mozilla/gfx/Tools.h"
24 #include "mozilla/gfx/SourceSurfaceRawData.h"
25 #include "mozilla/layers/SourceSurfaceSharedData.h"
26 #include "mozilla/layers/SourceSurfaceVolatileData.h"
27 #include "mozilla/Likely.h"
28 #include "mozilla/MemoryReporting.h"
29 #include "mozilla/StaticPrefs_browser.h"
30 #include "nsMargin.h"
31 #include "nsRefreshDriver.h"
32 #include "nsThreadUtils.h"
34 #include <algorithm> // for min, max
36 namespace mozilla {
38 using namespace gfx;
40 namespace image {
/**
 * This class is identical to SourceSurfaceSharedData but returns a different
 * type so that SharedSurfacesChild is aware imagelib wants to recycle this
 * surface for future animation frames.
 */
class RecyclingSourceSurfaceSharedData final : public SourceSurfaceSharedData {
 public:
  MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(RecyclingSourceSurfaceSharedData,
                                          override)

  // The distinct type is the only difference from the base class; consumers
  // key off it to detect recyclable animation-frame surfaces.
  SurfaceType GetType() const override {
    return SurfaceType::DATA_RECYCLING_SHARED;
  }
};
57 static int32_t VolatileSurfaceStride(const IntSize& size,
58 SurfaceFormat format) {
59 // Stride must be a multiple of four or cairo will complain.
60 return (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
63 static already_AddRefed<DataSourceSurface> CreateLockedSurface(
64 DataSourceSurface* aSurface, const IntSize& size, SurfaceFormat format) {
65 switch (aSurface->GetType()) {
66 case SurfaceType::DATA_SHARED:
67 case SurfaceType::DATA_RECYCLING_SHARED:
68 case SurfaceType::DATA_ALIGNED: {
69 // Shared memory is never released until the surface itself is released.
70 // Similar for aligned/heap surfaces.
71 RefPtr<DataSourceSurface> surf(aSurface);
72 return surf.forget();
74 default: {
75 // Volatile memory requires us to map it first, and it is fallible.
76 DataSourceSurface::ScopedMap smap(aSurface,
77 DataSourceSurface::READ_WRITE);
78 if (smap.IsMapped()) {
79 return MakeAndAddRef<SourceSurfaceMappedData>(std::move(smap), size,
80 format);
82 break;
86 return nullptr;
89 static bool ShouldUseHeap(const IntSize& aSize, int32_t aStride,
90 bool aIsAnimated) {
91 // On some platforms (i.e. Android), a volatile buffer actually keeps a file
92 // handle active. We would like to avoid too many since we could easily
93 // exhaust the pool. However, other platforms we do not have the file handle
94 // problem, and additionally we may avoid a superfluous memset since the
95 // volatile memory starts out as zero-filled. Hence the knobs below.
97 // For as long as an animated image is retained, its frames will never be
98 // released to let the OS purge volatile buffers.
99 if (aIsAnimated && StaticPrefs::image_mem_animated_use_heap()) {
100 return true;
103 // Lets us avoid too many small images consuming all of the handles. The
104 // actual allocation checks for overflow.
105 int32_t bufferSize = (aStride * aSize.height) / 1024;
106 return bufferSize < StaticPrefs::image_mem_volatile_min_threshold_kb();
109 static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
110 const IntSize& size, SurfaceFormat format, bool aShouldRecycle = false,
111 bool aIsAnimated = false) {
112 int32_t stride = VolatileSurfaceStride(size, format);
114 if (gfxVars::GetUseWebRenderOrDefault() && StaticPrefs::image_mem_shared()) {
115 RefPtr<SourceSurfaceSharedData> newSurf;
116 if (aShouldRecycle) {
117 newSurf = new RecyclingSourceSurfaceSharedData();
118 } else {
119 newSurf = new SourceSurfaceSharedData();
121 if (newSurf->Init(size, stride, format)) {
122 return newSurf.forget();
124 } else if (ShouldUseHeap(size, stride, aIsAnimated)) {
125 RefPtr<SourceSurfaceAlignedRawData> newSurf =
126 new SourceSurfaceAlignedRawData();
127 if (newSurf->Init(size, format, false, 0, stride)) {
128 return newSurf.forget();
130 } else {
131 RefPtr<SourceSurfaceVolatileData> newSurf = new SourceSurfaceVolatileData();
132 if (newSurf->Init(size, stride, format)) {
133 return newSurf.forget();
136 return nullptr;
139 static bool GreenSurface(DataSourceSurface* aSurface, const IntSize& aSize,
140 SurfaceFormat aFormat) {
141 int32_t stride = aSurface->Stride();
142 uint32_t* surfaceData = reinterpret_cast<uint32_t*>(aSurface->GetData());
143 uint32_t surfaceDataLength = (stride * aSize.height) / sizeof(uint32_t);
145 // Start by assuming that GG is in the second byte and
146 // AA is in the final byte -- the most common case.
147 uint32_t color = mozilla::NativeEndian::swapFromBigEndian(0x00FF00FF);
149 // We are only going to handle this type of test under
150 // certain circumstances.
151 MOZ_ASSERT(surfaceData);
152 MOZ_ASSERT(aFormat == SurfaceFormat::B8G8R8A8 ||
153 aFormat == SurfaceFormat::B8G8R8X8 ||
154 aFormat == SurfaceFormat::R8G8B8A8 ||
155 aFormat == SurfaceFormat::R8G8B8X8 ||
156 aFormat == SurfaceFormat::A8R8G8B8 ||
157 aFormat == SurfaceFormat::X8R8G8B8);
158 MOZ_ASSERT((stride * aSize.height) % sizeof(uint32_t));
160 if (aFormat == SurfaceFormat::A8R8G8B8 ||
161 aFormat == SurfaceFormat::X8R8G8B8) {
162 color = mozilla::NativeEndian::swapFromBigEndian(0xFF00FF00);
165 for (uint32_t i = 0; i < surfaceDataLength; i++) {
166 surfaceData[i] = color;
169 return true;
172 static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
173 SurfaceFormat aFormat) {
174 int32_t stride = aSurface->Stride();
175 uint8_t* data = aSurface->GetData();
176 MOZ_ASSERT(data);
178 if (aFormat == SurfaceFormat::OS_RGBX) {
179 // Skia doesn't support RGBX surfaces, so ensure the alpha value is set
180 // to opaque white. While it would be nice to only do this for Skia,
181 // imgFrame can run off main thread and past shutdown where
182 // we might not have gfxPlatform, so just memset every time instead.
183 memset(data, 0xFF, stride * aSize.height);
184 } else if (aSurface->OnHeap()) {
185 // We only need to memset it if the buffer was allocated on the heap.
186 // Otherwise, it's allocated via mmap and refers to a zeroed page and will
187 // be COW once it's written to.
188 memset(data, 0, stride * aSize.height);
191 return true;
// Constructs an empty, unusable frame: no surface is allocated, the format is
// UNKNOWN, and the default timeout is 100ms. The frame becomes usable only
// after InitForDecoder() or InitWithDrawable() succeeds.
imgFrame::imgFrame()
    : mMonitor("imgFrame"),
      mDecoded(0, 0, 0, 0),
      mLockCount(0),
      mAborted(false),
      mFinished(false),
      mOptimizable(false),
      mShouldRecycle(false),
      mTimeout(FrameTimeout::FromRawMilliseconds(100)),
      mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
      mBlendMethod(BlendMethod::OVER),
      mFormat(SurfaceFormat::UNKNOWN),
      mNonPremult(false) {}
imgFrame::~imgFrame() {
#ifdef DEBUG
  // A frame must either have been aborted, or fully written and Finish()'d,
  // by the time it is destroyed.
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);
#endif
}
// Allocates and prepares the frame's backing surface(s) for a decoder to
// write into. On any failure the frame is marked aborted and an error is
// returned. Must be called at most once per frame (asserted below via
// mLockedSurface).
nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                                  SurfaceFormat aFormat, bool aNonPremult,
                                  const Maybe<AnimationParams>& aAnimParams,
                                  bool aShouldRecycle) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aImageSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;

  // May be updated shortly after InitForDecoder by BlendAnimationFilter
  // because it needs to take into consideration the previous frames to
  // properly calculate. We start with the whole frame as dirty.
  mDirtyRect = GetRect();

  if (aAnimParams) {
    mBlendRect = aAnimParams->mBlendRect;
    mTimeout = aAnimParams->mTimeout;
    mBlendMethod = aAnimParams->mBlendMethod;
    mDisposalMethod = aAnimParams->mDisposalMethod;
  } else {
    mBlendRect = GetRect();
  }

  if (aShouldRecycle) {
    // If we are recycling then we should always use BGRA for the underlying
    // surface because if we use BGRX, the next frame composited into the
    // surface could be BGRA and cause rendering problems.
    MOZ_ASSERT(aAnimParams);
    mFormat = SurfaceFormat::OS_RGBA;
  } else {
    mFormat = aFormat;
  }

  mNonPremult = aNonPremult;
  mShouldRecycle = aShouldRecycle;

  MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

  // Frames after the first of an animation may end up on the heap/volatile
  // path depending on prefs; see ShouldUseHeap().
  bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
  mRawSurface = AllocateBufferForImage(mImageSize, mFormat, mShouldRecycle,
                                       postFirstFrame);
  if (!mRawSurface) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Optionally allocate a second, solid-color surface used for the
  // render-anims-and-video-solid measurement mode (filled green below).
  if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
      aAnimParams) {
    mBlankRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mBlankRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
  if (!mLockedSurface) {
    NS_WARNING("Failed to create LockedSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    mBlankLockedSurface =
        CreateLockedSurface(mBlankRawSurface, mImageSize, mFormat);
    if (!mBlankLockedSurface) {
      NS_WARNING("Failed to create BlankLockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
    NS_WARNING("Could not clear allocated buffer");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    if (!GreenSurface(mBlankRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated blank buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  return NS_OK;
}
// Re-initializes an already-allocated, recyclable frame for a new animation
// frame. May block (off main thread only) up to roughly one refresh interval
// waiting for outstanding consumers to drop their references; returns
// NS_ERROR_NOT_AVAILABLE if the surface cannot be reclaimed.
nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
  // We want to recycle this frame, but there is no guarantee that consumers
  // are done with it in a timely manner. Let's ensure they are done with it
  // first.
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0);
  MOZ_ASSERT(mLockedSurface);

  if (!mShouldRecycle) {
    // This frame either was never marked as recyclable, or the flag was
    // cleared for a caller which does not support recycling.
    return NS_ERROR_NOT_AVAILABLE;
  }

  // Ensure we account for all internal references to the surface.
  MozRefCountType internalRefs = 1;
  if (mRawSurface == mLockedSurface) {
    ++internalRefs;
  }
  if (mOptSurface == mLockedSurface) {
    ++internalRefs;
  }

  if (mLockedSurface->refCount() > internalRefs) {
    if (NS_IsMainThread()) {
      // We should never be both decoding and recycling on the main thread.
      // Sync decoding can only be used to produce the first set of frames.
      // Those either never use recycling because advancing was blocked (main
      // thread is busy) or we were auto-advancing (to seek to a frame) and
      // the frames were never accessed (and thus cannot have recycle locks).
      MOZ_ASSERT_UNREACHABLE("Recycling/decoding on the main thread?");
      return NS_ERROR_NOT_AVAILABLE;
    }

    // We don't want to wait forever to reclaim the frame because we have no
    // idea why it is still held. It is possibly due to OMTP. Since we are off
    // the main thread, and we generally have frames already buffered for the
    // animation, we can afford to wait a short period of time to hopefully
    // complete the transaction and reclaim the buffer.
    //
    // We choose to wait for, at most, the refresh driver interval, so that we
    // won't skip more than one frame. If the frame is still in use due to
    // outstanding transactions, we are already skipping frames. If the frame
    // is still in use for some other purpose, it won't be returned to the
    // pool and its owner can hold onto it forever without additional impact
    // here.
    int32_t refreshInterval =
        std::max(std::min(nsRefreshDriver::DefaultInterval(), 20), 4);
    // Poll at a quarter of the interval; give up once the full interval has
    // elapsed.
    TimeDuration waitInterval =
        TimeDuration::FromMilliseconds(refreshInterval >> 2);
    TimeStamp timeout =
        TimeStamp::Now() + TimeDuration::FromMilliseconds(refreshInterval);
    while (true) {
      mMonitor.Wait(waitInterval);
      if (mLockedSurface->refCount() <= internalRefs) {
        break;
      }

      if (timeout <= TimeStamp::Now()) {
        // We couldn't secure the frame for recycling. It will allocate a new
        // frame instead.
        return NS_ERROR_NOT_AVAILABLE;
      }
    }
  }

  mBlendRect = aAnimParams.mBlendRect;
  mTimeout = aAnimParams.mTimeout;
  mBlendMethod = aAnimParams.mBlendMethod;
  mDisposalMethod = aAnimParams.mDisposalMethod;
  mDirtyRect = GetRect();

  return NS_OK;
}
// Initializes the frame by rasterizing aDrawable into it. Uses a data
// surface where the backend supports it; otherwise draws into an offscreen
// (already "optimized") surface, which may force an expensive readback if
// RawAccessRef() is later called. The frame is complete on success.
nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                                    const nsIntSize& aSize,
                                    const SurfaceFormat aFormat,
                                    SamplingFilter aSamplingFilter,
                                    uint32_t aImageFlags,
                                    gfx::BackendType aBackend) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFormat = aFormat;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    target = gfxPlatform::CreateDrawTargetForData(
        mLockedSurface->GetData(), mImageSize, mLockedSurface->Stride(),
        mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead. This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that
    // in the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
      target = gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
          aBackend, mImageSize, mFormat);
    } else {
      target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
          mImageSize, mFormat);
    }
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx);  // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mImageSize),
                             ImageRegion::Create(ThebesRect(GetRect())),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  } else {
    FinalizeSurface();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());
#endif

  return NS_OK;
}
// Attempts to convert the frame's surface into a form optimized for the
// given draw target (e.g. a GPU texture), then drops the raw/locked surface
// references so volatile memory can be purged. Main thread only; a no-op
// under various conditions (locked, already optimized, shutdown, disabled).
nsresult imgFrame::Optimize(DrawTarget* aTarget) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (mLockCount > 0 || !mOptimizable) {
    // Don't optimize right now.
    return NS_OK;
  }

  // Check whether image optimization is disabled -- not thread safe!
  // (Safe in practice only because of the main-thread assertion above.)
  static bool gDisableOptimize = false;
  static bool hasCheckedOptimize = false;
  if (!hasCheckedOptimize) {
    if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
      gDisableOptimize = true;
    }
    hasCheckedOptimize = true;
  }

  // Don't optimize during shutdown because gfxPlatform may not be available.
  if (ShutdownTracker::ShutdownHasStarted()) {
    return NS_OK;
  }

  if (gDisableOptimize) {
    return NS_OK;
  }

  if (mOptSurface) {
    return NS_OK;
  }

  // XXX(seth): It's currently unclear if there's any reason why we can't
  // optimize non-premult surfaces. We should look into removing this.
  if (mNonPremult) {
    return NS_OK;
  }
  if (!gfxVars::UseWebRender()) {
    mOptSurface = aTarget->OptimizeSourceSurface(mLockedSurface);
  } else {
    mOptSurface = gfxPlatform::GetPlatform()
                      ->ScreenReferenceDrawTarget()
                      ->OptimizeSourceSurface(mLockedSurface);
  }
  if (mOptSurface == mLockedSurface) {
    // Optimization was a no-op; don't keep a duplicate reference.
    mOptSurface = nullptr;
  }

  if (mOptSurface) {
    // There's no reason to keep our original surface around if we have an
    // optimized surface. Release our reference to it. This will leave
    // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
    // below.
    mRawSurface = nullptr;
  }

  // Release all strong references to the surface's memory. If the underlying
  // surface is volatile, this will allow the operating system to free the
  // memory if it needs to.
  mLockedSurface = nullptr;
  mOptimizable = false;

  return NS_OK;
}
// Returns a drawable reference to this frame.
DrawableFrameRef imgFrame::DrawableRef() { return DrawableFrameRef(this); }
// Returns a raw-data-access reference to this frame; with aOnlyFinished set,
// the reference only succeeds once decoding has finished.
RawAccessFrameRef imgFrame::RawAccessRef(bool aOnlyFinished /*= false*/) {
  return RawAccessFrameRef(this, aOnlyFinished);
}
// Permanently pins the image data in memory by taking a lock that is never
// released. Requires the data to already be locked by the caller.
void imgFrame::SetRawAccessOnly() {
  AssertImageDataLocked();

  // Lock our data and throw away the key.
  LockImageData(false);
}
// Wraps aSurface in a drawable suitable for the requested draw, handling
// partially-decoded frames either by rendering the decoded portion into a
// temporary tiling surface or by clipping the region/size to what has been
// decoded. May mutate aRegion in the non-tiling partial-decode case.
imgFrame::SurfaceWithFormat imgFrame::SurfaceForDrawing(
    bool aDoPartialDecode, bool aDoTile, ImageRegion& aRegion,
    SourceSurface* aSurface) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (!aDoPartialDecode) {
    return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, mImageSize),
                             mFormat);
  }

  gfxRect available =
      gfxRect(mDecoded.X(), mDecoded.Y(), mDecoded.Width(), mDecoded.Height());

  if (aDoTile) {
    // Create a temporary surface.
    // Give this surface an alpha channel because there are
    // transparent pixels in the padding or undecoded area
    RefPtr<DrawTarget> target =
        gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
            mImageSize, SurfaceFormat::OS_RGBA);
    if (!target) {
      return SurfaceWithFormat();
    }

    SurfacePattern pattern(aSurface, aRegion.GetExtendMode(),
                           Matrix::Translation(mDecoded.X(), mDecoded.Y()));
    target->FillRect(ToRect(aRegion.Intersect(available).Rect()), pattern);

    RefPtr<SourceSurface> newsurf = target->Snapshot();
    return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf, mImageSize),
                             target->GetFormat());
  }

  // Not tiling, and we have a surface, so we can account for
  // a partial decode just by twiddling parameters.
  aRegion = aRegion.Intersect(available);
  IntSize availableSize(mDecoded.Width(), mDecoded.Height());

  return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, availableSize),
                           mFormat);
}
606 bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
607 SamplingFilter aSamplingFilter, uint32_t aImageFlags,
608 float aOpacity) {
609 AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);
611 MOZ_ASSERT(NS_IsMainThread());
612 NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
613 NS_ASSERTION(!aRegion.IsRestricted() ||
614 !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
615 "We must be allowed to sample *some* source pixels!");
617 // Perform the draw and freeing of the surface outside the lock. We want to
618 // avoid contention with the decoder if we can. The surface may also attempt
619 // to relock the monitor if it is freed (e.g. RecyclingSourceSurface).
620 RefPtr<SourceSurface> surf;
621 SurfaceWithFormat surfaceResult;
622 ImageRegion region(aRegion);
623 gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);
626 MonitorAutoLock lock(mMonitor);
628 // Possibly convert this image into a GPU texture, this may also cause our
629 // mLockedSurface to be released and the OS to release the underlying
630 // memory.
631 Optimize(aContext->GetDrawTarget());
633 bool doPartialDecode = !AreAllPixelsWritten();
635 // Most draw targets will just use the surface only during DrawPixelSnapped
636 // but captures/recordings will retain a reference outside this stack
637 // context. While in theory a decoder thread could be trying to recycle this
638 // frame at this very moment, in practice the only way we can get here is if
639 // this frame is the current frame of the animation. Since we can only
640 // advance on the main thread, we know nothing else will try to use it.
641 DrawTarget* drawTarget = aContext->GetDrawTarget();
642 bool recording = drawTarget->GetBackendType() == BackendType::RECORDING;
643 RefPtr<SourceSurface> surf = GetSourceSurfaceInternal();
644 if (!surf) {
645 return false;
648 bool doTile = !imageRect.Contains(aRegion.Rect()) &&
649 !(aImageFlags & imgIContainer::FLAG_CLAMP);
651 surfaceResult = SurfaceForDrawing(doPartialDecode, doTile, region, surf);
653 // If we are recording, then we cannot recycle the surface. The blob
654 // rasterizer is not properly synchronized for recycling in the compositor
655 // process. The easiest thing to do is just mark the frames it consumes as
656 // non-recyclable.
657 if (recording && surfaceResult.IsValid()) {
658 mShouldRecycle = false;
662 if (surfaceResult.IsValid()) {
663 gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
664 imageRect.Size(), region, surfaceResult.mFormat,
665 aSamplingFilter, aImageFlags, aOpacity);
668 return true;
// Public, locking wrapper around ImageUpdatedInternal().
nsresult imgFrame::ImageUpdated(const nsIntRect& aUpdateRect) {
  MonitorAutoLock lock(mMonitor);
  return ImageUpdatedInternal(aUpdateRect);
}
676 nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {
677 mMonitor.AssertCurrentThreadOwns();
679 // Clamp to the frame rect to ensure that decoder bugs don't result in a
680 // decoded rect that extends outside the bounds of the frame rect.
681 IntRect updateRect = aUpdateRect.Intersect(GetRect());
682 if (updateRect.IsEmpty()) {
683 return NS_OK;
686 mDecoded.UnionRect(mDecoded, updateRect);
688 // Update our invalidation counters for any consumers watching for changes
689 // in the surface.
690 if (mRawSurface) {
691 mRawSurface->Invalidate(updateRect);
693 if (mLockedSurface && mRawSurface != mLockedSurface) {
694 mLockedSurface->Invalidate(updateRect);
696 return NS_OK;
// Marks decoding of this frame as complete: invalidates any remaining
// not-yet-reported rows, optionally finalizes the surface, sets mFinished,
// and wakes any threads blocked in WaitUntilFinished().
void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                      bool aFinalize /* = true */) {
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  IntRect frameRect(GetRect());
  if (!mDecoded.IsEqualEdges(frameRect)) {
    // The decoder should have produced rows starting from either the bottom
    // or the top of the image. We need to calculate the region for which we
    // have not yet invalidated.
    IntRect delta(0, 0, frameRect.width, 0);
    if (mDecoded.y == 0) {
      delta.y = mDecoded.height;
      delta.height = frameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == frameRect.height) {
      delta.height = frameRect.height - mDecoded.y;
    } else {
      MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
      delta = frameRect;
    }

    ImageUpdatedInternal(delta);
  }

  MOZ_ASSERT(mDecoded.IsEqualEdges(frameRect));

  if (aFinalize) {
    FinalizeSurfaceInternal();
  }

  mFinished = true;

  // The image is now complete, wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
// Bytes of pixel data per row (width * bytes-per-pixel). NOTE(review): this
// is the unpadded width, which may be smaller than the 4-byte-aligned
// surface stride. Returns 0 when there is no raw surface.
uint32_t imgFrame::GetImageBytesPerRow() const {
  mMonitor.AssertCurrentThreadOwns();

  if (mRawSurface) {
    return mImageSize.width * BytesPerPixel(mFormat);
  }

  return 0;
}
// Total (unpadded) byte length of the image data. Caller must hold the
// monitor (asserted inside GetImageBytesPerRow()).
uint32_t imgFrame::GetImageDataLength() const {
  return GetImageBytesPerRow() * mImageSize.height;
}
// Public, locking wrapper around GetImageDataInternal().
void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
  MonitorAutoLock lock(mMonitor);
  GetImageDataInternal(aData, aLength);
}
// Returns the raw pixel pointer (nullptr if there is no locked surface) and
// the unpadded data length. Requires the monitor and an active data lock.
void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
  MOZ_ASSERT(mLockedSurface);

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on
    // the main thread after decoding has finished, but if animations want to
    // read frame data off the main thread, we will need to reconsider this.
    *aData = mLockedSurface->GetData();
    MOZ_ASSERT(
        *aData,
        "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else {
    *aData = nullptr;
  }

  *aLength = GetImageDataLength();
}
774 uint8_t* imgFrame::GetImageData() const {
775 uint8_t* data;
776 uint32_t length;
777 GetImageData(&data, &length);
778 return data;
// Takes a data lock and returns the raw pixel pointer, or nullptr if the
// data is unavailable (e.g. the frame was optimized away) or if
// aOnlyFinished is set and decoding has not finished. Each successful call
// must be balanced by UnlockImageData().
uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
  if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
    return nullptr;
  }

  uint8_t* data;
  if (mLockedSurface) {
    data = mLockedSurface->GetData();
  } else {
    data = nullptr;
  }

  // If the raw data is still available, we should get a valid pointer for it.
  if (!data) {
    MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
    return nullptr;
  }

  ++mLockCount;
  return data;
}
// Debug-only check that the caller holds at least one data lock.
void imgFrame::AssertImageDataLocked() const {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}
// Releases one data lock taken by LockImageData(). Fails (and asserts) if
// the frame is not currently locked.
nsresult imgFrame::UnlockImageData() {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
  if (mLockCount <= 0) {
    return NS_ERROR_FAILURE;
  }

  // Dropping the final lock is only legal once the frame is finished or
  // aborted.
  MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
             "Should have Finish()'d or aborted before unlocking");

  mLockCount--;

  return NS_OK;
}
// Marks the frame as a candidate for Optimize(). Requires the data to be
// locked by the caller.
void imgFrame::SetOptimizable() {
  AssertImageDataLocked();
  MonitorAutoLock lock(mMonitor);
  mOptimizable = true;
}
// Public, locking wrapper around FinalizeSurfaceInternal().
void imgFrame::FinalizeSurface() {
  MonitorAutoLock lock(mMonitor);
  FinalizeSurfaceInternal();
}
// Finalizes a completed shared-memory surface via
// SourceSurfaceSharedData::Finalize(). Skipped for recyclable frames and for
// any surface type other than DATA_SHARED.
void imgFrame::FinalizeSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  // Not all images will have mRawSurface to finalize (i.e. paletted images).
  if (mShouldRecycle || !mRawSurface ||
      mRawSurface->GetType() != SurfaceType::DATA_SHARED) {
    return;
  }

  auto* sharedSurf = static_cast<SourceSurfaceSharedData*>(mRawSurface.get());
  sharedSurf->Finalize();
}
// Public, locking wrapper around GetSourceSurfaceInternal().
already_AddRefed<SourceSurface> imgFrame::GetSourceSurface() {
  MonitorAutoLock lock(mMonitor);
  return GetSourceSurfaceInternal();
}
// Returns the best available surface, in priority order: valid optimized
// surface, blank (measurement-mode) surface, locked surface, or a freshly
// re-locked raw surface. May return nullptr.
already_AddRefed<SourceSurface> imgFrame::GetSourceSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  if (mOptSurface) {
    if (mOptSurface->IsValid()) {
      RefPtr<SourceSurface> surf(mOptSurface);
      return surf.forget();
    }
    // The optimized surface went invalid; drop it and fall through.
    mOptSurface = nullptr;
  }

  if (mBlankLockedSurface) {
    // We are going to return the blank surface because of the flags.
    // We are including comments here that are copied from below
    // just so that we are on the same page!
    RefPtr<SourceSurface> surf(mBlankLockedSurface);
    return surf.forget();
  }

  if (mLockedSurface) {
    RefPtr<SourceSurface> surf(mLockedSurface);
    return surf.forget();
  }

  MOZ_ASSERT(!mShouldRecycle, "Should recycle but no locked surface!");

  if (!mRawSurface) {
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mImageSize, mFormat);
}
// Marks the frame as aborted and wakes any thread blocked in
// WaitUntilFinished().
void imgFrame::Abort() {
  MonitorAutoLock lock(mMonitor);

  mAborted = true;

  // Wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
// Thread-safe accessor for the aborted flag.
bool imgFrame::IsAborted() const {
  MonitorAutoLock lock(mMonitor);
  return mAborted;
}
// Thread-safe accessor for the finished flag.
bool imgFrame::IsFinished() const {
  MonitorAutoLock lock(mMonitor);
  return mFinished;
}
// Blocks until the frame is finished or aborted (woken by Finish()/Abort()
// via NotifyAll()).
void imgFrame::WaitUntilFinished() const {
  MonitorAutoLock lock(mMonitor);

  while (true) {
    // Return if we're aborted or complete.
    if (mAborted || mFinished) {
      return;
    }

    // Not complete yet, so we'll have to wait.
    mMonitor.Wait();
  }
}
// True once the decoded region covers the entire frame. Monitor must be
// held.
bool imgFrame::AreAllPixelsWritten() const {
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(GetRect());
}
// Reports this frame's memory usage (heap bytes plus per-surface accounting)
// to the given callback, for memory-reporter purposes.
void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                      const AddSizeOfCb& aCallback) const {
  MonitorAutoLock lock(mMonitor);

  AddSizeOfCbData metadata;

  metadata.mFinished = mFinished;
  if (mLockedSurface) {
    // The locked surface should only be present if we have mRawSurface. Hence
    // we only need to get its allocation size to avoid double counting.
    metadata.mHeapBytes += aMallocSizeOf(mLockedSurface);
    metadata.AddType(mLockedSurface->GetType());
  }
  if (mOptSurface) {
    metadata.mHeapBytes += aMallocSizeOf(mOptSurface);

    SourceSurface::SizeOfInfo info;
    mOptSurface->SizeOfExcludingThis(aMallocSizeOf, info);
    metadata.Accumulate(info);
  }
  if (mRawSurface) {
    metadata.mHeapBytes += aMallocSizeOf(mRawSurface);

    SourceSurface::SizeOfInfo info;
    mRawSurface->SizeOfExcludingThis(aMallocSizeOf, info);
    metadata.Accumulate(info);
  }

  aCallback(metadata);
}
960 } // namespace image
961 } // namespace mozilla