/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "imgFrame.h"
#include "ImageRegion.h"
#include "ShutdownTracker.h"
#include "SurfaceCache.h"

#include "prenv.h"

#include "gfx2DGlue.h"
#include "gfxPlatform.h"

#include "gfxUtils.h"

#include "GeckoProfiler.h"
#include "MainThreadUtils.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/gfx/gfxVars.h"
#include "mozilla/gfx/Tools.h"
#include "mozilla/gfx/SourceSurfaceRawData.h"
#include "mozilla/image/RecyclingSourceSurface.h"
#include "mozilla/layers/SourceSurfaceSharedData.h"
#include "mozilla/layers/SourceSurfaceVolatileData.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "nsMargin.h"
#include "nsRefreshDriver.h"
#include "nsThreadUtils.h"

namespace mozilla {

using namespace gfx;

namespace image {

static void ScopedMapRelease(void* aMap) {
  delete static_cast<DataSourceSurface::ScopedMap*>(aMap);
}
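// Rounds each row up to the next multiple of four bytes. For example, a
// 10-pixel-wide A8 surface needs 10 bytes per row, which is padded out to a
// 12-byte stride; four-byte-per-pixel formats are already aligned and are
// returned unchanged.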
static int32_t VolatileSurfaceStride(const IntSize& size,
                                     SurfaceFormat format) {
  // Stride must be a multiple of four or cairo will complain.
  return (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
}
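// Returns a surface whose pixel data stays mapped for as long as the returned
// surface is alive. Shared-memory surfaces are returned as-is because their
// data remains accessible for the lifetime of the surface; other surfaces are
// wrapped so that the wrapper owns a ScopedMap, which is destroyed through
// ScopedMapRelease when the wrapper is released.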
static already_AddRefed<DataSourceSurface> CreateLockedSurface(
    DataSourceSurface* aSurface, const IntSize& size, SurfaceFormat format) {
  // Shared memory is never released until the surface itself is released.
  if (aSurface->GetType() == SurfaceType::DATA_SHARED) {
    RefPtr<DataSourceSurface> surf(aSurface);
    return surf.forget();
  }

  DataSourceSurface::ScopedMap* smap =
      new DataSourceSurface::ScopedMap(aSurface, DataSourceSurface::READ_WRITE);
  if (smap->IsMapped()) {
    // The ScopedMap is held by this DataSourceSurface.
    RefPtr<DataSourceSurface> surf = Factory::CreateWrappingDataSourceSurface(
        smap->GetData(), aSurface->Stride(), size, format, &ScopedMapRelease,
        static_cast<void*>(smap));
    if (surf) {
      return surf.forget();
    }
  }

  delete smap;
  return nullptr;
}
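// As a worked example of the threshold check below: a 64x64 BGRA frame has a
// 256-byte stride, so its buffer is 256 * 64 / 1024 = 16 KB, and it is
// allocated on the heap whenever 16 is below
// StaticPrefs::image_mem_volatile_min_threshold_kb().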
static bool ShouldUseHeap(const IntSize& aSize, int32_t aStride,
                          bool aIsAnimated) {
  // On some platforms (e.g. Android), a volatile buffer actually keeps a file
  // handle active. We would like to avoid too many since we could easily
  // exhaust the pool. However, on other platforms we do not have the file
  // handle problem, and additionally we may avoid a superfluous memset since
  // the volatile memory starts out as zero-filled. Hence the knobs below.

  // For as long as an animated image is retained, its frames will never be
  // released to let the OS purge volatile buffers.
  if (aIsAnimated && StaticPrefs::image_mem_animated_use_heap()) {
    return true;
  }

  // Lets us avoid too many small images consuming all of the handles. The
  // actual allocation checks for overflow.
  int32_t bufferSize = (aStride * aSize.height) / 1024;
  if (bufferSize < StaticPrefs::image_mem_volatile_min_threshold_kb()) {
    return true;
  }

  return false;
}
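// Allocates the backing buffer for a frame using one of three strategies:
// shared memory when WebRender and StaticPrefs::image_mem_shared() are in
// effect (so the buffer can be shared with other processes), an aligned heap
// buffer when ShouldUseHeap approves (animated or sufficiently small frames),
// and a volatile buffer otherwise so the OS can purge the pages while the
// frame is not locked.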
static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
    const IntSize& size, SurfaceFormat format, bool aIsAnimated = false) {
  int32_t stride = VolatileSurfaceStride(size, format);

  if (gfxVars::GetUseWebRenderOrDefault() && StaticPrefs::image_mem_shared()) {
    RefPtr<SourceSurfaceSharedData> newSurf = new SourceSurfaceSharedData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  } else if (ShouldUseHeap(size, stride, aIsAnimated)) {
    RefPtr<SourceSurfaceAlignedRawData> newSurf =
        new SourceSurfaceAlignedRawData();
    if (newSurf->Init(size, format, false, 0, stride)) {
      return newSurf.forget();
    }
  } else {
    RefPtr<SourceSurfaceVolatileData> newSurf = new SourceSurfaceVolatileData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  }
  return nullptr;
}
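// Fills the surface with opaque green; used for the "blank" frame that is
// handed out in place of animated content when
// StaticPrefs::browser_measurement_render_anims_and_video_solid() is set (see
// InitForDecoder). 0x00FF00FF corresponds to the bytes 00 FF 00 FF in memory,
// i.e. G = 0xFF and A = 0xFF for the BGRA/RGBA-style layouts, while the
// 0xFF00FF00 variant below covers the ARGB/XRGB layouts.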
static bool GreenSurface(DataSourceSurface* aSurface, const IntSize& aSize,
                         SurfaceFormat aFormat) {
  int32_t stride = aSurface->Stride();
  uint32_t* surfaceData = reinterpret_cast<uint32_t*>(aSurface->GetData());
  uint32_t surfaceDataLength = (stride * aSize.height) / sizeof(uint32_t);

  // Start by assuming that GG is in the second byte and
  // AA is in the final byte -- the most common case.
  uint32_t color = mozilla::NativeEndian::swapFromBigEndian(0x00FF00FF);

  // We are only going to handle this type of test under
  // certain circumstances.
  MOZ_ASSERT(surfaceData);
  MOZ_ASSERT(aFormat == SurfaceFormat::B8G8R8A8 ||
             aFormat == SurfaceFormat::B8G8R8X8 ||
             aFormat == SurfaceFormat::R8G8B8A8 ||
             aFormat == SurfaceFormat::R8G8B8X8 ||
             aFormat == SurfaceFormat::A8R8G8B8 ||
             aFormat == SurfaceFormat::X8R8G8B8);
  MOZ_ASSERT((stride * aSize.height) % sizeof(uint32_t) == 0);

  if (aFormat == SurfaceFormat::A8R8G8B8 ||
      aFormat == SurfaceFormat::X8R8G8B8) {
    color = mozilla::NativeEndian::swapFromBigEndian(0xFF00FF00);
  }

  for (uint32_t i = 0; i < surfaceDataLength; i++) {
    surfaceData[i] = color;
  }

  return true;
}
static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
                         SurfaceFormat aFormat) {
  int32_t stride = aSurface->Stride();
  uint8_t* data = aSurface->GetData();
  MOZ_ASSERT(data);

  if (aFormat == SurfaceFormat::B8G8R8X8) {
    // Skia doesn't support RGBX surfaces, so ensure the alpha value is set
    // to opaque white. While it would be nice to only do this for Skia,
    // imgFrame can run off main thread and past shutdown where
    // we might not have gfxPlatform, so just memset every time instead.
    memset(data, 0xFF, stride * aSize.height);
  } else if (aSurface->OnHeap()) {
    // We only need to memset it if the buffer was allocated on the heap.
    // Otherwise, it's allocated via mmap and refers to a zeroed page and will
    // be COW once it's written to.
    memset(data, 0, stride * aSize.height);
  }

  return true;
}

imgFrame::imgFrame()
    : mMonitor("imgFrame"),
      mDecoded(0, 0, 0, 0),
      mLockCount(0),
      mRecycleLockCount(0),
      mAborted(false),
      mFinished(false),
      mOptimizable(false),
      mShouldRecycle(false),
      mTimeout(FrameTimeout::FromRawMilliseconds(100)),
      mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
      mBlendMethod(BlendMethod::OVER),
      mFormat(SurfaceFormat::UNKNOWN),
      mNonPremult(false) {}

imgFrame::~imgFrame() {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);
#endif
}

nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                                  SurfaceFormat aFormat, bool aNonPremult,
                                  const Maybe<AnimationParams>& aAnimParams,
                                  bool aShouldRecycle) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aImageSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;

  // May be updated shortly after InitForDecoder by BlendAnimationFilter
  // because it needs to take into consideration the previous frames to
  // properly calculate. We start with the whole frame as dirty.
  mDirtyRect = GetRect();

  if (aAnimParams) {
    mBlendRect = aAnimParams->mBlendRect;
    mTimeout = aAnimParams->mTimeout;
    mBlendMethod = aAnimParams->mBlendMethod;
    mDisposalMethod = aAnimParams->mDisposalMethod;
  } else {
    mBlendRect = GetRect();
  }

  if (aShouldRecycle) {
    // If we are recycling then we should always use BGRA for the underlying
    // surface because if we use BGRX, the next frame composited into the
    // surface could be BGRA and cause rendering problems.
    MOZ_ASSERT(aAnimParams);
    mFormat = SurfaceFormat::B8G8R8A8;
  } else {
    mFormat = aFormat;
  }

  mNonPremult = aNonPremult;
  mShouldRecycle = aShouldRecycle;

  MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

  bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
  mRawSurface = AllocateBufferForImage(mImageSize, mFormat, postFirstFrame);
  if (!mRawSurface) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
      aAnimParams) {
    mBlankRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mBlankRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
  if (!mLockedSurface) {
    NS_WARNING("Failed to create LockedSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    mBlankLockedSurface =
        CreateLockedSurface(mBlankRawSurface, mImageSize, mFormat);
    if (!mBlankLockedSurface) {
      NS_WARNING("Failed to create BlankLockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
    NS_WARNING("Could not clear allocated buffer");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    if (!GreenSurface(mBlankRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated blank buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  return NS_OK;
}
nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
  // We want to recycle this frame, but there is no guarantee that consumers
  // are done with it in a timely manner. Let's ensure they are done with it
  // first.
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0);
  MOZ_ASSERT(mLockedSurface);

  if (!mShouldRecycle) {
    // This frame either was never marked as recyclable, or the flag was
    // cleared for a caller which does not support recycling.
    return NS_ERROR_NOT_AVAILABLE;
  }

  if (mRecycleLockCount > 0) {
    if (NS_IsMainThread()) {
      // We should never be both decoding and recycling on the main thread.
      // Sync decoding can only be used to produce the first set of frames.
      // Those either never use recycling because advancing was blocked (main
      // thread is busy) or we were auto-advancing (to seek to a frame) and
      // the frames were never accessed (and thus cannot have recycle locks).
      MOZ_ASSERT_UNREACHABLE("Recycling/decoding on the main thread?");
      return NS_ERROR_NOT_AVAILABLE;
    }

    // We don't want to wait forever to reclaim the frame because we have no
    // idea why it is still held. It is possibly due to OMTP. Since we are off
    // the main thread, and we generally have frames already buffered for the
    // animation, we can afford to wait a short period of time to hopefully
    // complete the transaction and reclaim the buffer.
    //
    // We choose to wait for, at most, the refresh driver interval, so that we
    // won't skip more than one frame. If the frame is still in use due to
    // outstanding transactions, we are already skipping frames. If the frame
    // is still in use for some other purpose, it won't be returned to the
    // pool and its owner can hold onto it forever without additional impact
    // here.
    TimeDuration timeout =
        TimeDuration::FromMilliseconds(nsRefreshDriver::DefaultInterval());
    while (true) {
      TimeStamp start = TimeStamp::Now();
      mMonitor.Wait(timeout);
      if (mRecycleLockCount == 0) {
        break;
      }

      TimeDuration delta = TimeStamp::Now() - start;
      if (delta >= timeout) {
        // We couldn't secure the frame for recycling. It will allocate a new
        // frame instead.
        return NS_ERROR_NOT_AVAILABLE;
      }

      timeout -= delta;
    }
  }

  mBlendRect = aAnimParams.mBlendRect;
  mTimeout = aAnimParams.mTimeout;
  mBlendMethod = aAnimParams.mBlendMethod;
  mDisposalMethod = aAnimParams.mDisposalMethod;
  mDirtyRect = GetRect();

  return NS_OK;
}
nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                                    const nsIntSize& aSize,
                                    const SurfaceFormat aFormat,
                                    SamplingFilter aSamplingFilter,
                                    uint32_t aImageFlags,
                                    gfx::BackendType aBackend) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFormat = aFormat;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    target = gfxPlatform::CreateDrawTargetForData(
        mLockedSurface->GetData(), mImageSize, mLockedSurface->Stride(),
        mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead. This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that
    // in the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
      target = gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
          aBackend, mImageSize, mFormat);
    } else {
      target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
          mImageSize, mFormat);
    }
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx);  // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mImageSize),
                             ImageRegion::Create(ThebesRect(GetRect())),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  } else {
    FinalizeSurface();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());
#endif

  return NS_OK;
}
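// Converts the decoded frame into a surface that is cheaper to draw from
// (possibly a GPU texture) once nothing holds the image data locked. On
// success, the raw and locked surface references are dropped, which lets the
// OS reclaim the volatile memory backing the original buffer.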
nsresult imgFrame::Optimize(DrawTarget* aTarget) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (mLockCount > 0 || !mOptimizable) {
    // Don't optimize right now.
    return NS_OK;
  }

  // Check whether image optimization is disabled -- not thread safe!
  static bool gDisableOptimize = false;
  static bool hasCheckedOptimize = false;
  if (!hasCheckedOptimize) {
    if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
      gDisableOptimize = true;
    }
    hasCheckedOptimize = true;
  }

  // Don't optimize during shutdown because gfxPlatform may not be available.
  if (ShutdownTracker::ShutdownHasStarted()) {
    return NS_OK;
  }

  if (gDisableOptimize) {
    return NS_OK;
  }

  if (mOptSurface) {
    return NS_OK;
  }

  // XXX(seth): It's currently unclear if there's any reason why we can't
  // optimize non-premult surfaces. We should look into removing this.
  if (mNonPremult) {
    return NS_OK;
  }
  if (!gfxVars::UseWebRender()) {
    mOptSurface = aTarget->OptimizeSourceSurface(mLockedSurface);
  } else {
    mOptSurface = gfxPlatform::GetPlatform()
                      ->ScreenReferenceDrawTarget()
                      ->OptimizeSourceSurface(mLockedSurface);
  }
  if (mOptSurface == mLockedSurface) {
    mOptSurface = nullptr;
  }

  if (mOptSurface) {
    // There's no reason to keep our original surface around if we have an
    // optimized surface. Release our reference to it. This will leave
    // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
    // below.
    mRawSurface = nullptr;
  }

  // Release all strong references to the surface's memory. If the underlying
  // surface is volatile, this will allow the operating system to free the
  // memory if it needs to.
  mLockedSurface = nullptr;
  mOptimizable = false;

  return NS_OK;
}

DrawableFrameRef imgFrame::DrawableRef() { return DrawableFrameRef(this); }

RawAccessFrameRef imgFrame::RawAccessRef(bool aOnlyFinished /*= false*/) {
  return RawAccessFrameRef(this, aOnlyFinished);
}

void imgFrame::SetRawAccessOnly() {
  AssertImageDataLocked();

  // Lock our data and throw away the key.
  LockImageData(false);
}

imgFrame::SurfaceWithFormat imgFrame::SurfaceForDrawing(
    bool aDoPartialDecode, bool aDoTile, ImageRegion& aRegion,
    SourceSurface* aSurface) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (!aDoPartialDecode) {
    return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, mImageSize),
                             mFormat);
  }

  gfxRect available =
      gfxRect(mDecoded.X(), mDecoded.Y(), mDecoded.Width(), mDecoded.Height());

  if (aDoTile) {
    // Create a temporary surface.
    // Give this surface an alpha channel because there are
    // transparent pixels in the padding or undecoded area.
    RefPtr<DrawTarget> target =
        gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
            mImageSize, SurfaceFormat::B8G8R8A8);
    if (!target) {
      return SurfaceWithFormat();
    }

    SurfacePattern pattern(aSurface, aRegion.GetExtendMode(),
                           Matrix::Translation(mDecoded.X(), mDecoded.Y()));
    target->FillRect(ToRect(aRegion.Intersect(available).Rect()), pattern);

    RefPtr<SourceSurface> newsurf = target->Snapshot();
    return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf, mImageSize),
                             target->GetFormat());
  }

  // Not tiling, and we have a surface, so we can account for
  // a partial decode just by twiddling parameters.
  aRegion = aRegion.Intersect(available);
  IntSize availableSize(mDecoded.Width(), mDecoded.Height());

  return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, availableSize),
                           mFormat);
}
bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
                    SamplingFilter aSamplingFilter, uint32_t aImageFlags,
                    float aOpacity) {
  AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);

  MOZ_ASSERT(NS_IsMainThread());
  NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
  NS_ASSERTION(!aRegion.IsRestricted() ||
                   !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");

  // Perform the draw and freeing of the surface outside the lock. We want to
  // avoid contention with the decoder if we can. The surface may also attempt
  // to relock the monitor if it is freed (e.g. RecyclingSourceSurface).
  RefPtr<SourceSurface> surf;
  SurfaceWithFormat surfaceResult;
  ImageRegion region(aRegion);
  gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);

  {
    MonitorAutoLock lock(mMonitor);

    // Possibly convert this image into a GPU texture. This may also cause our
    // mLockedSurface to be released and the OS to release the underlying
    // memory.
    Optimize(aContext->GetDrawTarget());

    bool doPartialDecode = !AreAllPixelsWritten();

    // Most draw targets will just use the surface only during DrawPixelSnapped
    // but captures/recordings will retain a reference outside this stack
    // context. While in theory a decoder thread could be trying to recycle
    // this frame at this very moment, in practice the only way we can get here
    // is if this frame is the current frame of the animation. Since we can
    // only advance on the main thread, we know nothing else will try to use
    // it.
    DrawTarget* drawTarget = aContext->GetDrawTarget();
    bool recording = drawTarget->GetBackendType() == BackendType::RECORDING;
    bool temporary = !drawTarget->IsCaptureDT() && !recording;
    surf = GetSourceSurfaceInternal(temporary);
    if (!surf) {
      return false;
    }

    bool doTile = !imageRect.Contains(aRegion.Rect()) &&
                  !(aImageFlags & imgIContainer::FLAG_CLAMP);

    surfaceResult = SurfaceForDrawing(doPartialDecode, doTile, region, surf);

    // If we are recording, then we cannot recycle the surface. The blob
    // rasterizer is not properly synchronized for recycling in the compositor
    // process. The easiest thing to do is just mark the frames it consumes as
    // non-recyclable.
    if (recording && surfaceResult.IsValid()) {
      mShouldRecycle = false;
    }
  }

  if (surfaceResult.IsValid()) {
    gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
                               imageRect.Size(), region, surfaceResult.mFormat,
                               aSamplingFilter, aImageFlags, aOpacity);
  }

  return true;
}
nsresult imgFrame::ImageUpdated(const nsIntRect& aUpdateRect) {
  MonitorAutoLock lock(mMonitor);
  return ImageUpdatedInternal(aUpdateRect);
}

nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {
  mMonitor.AssertCurrentThreadOwns();

  // Clamp to the frame rect to ensure that decoder bugs don't result in a
  // decoded rect that extends outside the bounds of the frame rect.
  IntRect updateRect = aUpdateRect.Intersect(GetRect());
  if (updateRect.IsEmpty()) {
    return NS_OK;
  }

  mDecoded.UnionRect(mDecoded, updateRect);

  // Update our invalidation counters for any consumers watching for changes
  // in the surface.
  if (mRawSurface) {
    mRawSurface->Invalidate(updateRect);
  }
  if (mLockedSurface && mRawSurface != mLockedSurface) {
    mLockedSurface->Invalidate(updateRect);
  }
  return NS_OK;
}

void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                      bool aFinalize /* = true */) {
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  IntRect frameRect(GetRect());
  if (!mDecoded.IsEqualEdges(frameRect)) {
    // The decoder should have produced rows starting from either the bottom
    // or the top of the image. We need to calculate the region for which we
    // have not yet invalidated.
    IntRect delta(0, 0, frameRect.width, 0);
    if (mDecoded.y == 0) {
      delta.y = mDecoded.height;
      delta.height = frameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == frameRect.height) {
      delta.height = frameRect.height - mDecoded.y;
    } else {
      MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
      delta = frameRect;
    }

    ImageUpdatedInternal(delta);
  }

  MOZ_ASSERT(mDecoded.IsEqualEdges(frameRect));

  if (aFinalize) {
    FinalizeSurfaceInternal();
  }

  mFinished = true;

  // The image is now complete, wake up anyone who's waiting.
  mMonitor.NotifyAll();
}

uint32_t imgFrame::GetImageBytesPerRow() const {
  mMonitor.AssertCurrentThreadOwns();

  if (mRawSurface) {
    return mImageSize.width * BytesPerPixel(mFormat);
  }

  return 0;
}

uint32_t imgFrame::GetImageDataLength() const {
  return GetImageBytesPerRow() * mImageSize.height;
}

void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
  MonitorAutoLock lock(mMonitor);
  GetImageDataInternal(aData, aLength);
}

void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
  MOZ_ASSERT(mLockedSurface);

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on
    // the main thread after decoding has finished, but if animations want to
    // read frame data off the main thread, we will need to reconsider this.
    *aData = mLockedSurface->GetData();
    MOZ_ASSERT(
        *aData,
        "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else {
    *aData = nullptr;
  }

  *aLength = GetImageDataLength();
}

uint8_t* imgFrame::GetImageData() const {
  uint8_t* data;
  uint32_t length;
  GetImageData(&data, &length);
  return data;
}
uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
  if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
    return nullptr;
  }

  uint8_t* data;
  if (mLockedSurface) {
    data = mLockedSurface->GetData();
  } else {
    data = nullptr;
  }

  // If the raw data is still available, we should get a valid pointer for it.
  if (!data) {
    MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
    return nullptr;
  }

  ++mLockCount;
  return data;
}

void imgFrame::AssertImageDataLocked() const {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}

nsresult imgFrame::UnlockImageData() {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
  if (mLockCount <= 0) {
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
             "Should have Finish()'d or aborted before unlocking");

  mLockCount--;

  return NS_OK;
}

void imgFrame::SetOptimizable() {
  AssertImageDataLocked();
  MonitorAutoLock lock(mMonitor);
  mOptimizable = true;
}
void imgFrame::FinalizeSurface() {
  MonitorAutoLock lock(mMonitor);
  FinalizeSurfaceInternal();
}

void imgFrame::FinalizeSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  // Not all images will have mRawSurface to finalize (e.g. paletted images).
  if (mShouldRecycle || !mRawSurface ||
      mRawSurface->GetType() != SurfaceType::DATA_SHARED) {
    return;
  }

  auto sharedSurf = static_cast<SourceSurfaceSharedData*>(mRawSurface.get());
  sharedSurf->Finalize();
}
already_AddRefed<SourceSurface> imgFrame::GetSourceSurface() {
  MonitorAutoLock lock(mMonitor);
  return GetSourceSurfaceInternal(/* aTemporary */ false);
}

already_AddRefed<SourceSurface> imgFrame::GetSourceSurfaceInternal(
    bool aTemporary) {
  mMonitor.AssertCurrentThreadOwns();

  if (mOptSurface) {
    if (mOptSurface->IsValid()) {
      RefPtr<SourceSurface> surf(mOptSurface);
      return surf.forget();
    } else {
      mOptSurface = nullptr;
    }
  }

  if (mBlankLockedSurface) {
    // We are going to return the blank surface because of the flags.
    // We are including comments here that are copied from below
    // just so that we are on the same page!

    // We don't need to create recycling wrapper for some callers because they
    // promise to release the surface immediately after.
    if (!aTemporary && mShouldRecycle) {
      RefPtr<SourceSurface> surf =
          new RecyclingSourceSurface(this, mBlankLockedSurface);
      return surf.forget();
    }

    RefPtr<SourceSurface> surf(mBlankLockedSurface);
    return surf.forget();
  }

  if (mLockedSurface) {
    // We don't need to create recycling wrapper for some callers because they
    // promise to release the surface immediately after.
    if (!aTemporary && mShouldRecycle) {
      RefPtr<SourceSurface> surf =
          new RecyclingSourceSurface(this, mLockedSurface);
      return surf.forget();
    }

    RefPtr<SourceSurface> surf(mLockedSurface);
    return surf.forget();
  }

  MOZ_ASSERT(!mShouldRecycle, "Should recycle but no locked surface!");

  if (!mRawSurface) {
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mImageSize, mFormat);
}

void imgFrame::Abort() {
  MonitorAutoLock lock(mMonitor);

  mAborted = true;

  // Wake up anyone who's waiting.
  mMonitor.NotifyAll();
}

bool imgFrame::IsAborted() const {
  MonitorAutoLock lock(mMonitor);
  return mAborted;
}

bool imgFrame::IsFinished() const {
  MonitorAutoLock lock(mMonitor);
  return mFinished;
}

void imgFrame::WaitUntilFinished() const {
  MonitorAutoLock lock(mMonitor);

  while (true) {
    // Return if we're aborted or complete.
    if (mAborted || mFinished) {
      return;
    }

    // Not complete yet, so we'll have to wait.
    mMonitor.Wait();
  }
}

bool imgFrame::AreAllPixelsWritten() const {
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(GetRect());
}

void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                      const AddSizeOfCb& aCallback) const {
  MonitorAutoLock lock(mMonitor);

  AddSizeOfCbData metadata;
  if (mLockedSurface) {
    metadata.heap += aMallocSizeOf(mLockedSurface);
  }
  if (mOptSurface) {
    metadata.heap += aMallocSizeOf(mOptSurface);
  }
  if (mRawSurface) {
    metadata.heap += aMallocSizeOf(mRawSurface);
    mRawSurface->AddSizeOfExcludingThis(aMallocSizeOf, metadata.heap,
                                        metadata.nonHeap, metadata.handles,
                                        metadata.externalId);
  }

  aCallback(metadata);
}
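// Each live RecyclingSourceSurface holds a recycle lock on its parent frame.
// The destructor drops that lock and notifies the monitor so that
// InitForDecoderRecycle, which waits for mRecycleLockCount to reach zero, can
// reuse the frame's buffer for a new frame.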
RecyclingSourceSurface::RecyclingSourceSurface(imgFrame* aParent,
                                               DataSourceSurface* aSurface)
    : mParent(aParent), mSurface(aSurface), mType(SurfaceType::DATA) {
  mParent->mMonitor.AssertCurrentThreadOwns();
  ++mParent->mRecycleLockCount;

  if (aSurface->GetType() == SurfaceType::DATA_SHARED) {
    mType = SurfaceType::DATA_RECYCLING_SHARED;
  }
}

RecyclingSourceSurface::~RecyclingSourceSurface() {
  MonitorAutoLock lock(mParent->mMonitor);
  MOZ_ASSERT(mParent->mRecycleLockCount > 0);
  if (--mParent->mRecycleLockCount == 0) {
    mParent->mMonitor.NotifyAll();
  }
}

}  // namespace image
}  // namespace mozilla