Bug 1529208 [wpt PR 15469] - [Code Health] Fix incorrect test name, a=testonly
[gecko.git] / image / imgFrame.cpp
blob65ebe64a3473c11747b4003e63e7d9879f4fba23
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "imgFrame.h"
8 #include "ImageRegion.h"
9 #include "ShutdownTracker.h"
10 #include "SurfaceCache.h"
12 #include "prenv.h"
14 #include "gfx2DGlue.h"
15 #include "gfxPlatform.h"
16 #include "gfxPrefs.h"
17 #include "gfxUtils.h"
19 #include "GeckoProfiler.h"
20 #include "MainThreadUtils.h"
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/gfx/gfxVars.h"
23 #include "mozilla/gfx/Tools.h"
24 #include "mozilla/gfx/SourceSurfaceRawData.h"
25 #include "mozilla/image/RecyclingSourceSurface.h"
26 #include "mozilla/layers/SourceSurfaceSharedData.h"
27 #include "mozilla/layers/SourceSurfaceVolatileData.h"
28 #include "mozilla/Likely.h"
29 #include "mozilla/MemoryReporting.h"
30 #include "nsMargin.h"
31 #include "nsRefreshDriver.h"
32 #include "nsThreadUtils.h"
34 namespace mozilla {
36 using namespace gfx;
38 namespace image {
40 static void ScopedMapRelease(void* aMap) {
41 delete static_cast<DataSourceSurface::ScopedMap*>(aMap);
44 static int32_t VolatileSurfaceStride(const IntSize& size,
45 SurfaceFormat format) {
46 // Stride must be a multiple of four or cairo will complain.
47 return (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
// Returns a surface whose pixel data is mapped and stays mapped for the
// lifetime of the returned surface. Shared-memory surfaces are returned
// as-is; other surfaces are wrapped so that a heap-allocated ScopedMap keeps
// the mapping alive until ScopedMapRelease destroys it.
// Returns nullptr if the surface could not be mapped.
static already_AddRefed<DataSourceSurface> CreateLockedSurface(
    DataSourceSurface* aSurface, const IntSize& size, SurfaceFormat format) {
  // Shared memory is never released until the surface itself is released
  if (aSurface->GetType() == SurfaceType::DATA_SHARED) {
    RefPtr<DataSourceSurface> surf(aSurface);
    return surf.forget();
  }

  // Deliberately heap-allocated: ownership passes to the wrapping surface,
  // which frees it via ScopedMapRelease.
  DataSourceSurface::ScopedMap* smap =
      new DataSourceSurface::ScopedMap(aSurface, DataSourceSurface::READ_WRITE);
  if (smap->IsMapped()) {
    // The ScopedMap is held by this DataSourceSurface.
    RefPtr<DataSourceSurface> surf = Factory::CreateWrappingDataSourceSurface(
        smap->GetData(), aSurface->Stride(), size, format, &ScopedMapRelease,
        static_cast<void*>(smap));
    if (surf) {
      return surf.forget();
    }
  }

  // Mapping or wrapping failed; reclaim the map ourselves.
  delete smap;
  return nullptr;
}
74 static bool ShouldUseHeap(const IntSize& aSize, int32_t aStride,
75 bool aIsAnimated) {
76 // On some platforms (i.e. Android), a volatile buffer actually keeps a file
77 // handle active. We would like to avoid too many since we could easily
78 // exhaust the pool. However, other platforms we do not have the file handle
79 // problem, and additionally we may avoid a superfluous memset since the
80 // volatile memory starts out as zero-filled. Hence the knobs below.
82 // For as long as an animated image is retained, its frames will never be
83 // released to let the OS purge volatile buffers.
84 if (aIsAnimated && gfxPrefs::ImageMemAnimatedUseHeap()) {
85 return true;
88 // Lets us avoid too many small images consuming all of the handles. The
89 // actual allocation checks for overflow.
90 int32_t bufferSize = (aStride * aSize.width) / 1024;
91 if (bufferSize < gfxPrefs::ImageMemVolatileMinThresholdKB()) {
92 return true;
95 return false;
// Allocates the backing pixel buffer for a frame, choosing among:
//  - shared memory (when WebRender/shared-surfaces are enabled and this is a
//    full frame), so the data can be handed to the compositor process;
//  - heap memory, per ShouldUseHeap's small-image/animation heuristics;
//  - volatile memory otherwise, which the OS may purge when unlocked.
// Returns nullptr if the chosen allocation fails (no fallback is attempted).
static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
    const IntSize& size, SurfaceFormat format, bool aIsAnimated = false,
    bool aIsFullFrame = true) {
  int32_t stride = VolatileSurfaceStride(size, format);

  if (gfxVars::GetUseWebRenderOrDefault() && gfxPrefs::ImageMemShared() &&
      aIsFullFrame) {
    RefPtr<SourceSurfaceSharedData> newSurf = new SourceSurfaceSharedData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  } else if (ShouldUseHeap(size, stride, aIsAnimated)) {
    RefPtr<SourceSurfaceAlignedRawData> newSurf =
        new SourceSurfaceAlignedRawData();
    if (newSurf->Init(size, format, false, 0, stride)) {
      return newSurf.forget();
    }
  } else {
    RefPtr<SourceSurfaceVolatileData> newSurf = new SourceSurfaceVolatileData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  }
  return nullptr;
}
124 static bool GreenSurface(DataSourceSurface* aSurface, const IntSize& aSize,
125 SurfaceFormat aFormat) {
126 int32_t stride = aSurface->Stride();
127 uint32_t* surfaceData = reinterpret_cast<uint32_t*>(aSurface->GetData());
128 uint32_t surfaceDataLength = (stride * aSize.height) / sizeof(uint32_t);
130 // Start by assuming that GG is in the second byte and
131 // AA is in the final byte -- the most common case.
132 uint32_t color = mozilla::NativeEndian::swapFromBigEndian(0x00FF00FF);
134 // We are only going to handle this type of test under
135 // certain circumstances.
136 MOZ_ASSERT(surfaceData);
137 MOZ_ASSERT(aFormat == SurfaceFormat::B8G8R8A8 ||
138 aFormat == SurfaceFormat::B8G8R8X8 ||
139 aFormat == SurfaceFormat::R8G8B8A8 ||
140 aFormat == SurfaceFormat::R8G8B8X8 ||
141 aFormat == SurfaceFormat::A8R8G8B8 ||
142 aFormat == SurfaceFormat::X8R8G8B8);
143 MOZ_ASSERT((stride * aSize.height) % sizeof(uint32_t));
145 if (aFormat == SurfaceFormat::A8R8G8B8 ||
146 aFormat == SurfaceFormat::X8R8G8B8) {
147 color = mozilla::NativeEndian::swapFromBigEndian(0xFF00FF00);
150 for (uint32_t i = 0; i < surfaceDataLength; i++) {
151 surfaceData[i] = color;
154 return true;
157 static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
158 SurfaceFormat aFormat) {
159 int32_t stride = aSurface->Stride();
160 uint8_t* data = aSurface->GetData();
161 MOZ_ASSERT(data);
163 if (aFormat == SurfaceFormat::B8G8R8X8) {
164 // Skia doesn't support RGBX surfaces, so ensure the alpha value is set
165 // to opaque white. While it would be nice to only do this for Skia,
166 // imgFrame can run off main thread and past shutdown where
167 // we might not have gfxPlatform, so just memset everytime instead.
168 memset(data, 0xFF, stride * aSize.height);
169 } else if (aSurface->OnHeap()) {
170 // We only need to memset it if the buffer was allocated on the heap.
171 // Otherwise, it's allocated via mmap and refers to a zeroed page and will
172 // be COW once it's written to.
173 memset(data, 0, stride * aSize.height);
176 return true;
179 static bool AllowedImageAndFrameDimensions(const nsIntSize& aImageSize,
180 const nsIntRect& aFrameRect) {
181 if (!SurfaceCache::IsLegalSize(aImageSize)) {
182 return false;
184 if (!SurfaceCache::IsLegalSize(aFrameRect.Size())) {
185 return false;
187 nsIntRect imageRect(0, 0, aImageSize.width, aImageSize.height);
188 if (!imageRect.Contains(aFrameRect)) {
189 NS_WARNING("Animated image frame does not fit inside bounds of image");
191 return true;
// Default-constructs an empty frame. All surface pointers start null; the
// frame must be initialized via InitForDecoder() or InitWithDrawable() before
// use. The 100ms timeout is the conventional fallback frame delay.
imgFrame::imgFrame()
    : mMonitor("imgFrame"),
      mDecoded(0, 0, 0, 0),
      mLockCount(0),
      mRecycleLockCount(0),
      mAborted(false),
      mFinished(false),
      mOptimizable(false),
      mShouldRecycle(false),
      mTimeout(FrameTimeout::FromRawMilliseconds(100)),
      mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
      mBlendMethod(BlendMethod::OVER),
      mFormat(SurfaceFormat::UNKNOWN),
      mPalettedImageData(nullptr),
      mPaletteDepth(0),
      mNonPremult(false),
      mIsFullFrame(false),
      mCompositingFailed(false) {}
imgFrame::~imgFrame() {
#ifdef DEBUG
  // A frame should only be destroyed after it was fully decoded and
  // finished, unless decoding was aborted.
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);
#endif

  // Paletted data is a plain calloc'd buffer, not a surface; free it here.
  free(mPalettedImageData);
  mPalettedImageData = nullptr;
}
// Initializes this frame on behalf of a decoder.
//  aImageSize     - full image dimensions.
//  aRect          - this frame's rect within the image.
//  aFormat        - requested pixel format (overridden to BGRA if recycling).
//  aPaletteDepth  - 0 for BGRA/BGRX frames; 1-8 bits for paletted frames.
//  aNonPremult    - whether pixel data is non-premultiplied alpha.
//  aAnimParams    - animation metadata, if this is an animated frame.
//  aIsFullFrame   - whether this frame covers the whole image.
//  aShouldRecycle - whether the buffer may be reused for later frames.
// Returns NS_ERROR_FAILURE / NS_ERROR_OUT_OF_MEMORY on failure, and marks
// the frame aborted for most failure paths.
nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                                  const nsIntRect& aRect, SurfaceFormat aFormat,
                                  uint8_t aPaletteDepth, bool aNonPremult,
                                  const Maybe<AnimationParams>& aAnimParams,
                                  bool aIsFullFrame, bool aShouldRecycle) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!AllowedImageAndFrameDimensions(aImageSize, aRect)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;
  mFrameRect = aRect;

  // May be updated shortly after InitForDecoder by BlendAnimationFilter
  // because it needs to take into consideration the previous frames to
  // properly calculate. We start with the whole frame as dirty.
  mDirtyRect = aRect;

  if (aAnimParams) {
    mBlendRect = aAnimParams->mBlendRect;
    mTimeout = aAnimParams->mTimeout;
    mBlendMethod = aAnimParams->mBlendMethod;
    mDisposalMethod = aAnimParams->mDisposalMethod;
    // The first frame of an animation is always a full frame.
    mIsFullFrame = aAnimParams->mFrameNum == 0 || aIsFullFrame;
  } else {
    mBlendRect = aRect;
    mIsFullFrame = true;
  }

  // We only allow a non-trivial frame rect (i.e., a frame rect that doesn't
  // cover the entire image) for paletted animation frames. We never draw those
  // frames directly; we just use FrameAnimator to composite them and produce a
  // BGRA surface that we actually draw. We enforce this here to make sure that
  // imgFrame::Draw(), which is responsible for drawing all other kinds of
  // frames, never has to deal with a non-trivial frame rect.
  if (aPaletteDepth == 0 &&
      !mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize))) {
    MOZ_ASSERT_UNREACHABLE(
        "Creating a non-paletted imgFrame with a "
        "non-trivial frame rect");
    return NS_ERROR_FAILURE;
  }

  if (aShouldRecycle) {
    // If we are recycling then we should always use BGRA for the underlying
    // surface because if we use BGRX, the next frame composited into the
    // surface could be BGRA and cause rendering problems.
    MOZ_ASSERT(mIsFullFrame);
    MOZ_ASSERT(aPaletteDepth == 0);
    MOZ_ASSERT(aAnimParams);
    mFormat = SurfaceFormat::B8G8R8A8;
  } else {
    mFormat = aFormat;
  }

  mPaletteDepth = aPaletteDepth;
  mNonPremult = aNonPremult;
  mShouldRecycle = aShouldRecycle;

  if (aPaletteDepth != 0) {
    // We're creating for a paletted image.
    if (aPaletteDepth > 8) {
      NS_WARNING("Should have legal palette depth");
      NS_ERROR("This Depth is not supported");
      mAborted = true;
      return NS_ERROR_FAILURE;
    }

    // Use the fallible allocator here. Paletted images always use 1 byte per
    // pixel, so calculating the amount of memory we need is straightforward.
    size_t dataSize = PaletteDataLength() + mFrameRect.Area();
    mPalettedImageData =
        static_cast<uint8_t*>(calloc(dataSize, sizeof(uint8_t)));
    if (!mPalettedImageData) {
      NS_WARNING("Call to calloc for paletted image data should succeed");
    }
    NS_ENSURE_TRUE(mPalettedImageData, NS_ERROR_OUT_OF_MEMORY);
  } else {
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

    bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat,
                                         postFirstFrame, mIsFullFrame);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    // NOTE(review): this pref gates a debug mode that replaces animated
    // frames with a solid "blank" surface (see GreenSurface below).
    if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
        aAnimParams) {
      mBlankRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
      if (!mBlankRawSurface) {
        mAborted = true;
        return NS_ERROR_OUT_OF_MEMORY;
      }
    }

    mLockedSurface =
        CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (mBlankRawSurface) {
      mBlankLockedSurface =
          CreateLockedSurface(mBlankRawSurface, mFrameRect.Size(), mFormat);
      if (!mBlankLockedSurface) {
        NS_WARNING("Failed to create BlankLockedSurface");
        mAborted = true;
        return NS_ERROR_OUT_OF_MEMORY;
      }
    }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (mBlankRawSurface) {
      if (!GreenSurface(mBlankRawSurface, mFrameRect.Size(), mFormat)) {
        NS_WARNING("Could not clear allocated blank buffer");
        mAborted = true;
        return NS_ERROR_OUT_OF_MEMORY;
      }
    }
  }

  return NS_OK;
}
// Re-initializes this frame's animation metadata so its buffer can be reused
// for a new frame. Fails with NS_ERROR_NOT_AVAILABLE if recycling is disabled
// or if outstanding RecyclingSourceSurface consumers don't release the buffer
// within roughly one refresh-driver interval.
nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
  // We want to recycle this frame, but there is no guarantee that consumers are
  // done with it in a timely manner. Let's ensure they are done with it first.
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mIsFullFrame);
  MOZ_ASSERT(mLockCount > 0);
  MOZ_ASSERT(mLockedSurface);

  if (!mShouldRecycle) {
    // This frame either was never marked as recyclable, or the flag was cleared
    // for a caller which does not support recycling.
    return NS_ERROR_NOT_AVAILABLE;
  }

  if (mRecycleLockCount > 0) {
    if (NS_IsMainThread()) {
      // We should never be both decoding and recycling on the main thread. Sync
      // decoding can only be used to produce the first set of frames. Those
      // either never use recycling because advancing was blocked (main thread
      // is busy) or we were auto-advancing (to seek to a frame) and the frames
      // were never accessed (and thus cannot have recycle locks).
      MOZ_ASSERT_UNREACHABLE("Recycling/decoding on the main thread?");
      return NS_ERROR_NOT_AVAILABLE;
    }

    // We don't want to wait forever to reclaim the frame because we have no
    // idea why it is still held. It is possibly due to OMTP. Since we are off
    // the main thread, and we generally have frames already buffered for the
    // animation, we can afford to wait a short period of time to hopefully
    // complete the transaction and reclaim the buffer.
    //
    // We choose to wait for, at most, the refresh driver interval, so that we
    // won't skip more than one frame. If the frame is still in use due to
    // outstanding transactions, we are already skipping frames. If the frame
    // is still in use for some other purpose, it won't be returned to the pool
    // and its owner can hold onto it forever without additional impact here.
    TimeDuration timeout =
        TimeDuration::FromMilliseconds(nsRefreshDriver::DefaultInterval());
    while (true) {
      TimeStamp start = TimeStamp::Now();
      // The monitor is released while waiting; ~RecyclingSourceSurface
      // notifies when the recycle lock count drops to zero.
      mMonitor.Wait(timeout);
      if (mRecycleLockCount == 0) {
        break;
      }

      TimeDuration delta = TimeStamp::Now() - start;
      if (delta >= timeout) {
        // We couldn't secure the frame for recycling. It will allocate a new
        // frame instead.
        return NS_ERROR_NOT_AVAILABLE;
      }

      // Spurious wakeup: wait again for the remainder of the interval.
      timeout -= delta;
    }
  }

  mBlendRect = aAnimParams.mBlendRect;
  mTimeout = aAnimParams.mTimeout;
  mBlendMethod = aAnimParams.mBlendMethod;
  mDisposalMethod = aAnimParams.mDisposalMethod;
  // The whole frame is dirty until the new contents are composited in.
  mDirtyRect = mFrameRect;

  return NS_OK;
}
// Initializes this frame by rasterizing aDrawable into it. Uses a data
// surface when the backend supports it; otherwise draws into an offscreen
// (GPU) target whose snapshot becomes our "optimized" surface, which may
// require an expensive readback if raw access is requested later.
nsresult imgFrame::InitWithDrawable(
    gfxDrawable* aDrawable, const nsIntSize& aSize, const SurfaceFormat aFormat,
    SamplingFilter aSamplingFilter, uint32_t aImageFlags,
    gfx::BackendType aBackend, DrawTarget* aTargetDT) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFrameRect = IntRect(IntPoint(0, 0), aSize);

  mFormat = aFormat;
  mPaletteDepth = 0;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface =
        CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    target = gfxPlatform::CreateDrawTargetForData(
        mLockedSurface->GetData(), mFrameRect.Size(), mLockedSurface->Stride(),
        mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead. This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that in
    // the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (aTargetDT && !gfxVars::UseWebRender()) {
      target = aTargetDT->CreateSimilarDrawTarget(mFrameRect.Size(), mFormat);
    } else {
      if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
        target = gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
            aBackend, mFrameRect.Size(), mFormat);
      } else {
        target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
            mFrameRect.Size(), mFormat);
      }
    }
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx);  // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mFrameRect.Size()),
                             ImageRegion::Create(ThebesRect(mFrameRect)),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  } else {
    FinalizeSurface();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());
#endif

  return NS_OK;
}
// Converts our raw surface into a backend-optimized surface (e.g. a GPU
// texture) and drops the raw/locked references so volatile memory can be
// purged. Main-thread only; caller must hold mMonitor. A no-op (NS_OK)
// whenever optimization is unnecessary, disabled, or unsafe.
nsresult imgFrame::Optimize(DrawTarget* aTarget) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (mLockCount > 0 || !mOptimizable) {
    // Don't optimize right now.
    return NS_OK;
  }

  // Check whether image optimization is disabled -- not thread safe!
  static bool gDisableOptimize = false;
  static bool hasCheckedOptimize = false;
  if (!hasCheckedOptimize) {
    if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
      gDisableOptimize = true;
    }
    hasCheckedOptimize = true;
  }

  // Don't optimize during shutdown because gfxPlatform may not be available.
  if (ShutdownTracker::ShutdownHasStarted()) {
    return NS_OK;
  }

  if (gDisableOptimize) {
    return NS_OK;
  }

  if (mPalettedImageData || mOptSurface) {
    return NS_OK;
  }

  // XXX(seth): It's currently unclear if there's any reason why we can't
  // optimize non-premult surfaces. We should look into removing this.
  if (mNonPremult) {
    return NS_OK;
  }

  if (!gfxVars::UseWebRender()) {
    mOptSurface = aTarget->OptimizeSourceSurface(mLockedSurface);
  } else {
    mOptSurface = gfxPlatform::GetPlatform()
                      ->ScreenReferenceDrawTarget()
                      ->OptimizeSourceSurface(mLockedSurface);
  }
  // OptimizeSourceSurface may return its input unchanged; that's not an
  // optimized surface, so discard it.
  if (mOptSurface == mLockedSurface) {
    mOptSurface = nullptr;
  }

  if (mOptSurface) {
    // There's no reason to keep our original surface around if we have an
    // optimized surface. Release our reference to it. This will leave
    // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
    // below.
    mRawSurface = nullptr;
  }

  // Release all strong references to the surface's memory. If the underlying
  // surface is volatile, this will allow the operating system to free the
  // memory if it needs to.
  mLockedSurface = nullptr;
  mOptimizable = false;

  return NS_OK;
}
// Returns a ref that keeps this frame in a drawable state.
DrawableFrameRef imgFrame::DrawableRef() { return DrawableFrameRef(this); }
// Returns a ref granting raw pixel-data access; with aOnlyFinished, the ref
// is only granted once the frame has finished decoding.
RawAccessFrameRef imgFrame::RawAccessRef(bool aOnlyFinished /*= false*/) {
  return RawAccessFrameRef(this, aOnlyFinished);
}
// Permanently pins the frame's raw data in memory by taking a lock that is
// never released, preventing Optimize() from discarding it.
void imgFrame::SetRawAccessOnly() {
  AssertImageDataLocked();

  // Lock our data and throw away the key.
  LockImageData(false);
}
// Produces the drawable + format pair Draw() should use, accounting for
// partially decoded frames. For tiled draws of a partial frame, composites
// the decoded region into a temporary BGRA surface; otherwise just clips
// aRegion to the decoded area. Caller must hold mMonitor; main-thread only.
imgFrame::SurfaceWithFormat imgFrame::SurfaceForDrawing(
    bool aDoPartialDecode, bool aDoTile, ImageRegion& aRegion,
    SourceSurface* aSurface) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (!aDoPartialDecode) {
    return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, mImageSize),
                             mFormat);
  }

  gfxRect available =
      gfxRect(mDecoded.X(), mDecoded.Y(), mDecoded.Width(), mDecoded.Height());

  if (aDoTile) {
    // Create a temporary surface.
    // Give this surface an alpha channel because there are
    // transparent pixels in the padding or undecoded area
    RefPtr<DrawTarget> target =
        gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
            mImageSize, SurfaceFormat::B8G8R8A8);
    if (!target) {
      return SurfaceWithFormat();
    }

    SurfacePattern pattern(aSurface, aRegion.GetExtendMode(),
                           Matrix::Translation(mDecoded.X(), mDecoded.Y()));
    target->FillRect(ToRect(aRegion.Intersect(available).Rect()), pattern);

    RefPtr<SourceSurface> newsurf = target->Snapshot();
    return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf, mImageSize),
                             target->GetFormat());
  }

  // Not tiling, and we have a surface, so we can account for
  // a partial decode just by twiddling parameters.
  aRegion = aRegion.Intersect(available);
  IntSize availableSize(mDecoded.Width(), mDecoded.Height());

  return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, availableSize),
                           mFormat);
}
654 bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
655 SamplingFilter aSamplingFilter, uint32_t aImageFlags,
656 float aOpacity) {
657 AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);
659 MOZ_ASSERT(NS_IsMainThread());
660 NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
661 NS_ASSERTION(!aRegion.IsRestricted() ||
662 !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
663 "We must be allowed to sample *some* source pixels!");
664 MOZ_ASSERT(mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize)),
665 "Directly drawing an image with a non-trivial frame rect!");
667 if (mPalettedImageData) {
668 MOZ_ASSERT_UNREACHABLE("Directly drawing a paletted image!");
669 return false;
672 // Perform the draw and freeing of the surface outside the lock. We want to
673 // avoid contention with the decoder if we can. The surface may also attempt
674 // to relock the monitor if it is freed (e.g. RecyclingSourceSurface).
675 RefPtr<SourceSurface> surf;
676 SurfaceWithFormat surfaceResult;
677 ImageRegion region(aRegion);
678 gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);
681 MonitorAutoLock lock(mMonitor);
683 // Possibly convert this image into a GPU texture, this may also cause our
684 // mLockedSurface to be released and the OS to release the underlying
685 // memory.
686 Optimize(aContext->GetDrawTarget());
688 bool doPartialDecode = !AreAllPixelsWritten();
690 // Most draw targets will just use the surface only during DrawPixelSnapped
691 // but captures/recordings will retain a reference outside this stack
692 // context. While in theory a decoder thread could be trying to recycle this
693 // frame at this very moment, in practice the only way we can get here is if
694 // this frame is the current frame of the animation. Since we can only
695 // advance on the main thread, we know nothing else will try to use it.
696 DrawTarget* drawTarget = aContext->GetDrawTarget();
697 bool recording = drawTarget->GetBackendType() == BackendType::RECORDING;
698 bool temporary = !drawTarget->IsCaptureDT() && !recording;
699 RefPtr<SourceSurface> surf = GetSourceSurfaceInternal(temporary);
700 if (!surf) {
701 return false;
704 bool doTile = !imageRect.Contains(aRegion.Rect()) &&
705 !(aImageFlags & imgIContainer::FLAG_CLAMP);
707 surfaceResult = SurfaceForDrawing(doPartialDecode, doTile, region, surf);
709 // If we are recording, then we cannot recycle the surface. The blob
710 // rasterizer is not properly synchronized for recycling in the compositor
711 // process. The easiest thing to do is just mark the frames it consumes as
712 // non-recyclable.
713 if (recording && surfaceResult.IsValid()) {
714 mShouldRecycle = false;
718 if (surfaceResult.IsValid()) {
719 gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
720 imageRect.Size(), region, surfaceResult.mFormat,
721 aSamplingFilter, aImageFlags, aOpacity);
724 return true;
// Public, locking wrapper around ImageUpdatedInternal.
nsresult imgFrame::ImageUpdated(const nsIntRect& aUpdateRect) {
  MonitorAutoLock lock(mMonitor);
  return ImageUpdatedInternal(aUpdateRect);
}
// Records that aUpdateRect of the frame now contains decoded data: grows
// mDecoded and invalidates the backing surfaces so consumers notice the
// change. Caller must hold mMonitor.
nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {
  mMonitor.AssertCurrentThreadOwns();

  // Clamp to the frame rect to ensure that decoder bugs don't result in a
  // decoded rect that extends outside the bounds of the frame rect.
  IntRect updateRect = mFrameRect.Intersect(aUpdateRect);
  if (updateRect.IsEmpty()) {
    return NS_OK;
  }

  mDecoded.UnionRect(mDecoded, updateRect);

  // Paletted images cannot invalidate.
  if (mPalettedImageData) {
    return NS_OK;
  }

  // Update our invalidation counters for any consumers watching for changes
  // in the surface.
  if (mRawSurface) {
    mRawSurface->Invalidate(updateRect);
  }
  // Avoid double-invalidating when the locked surface wraps the raw one.
  if (mLockedSurface && mRawSurface != mLockedSurface) {
    mLockedSurface->Invalidate(updateRect);
  }
  return NS_OK;
}
// Marks the frame as completely decoded, back-filling any region the decoder
// did not explicitly invalidate, optionally finalizing the shared surface,
// and waking threads blocked in WaitUntilFinished(). Requires the image data
// to be locked. (aFrameOpacity is currently unused in this body.)
void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                      bool aFinalize /* = true */) {
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  if (mPalettedImageData) {
    ImageUpdatedInternal(mFrameRect);
  } else if (!mDecoded.IsEqualEdges(mFrameRect)) {
    // The decoder should have produced rows starting from either the bottom or
    // the top of the image. We need to calculate the region for which we have
    // not yet invalidated.
    IntRect delta(0, 0, mFrameRect.width, 0);
    if (mDecoded.y == 0) {
      delta.y = mDecoded.height;
      delta.height = mFrameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == mFrameRect.height) {
      delta.height = mFrameRect.height - mDecoded.y;
    } else {
      MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
      delta = mFrameRect;
    }

    ImageUpdatedInternal(delta);
  }

  MOZ_ASSERT(mDecoded.IsEqualEdges(mFrameRect));

  if (aFinalize) {
    FinalizeSurfaceInternal();
  }

  mFinished = true;

  // The image is now complete, wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
797 uint32_t imgFrame::GetImageBytesPerRow() const {
798 mMonitor.AssertCurrentThreadOwns();
800 if (mRawSurface) {
801 return mFrameRect.Width() * BytesPerPixel(mFormat);
804 if (mPaletteDepth) {
805 return mFrameRect.Width();
808 return 0;
// Total byte length of the frame's pixel data (rows * bytes-per-row).
uint32_t imgFrame::GetImageDataLength() const {
  return GetImageBytesPerRow() * mFrameRect.Height();
}
// Public, locking wrapper around GetImageDataInternal.
void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
  MonitorAutoLock lock(mMonitor);
  GetImageDataInternal(aData, aLength);
}
// Returns a pointer to the frame's pixel data (locked surface, or the
// pixel portion of the paletted buffer) plus its length. Requires the image
// data to be locked; caller must hold mMonitor. *aData is null only in the
// (asserted-unreachable) case where neither buffer exists.
void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on
    // the main thread after decoding has finished, but if animations want to
    // read frame data off the main thread, we will need to reconsider this.
    *aData = mLockedSurface->GetData();
    MOZ_ASSERT(
        *aData,
        "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else if (mPalettedImageData) {
    // Pixel indices follow the palette at the start of the buffer.
    *aData = mPalettedImageData + PaletteDataLength();
    MOZ_ASSERT(
        *aData,
        "mPalettedImageData is non-null, but result is null in GetImageData");
  } else {
    MOZ_ASSERT(
        false,
        "Have neither mLockedSurface nor mPalettedImageData in GetImageData");
    *aData = nullptr;
  }

  *aLength = GetImageDataLength();
}
847 uint8_t* imgFrame::GetImageData() const {
848 uint8_t* data;
849 uint32_t length;
850 GetImageData(&data, &length);
851 return data;
// True when this frame stores paletted (indexed) data rather than a surface.
bool imgFrame::GetIsPaletted() const { return mPalettedImageData != nullptr; }
// Returns the palette portion of the paletted buffer and its byte length,
// or (nullptr, 0) for non-paletted frames. Requires locked image data.
void imgFrame::GetPaletteData(uint32_t** aPalette, uint32_t* length) const {
  AssertImageDataLocked();

  if (!mPalettedImageData) {
    *aPalette = nullptr;
    *length = 0;
  } else {
    // The palette occupies the first PaletteDataLength() bytes of the buffer.
    *aPalette = (uint32_t*)mPalettedImageData;
    *length = PaletteDataLength();
  }
}
// Convenience overload returning only the palette pointer.
uint32_t* imgFrame::GetPaletteData() const {
  uint32_t* data;
  uint32_t length;
  GetPaletteData(&data, &length);
  return data;
}
// Takes a lock on the frame's raw data, pinning it in memory, and returns a
// pointer to it. Returns nullptr if aOnlyFinished is set and decoding hasn't
// finished, or if the frame has been optimized (raw data discarded).
uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
  if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
    return nullptr;
  }

  uint8_t* data;
  if (mPalettedImageData) {
    data = mPalettedImageData;
  } else if (mLockedSurface) {
    data = mLockedSurface->GetData();
  } else {
    data = nullptr;
  }

  // If the raw data is still available, we should get a valid pointer for it.
  if (!data) {
    MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
    return nullptr;
  }

  ++mLockCount;
  return data;
}
// Debug-only check that the image data is currently locked.
void imgFrame::AssertImageDataLocked() const {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}
// Releases one lock on the frame's raw data. The final lock may only be
// dropped after the frame is finished or aborted.
nsresult imgFrame::UnlockImageData() {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
  if (mLockCount <= 0) {
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
             "Should have Finish()'d or aborted before unlocking");

  mLockCount--;

  return NS_OK;
}
// Marks the frame as eligible for Optimize(); requires locked image data.
void imgFrame::SetOptimizable() {
  AssertImageDataLocked();
  MonitorAutoLock lock(mMonitor);
  mOptimizable = true;
}
// Public, locking wrapper around FinalizeSurfaceInternal.
void imgFrame::FinalizeSurface() {
  MonitorAutoLock lock(mMonitor);
  FinalizeSurfaceInternal();
}
// Finalizes a shared-memory surface (making it read-only for sharing) once
// decoding is done. Skipped for recyclable frames, which must stay writable.
// Caller must hold mMonitor.
void imgFrame::FinalizeSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  // Not all images will have mRawSurface to finalize (i.e. paletted images).
  if (mShouldRecycle || !mRawSurface ||
      mRawSurface->GetType() != SurfaceType::DATA_SHARED) {
    return;
  }

  auto sharedSurf = static_cast<SourceSurfaceSharedData*>(mRawSurface.get());
  sharedSurf->Finalize();
}
// Public, locking wrapper around GetSourceSurfaceInternal (non-temporary use).
already_AddRefed<SourceSurface> imgFrame::GetSourceSurface() {
  MonitorAutoLock lock(mMonitor);
  return GetSourceSurfaceInternal(/* aTemporary */ false);
}
// Returns the best surface for this frame: the optimized surface if still
// valid, else the blank debug surface if present, else the locked surface
// (wrapped for recycling unless the caller promises temporary use), else a
// freshly mapped view of the raw surface. Caller must hold mMonitor.
already_AddRefed<SourceSurface> imgFrame::GetSourceSurfaceInternal(
    bool aTemporary) {
  mMonitor.AssertCurrentThreadOwns();

  if (mOptSurface) {
    if (mOptSurface->IsValid()) {
      RefPtr<SourceSurface> surf(mOptSurface);
      return surf.forget();
    } else {
      // The optimized surface went away (e.g. device reset); drop it.
      mOptSurface = nullptr;
    }
  }

  if (mBlankLockedSurface) {
    // We are going to return the blank surface because of the flags.
    // We are including comments here that are copied from below
    // just so that we are on the same page!

    // We don't need to create recycling wrapper for some callers because they
    // promise to release the surface immediately after.
    if (!aTemporary && mShouldRecycle) {
      RefPtr<SourceSurface> surf =
          new RecyclingSourceSurface(this, mBlankLockedSurface);
      return surf.forget();
    }

    RefPtr<SourceSurface> surf(mBlankLockedSurface);
    return surf.forget();
  }

  if (mLockedSurface) {
    // We don't need to create recycling wrapper for some callers because they
    // promise to release the surface immediately after.
    if (!aTemporary && mShouldRecycle) {
      RefPtr<SourceSurface> surf =
          new RecyclingSourceSurface(this, mLockedSurface);
      return surf.forget();
    }

    RefPtr<SourceSurface> surf(mLockedSurface);
    return surf.forget();
  }

  MOZ_ASSERT(!mShouldRecycle, "Should recycle but no locked surface!");

  if (!mRawSurface) {
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
}
// Marks decoding of this frame as aborted and wakes any waiters.
void imgFrame::Abort() {
  MonitorAutoLock lock(mMonitor);

  mAborted = true;

  // Wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
// Whether decoding of this frame was aborted.
bool imgFrame::IsAborted() const {
  MonitorAutoLock lock(mMonitor);
  return mAborted;
}
// Whether this frame has finished decoding.
bool imgFrame::IsFinished() const {
  MonitorAutoLock lock(mMonitor);
  return mFinished;
}
// Blocks until the frame is finished or aborted (Finish()/Abort() call
// NotifyAll on the monitor).
void imgFrame::WaitUntilFinished() const {
  MonitorAutoLock lock(mMonitor);

  while (true) {
    // Return if we're aborted or complete.
    if (mAborted || mFinished) {
      return;
    }

    // Not complete yet, so we'll have to wait.
    mMonitor.Wait();
  }
}
// True once the decoded region covers the whole frame rect. Caller must hold
// mMonitor.
bool imgFrame::AreAllPixelsWritten() const {
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(mFrameRect);
}
// Main-thread-only flag: whether compositing this animation frame failed.
bool imgFrame::GetCompositingFailed() const {
  MOZ_ASSERT(NS_IsMainThread());
  return mCompositingFailed;
}
// Main-thread-only setter for the compositing-failed flag.
void imgFrame::SetCompositingFailed(bool val) {
  MOZ_ASSERT(NS_IsMainThread());
  mCompositingFailed = val;
}
// Memory reporting: accounts for the paletted buffer and each surface via the
// callback. Note the mMallocSizeOf calls on surfaces measure only the surface
// objects themselves; mRawSurface additionally reports its pixel data
// (heap/non-heap/shared-handle) through AddSizeOfExcludingThis.
void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                      const AddSizeOfCb& aCallback) const {
  MonitorAutoLock lock(mMonitor);

  AddSizeOfCbData metadata;
  if (mPalettedImageData) {
    metadata.heap += aMallocSizeOf(mPalettedImageData);
  }
  if (mLockedSurface) {
    metadata.heap += aMallocSizeOf(mLockedSurface);
  }
  if (mOptSurface) {
    metadata.heap += aMallocSizeOf(mOptSurface);
  }
  if (mRawSurface) {
    metadata.heap += aMallocSizeOf(mRawSurface);
    mRawSurface->AddSizeOfExcludingThis(aMallocSizeOf, metadata.heap,
                                        metadata.nonHeap, metadata.handles,
                                        metadata.externalId);
  }

  aCallback(metadata);
}
// Wraps a frame's surface and holds a recycle lock on the parent frame for
// its lifetime, preventing InitForDecoderRecycle from reusing the buffer
// while consumers still reference it. Must be constructed with the parent's
// monitor held (guaranteed by GetSourceSurfaceInternal).
RecyclingSourceSurface::RecyclingSourceSurface(imgFrame* aParent,
                                               DataSourceSurface* aSurface)
    : mParent(aParent), mSurface(aSurface), mType(SurfaceType::DATA) {
  mParent->mMonitor.AssertCurrentThreadOwns();
  ++mParent->mRecycleLockCount;

  // Advertise the shared-memory nature of the wrapped surface to consumers.
  if (aSurface->GetType() == SurfaceType::DATA_SHARED) {
    mType = SurfaceType::DATA_RECYCLING_SHARED;
  }
}
// Drops the recycle lock; when the count reaches zero, notifies the monitor
// so a decoder blocked in InitForDecoderRecycle can reclaim the buffer.
RecyclingSourceSurface::~RecyclingSourceSurface() {
  MonitorAutoLock lock(mParent->mMonitor);
  MOZ_ASSERT(mParent->mRecycleLockCount > 0);
  if (--mParent->mRecycleLockCount == 0) {
    mParent->mMonitor.NotifyAll();
  }
}
1097 } // namespace image
1098 } // namespace mozilla