image/SurfaceCache.cpp
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 /**
7 * SurfaceCache is a service for caching temporary surfaces in imagelib.
8 */
10 #include "SurfaceCache.h"
12 #include <algorithm>
13 #include <utility>
15 #include "ISurfaceProvider.h"
16 #include "Image.h"
17 #include "LookupResult.h"
18 #include "ShutdownTracker.h"
19 #include "gfx2DGlue.h"
20 #include "gfxPlatform.h"
21 #include "imgFrame.h"
22 #include "mozilla/Assertions.h"
23 #include "mozilla/Attributes.h"
24 #include "mozilla/CheckedInt.h"
25 #include "mozilla/DebugOnly.h"
26 #include "mozilla/Likely.h"
27 #include "mozilla/RefPtr.h"
28 #include "mozilla/StaticMutex.h"
29 #include "mozilla/StaticPrefs_image.h"
30 #include "mozilla/StaticPtr.h"
31 #include "mozilla/Tuple.h"
32 #include "nsExpirationTracker.h"
33 #include "nsHashKeys.h"
34 #include "nsIMemoryReporter.h"
35 #include "nsRefPtrHashtable.h"
36 #include "nsSize.h"
37 #include "nsTArray.h"
38 #include "Orientation.h"
39 #include "prsystem.h"
41 using std::max;
42 using std::min;
44 namespace mozilla {
46 using namespace gfx;
48 namespace image {
50 MOZ_DEFINE_MALLOC_SIZE_OF(SurfaceCacheMallocSizeOf)
52 class CachedSurface;
53 class SurfaceCacheImpl;
55 ///////////////////////////////////////////////////////////////////////////////
56 // Static Data
57 ///////////////////////////////////////////////////////////////////////////////
59 // The single surface cache instance.
60 static StaticRefPtr<SurfaceCacheImpl> sInstance;
62 // The mutex protecting the surface cache.
63 static StaticMutex sInstanceMutex MOZ_UNANNOTATED;
65 ///////////////////////////////////////////////////////////////////////////////
66 // SurfaceCache Implementation
67 ///////////////////////////////////////////////////////////////////////////////
69 /**
70 * Cost models the cost of storing a surface in the cache. Right now, this is
71 * simply an estimate of the size of the surface in bytes, but in the future it
72 * may be worth taking into account the cost of rematerializing the surface as
73 * well.
75 typedef size_t Cost;
77 static Cost ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel) {
78 MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4);
79 return aSize.width * aSize.height * aBytesPerPixel;
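// For example, a 100x100 surface at 4 bytes per pixel has a cost of
// 100 * 100 * 4 = 40,000 bytes.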
82 /**
83 * Since we want to be able to make eviction decisions based on cost, we need to
84 * be able to look up the CachedSurface which has a certain cost as well as the
85 * cost associated with a certain CachedSurface. To make this possible, in data
86 * structures we actually store a CostEntry, which contains a weak pointer to
87 * its associated surface.
89 * To make usage of the weak pointer safe, SurfaceCacheImpl always calls
90 * StartTracking after a surface is stored in the cache and StopTracking before
91 * it is removed.
93 class CostEntry {
94 public:
95 CostEntry(NotNull<CachedSurface*> aSurface, Cost aCost)
96 : mSurface(aSurface), mCost(aCost) {}
98 NotNull<CachedSurface*> Surface() const { return mSurface; }
99 Cost GetCost() const { return mCost; }
101 bool operator==(const CostEntry& aOther) const {
102 return mSurface == aOther.mSurface && mCost == aOther.mCost;
105 bool operator<(const CostEntry& aOther) const {
106 return mCost < aOther.mCost ||
107 (mCost == aOther.mCost && mSurface < aOther.mSurface);
110 private:
111 NotNull<CachedSurface*> mSurface;
112 Cost mCost;
116 * A CachedSurface associates a surface with a key that uniquely identifies that
117 * surface.
119 class CachedSurface {
120 ~CachedSurface() {}
122 public:
123 MOZ_DECLARE_REFCOUNTED_TYPENAME(CachedSurface)
124 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface)
126 explicit CachedSurface(NotNull<ISurfaceProvider*> aProvider)
127 : mProvider(aProvider), mIsLocked(false) {}
129 DrawableSurface GetDrawableSurface() const {
130 if (MOZ_UNLIKELY(IsPlaceholder())) {
131 MOZ_ASSERT_UNREACHABLE("Called GetDrawableSurface() on a placeholder");
132 return DrawableSurface();
135 return mProvider->Surface();
138 DrawableSurface GetDrawableSurfaceEvenIfPlaceholder() const {
139 return mProvider->Surface();
142 void SetLocked(bool aLocked) {
143 if (IsPlaceholder()) {
144 return; // Can't lock a placeholder.
147 // Update both our state and our provider's state. Some surface providers
148 // are permanently locked; maintaining our own locking state enables us to
149 // respect SetLocked() even when it's meaningless from the provider's
150 // perspective.
151 mIsLocked = aLocked;
152 mProvider->SetLocked(aLocked);
155 bool IsLocked() const {
156 return !IsPlaceholder() && mIsLocked && mProvider->IsLocked();
159 void SetCannotSubstitute() {
160 mProvider->Availability().SetCannotSubstitute();
162 bool CannotSubstitute() const {
163 return mProvider->Availability().CannotSubstitute();
166 bool IsPlaceholder() const {
167 return mProvider->Availability().IsPlaceholder();
169 bool IsDecoded() const { return !IsPlaceholder() && mProvider->IsFinished(); }
171 ImageKey GetImageKey() const { return mProvider->GetImageKey(); }
172 const SurfaceKey& GetSurfaceKey() const { return mProvider->GetSurfaceKey(); }
173 nsExpirationState* GetExpirationState() { return &mExpirationState; }
175 CostEntry GetCostEntry() {
176 return image::CostEntry(WrapNotNull(this), mProvider->LogicalSizeInBytes());
179 size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
180 return aMallocSizeOf(this) + aMallocSizeOf(mProvider.get());
183 void InvalidateRecording() { mProvider->InvalidateRecording(); }
185 // A helper type used by SurfaceCacheImpl::CollectSizeOfSurfaces.
186 struct MOZ_STACK_CLASS SurfaceMemoryReport {
187 SurfaceMemoryReport(nsTArray<SurfaceMemoryCounter>& aCounters,
188 MallocSizeOf aMallocSizeOf)
189 : mCounters(aCounters), mMallocSizeOf(aMallocSizeOf) {}
191 void Add(NotNull<CachedSurface*> aCachedSurface, bool aIsFactor2) {
192 if (aCachedSurface->IsPlaceholder()) {
193 return;
196 // Record the memory used by the ISurfaceProvider. This may not have a
197 // straightforward relationship to the size of the surface that
198 // DrawableRef() returns if the surface is generated dynamically. (i.e.,
199 // for surfaces with PlaybackType::eAnimated.)
200 aCachedSurface->mProvider->AddSizeOfExcludingThis(
201 mMallocSizeOf, [&](ISurfaceProvider::AddSizeOfCbData& aMetadata) {
202 SurfaceMemoryCounter counter(aCachedSurface->GetSurfaceKey(),
203 aCachedSurface->IsLocked(),
204 aCachedSurface->CannotSubstitute(),
205 aIsFactor2, aMetadata.mFinished);
207 counter.Values().SetDecodedHeap(aMetadata.mHeapBytes);
208 counter.Values().SetDecodedNonHeap(aMetadata.mNonHeapBytes);
209 counter.Values().SetDecodedUnknown(aMetadata.mUnknownBytes);
210 counter.Values().SetExternalHandles(aMetadata.mExternalHandles);
211 counter.Values().SetFrameIndex(aMetadata.mIndex);
212 counter.Values().SetExternalId(aMetadata.mExternalId);
213 counter.Values().SetSurfaceTypes(aMetadata.mTypes);
215 mCounters.AppendElement(counter);
219 private:
220 nsTArray<SurfaceMemoryCounter>& mCounters;
221 MallocSizeOf mMallocSizeOf;
224 private:
225 nsExpirationState mExpirationState;
226 NotNull<RefPtr<ISurfaceProvider>> mProvider;
227 bool mIsLocked;
230 static int64_t AreaOfIntSize(const IntSize& aSize) {
231 return static_cast<int64_t>(aSize.width) * static_cast<int64_t>(aSize.height);
235 * An ImageSurfaceCache is a per-image surface cache. For correctness we must be
236 * able to remove all surfaces associated with an image when the image is
237 * destroyed or invalidated. Since this will happen frequently, it makes sense
238 * to make it cheap by storing the surfaces for each image separately.
240 * ImageSurfaceCache also keeps track of whether its associated image is locked
241 * or unlocked.
243 * The cache may also enter "factor of 2" mode which occurs when the number of
244 * surfaces in the cache exceeds the "image.cache.factor2.threshold-surfaces"
245 * pref plus the number of native sizes of the image. When in "factor of 2"
246 * mode, the cache will strongly favour sizes which are a factor of 2 of the
247 * largest native size. It accomplishes this by suggesting a factor of 2 size
248 * when lookups fail and substituting the factor of 2 surface nearest the ideal
249 * size as the "best" available match (rather than a mere substitute for a size
250 * that was not found). This allows us to minimize memory consumption and CPU time spent
251 * decoding when a website requires many variants of the same surface.
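 *
 * For example, if the largest native size of an image is 512x512, factor of 2
 * mode steers lookups toward 512x512, 256x256, 128x128, and so on, instead of
 * decoding a separate surface for every slightly different requested size.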
253 class ImageSurfaceCache {
254 ~ImageSurfaceCache() {}
256 public:
257 explicit ImageSurfaceCache(const ImageKey aImageKey)
258 : mLocked(false),
259 mFactor2Mode(false),
260 mFactor2Pruned(false),
261 mIsVectorImage(aImageKey->GetType() == imgIContainer::TYPE_VECTOR) {}
263 MOZ_DECLARE_REFCOUNTED_TYPENAME(ImageSurfaceCache)
264 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageSurfaceCache)
266 typedef nsRefPtrHashtable<nsGenericHashKey<SurfaceKey>, CachedSurface>
267 SurfaceTable;
269 auto Values() const { return mSurfaces.Values(); }
270 uint32_t Count() const { return mSurfaces.Count(); }
271 bool IsEmpty() const { return mSurfaces.Count() == 0; }
273 size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
274 size_t bytes = aMallocSizeOf(this) +
275 mSurfaces.ShallowSizeOfExcludingThis(aMallocSizeOf);
276 for (const auto& value : Values()) {
277 bytes += value->ShallowSizeOfIncludingThis(aMallocSizeOf);
279 return bytes;
282 [[nodiscard]] bool Insert(NotNull<CachedSurface*> aSurface) {
283 MOZ_ASSERT(!mLocked || aSurface->IsPlaceholder() || aSurface->IsLocked(),
284 "Inserting an unlocked surface for a locked image");
285 const auto& surfaceKey = aSurface->GetSurfaceKey();
286 if (surfaceKey.Region()) {
287 // We don't allow substitutes for surfaces with regions, so we don't want
288 // to allow factor of 2 mode pruning to release these surfaces.
289 aSurface->SetCannotSubstitute();
291 return mSurfaces.InsertOrUpdate(surfaceKey, RefPtr<CachedSurface>{aSurface},
292 fallible);
295 already_AddRefed<CachedSurface> Remove(NotNull<CachedSurface*> aSurface) {
296 MOZ_ASSERT(mSurfaces.GetWeak(aSurface->GetSurfaceKey()),
297 "Should not be removing a surface we don't have");
299 RefPtr<CachedSurface> surface;
300 mSurfaces.Remove(aSurface->GetSurfaceKey(), getter_AddRefs(surface));
301 AfterMaybeRemove();
302 return surface.forget();
305 already_AddRefed<CachedSurface> Lookup(const SurfaceKey& aSurfaceKey,
306 bool aForAccess) {
307 RefPtr<CachedSurface> surface;
308 mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface));
310 if (aForAccess) {
311 if (surface) {
312 // We don't want to allow factor of 2 mode pruning to release surfaces
313 // for which the callers will accept no substitute.
314 surface->SetCannotSubstitute();
315 } else if (!mFactor2Mode) {
316 // If no exact match is found, and this is for use rather than internal
317 // accounting (i.e. insert and removal), we know this will trigger a
318 // decode. Make sure we switch now to factor of 2 mode if necessary.
319 MaybeSetFactor2Mode();
323 return surface.forget();
327 * @returns A tuple containing the best matching CachedSurface if available,
328 * a MatchType describing how the CachedSurface was selected, and
329 * an IntSize which is the size the caller should choose to decode
330 * at should it attempt to do so.
332 Tuple<already_AddRefed<CachedSurface>, MatchType, IntSize> LookupBestMatch(
333 const SurfaceKey& aIdealKey) {
334 // Try for an exact match first.
335 RefPtr<CachedSurface> exactMatch;
336 mSurfaces.Get(aIdealKey, getter_AddRefs(exactMatch));
337 if (exactMatch) {
338 if (exactMatch->IsDecoded()) {
339 return MakeTuple(exactMatch.forget(), MatchType::EXACT, IntSize());
341 } else if (aIdealKey.Region()) {
342 // We cannot substitute if we have a region. Allow it to create an exact
343 // match.
344 return MakeTuple(exactMatch.forget(), MatchType::NOT_FOUND, IntSize());
345 } else if (!mFactor2Mode) {
346 // If no exact match is found, and we are not in factor of 2 mode, then
347 // we know that we will trigger a decode because at best we will provide
348 // a substitute. Make sure we switch now to factor of 2 mode if necessary.
349 MaybeSetFactor2Mode();
352 // Second, try for the best compact (factor of 2) match, if applicable.
353 IntSize suggestedSize = SuggestedSize(aIdealKey.Size());
354 if (suggestedSize != aIdealKey.Size()) {
355 if (!exactMatch) {
356 SurfaceKey compactKey = aIdealKey.CloneWithSize(suggestedSize);
357 mSurfaces.Get(compactKey, getter_AddRefs(exactMatch));
358 if (exactMatch && exactMatch->IsDecoded()) {
359 MOZ_ASSERT(suggestedSize != aIdealKey.Size());
360 return MakeTuple(exactMatch.forget(),
361 MatchType::SUBSTITUTE_BECAUSE_BEST, suggestedSize);
366 // There's no perfect match, so find the best match we can.
367 RefPtr<CachedSurface> bestMatch;
368 for (const auto& value : Values()) {
369 NotNull<CachedSurface*> current = WrapNotNull(value);
370 const SurfaceKey& currentKey = current->GetSurfaceKey();
372 // We never match a placeholder or a surface with a region.
373 if (current->IsPlaceholder() || currentKey.Region()) {
374 continue;
376 // Matching the playback type and SVG context is required.
377 if (currentKey.Playback() != aIdealKey.Playback() ||
378 currentKey.SVGContext() != aIdealKey.SVGContext()) {
379 continue;
381 // Matching the flags is required.
382 if (currentKey.Flags() != aIdealKey.Flags()) {
383 continue;
385 // Anything is better than nothing! (Within the constraints we just
386 // checked, of course.)
387 if (!bestMatch) {
388 bestMatch = current;
389 continue;
392 MOZ_ASSERT(bestMatch, "Should have a current best match");
394 // Always prefer completely decoded surfaces.
395 bool bestMatchIsDecoded = bestMatch->IsDecoded();
396 if (bestMatchIsDecoded && !current->IsDecoded()) {
397 continue;
399 if (!bestMatchIsDecoded && current->IsDecoded()) {
400 bestMatch = current;
401 continue;
404 SurfaceKey bestMatchKey = bestMatch->GetSurfaceKey();
405 if (CompareArea(aIdealKey.Size(), bestMatchKey.Size(),
406 currentKey.Size())) {
407 bestMatch = current;
411 MatchType matchType;
412 if (bestMatch) {
413 if (!exactMatch) {
414 // No exact match, neither ideal nor factor of 2.
415 MOZ_ASSERT(suggestedSize != bestMatch->GetSurfaceKey().Size(),
416 "No exact match despite the fact the sizes match!");
417 matchType = MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND;
418 } else if (exactMatch != bestMatch) {
419 // The exact match is still decoding, but we found a substitute.
420 matchType = MatchType::SUBSTITUTE_BECAUSE_PENDING;
421 } else if (aIdealKey.Size() != bestMatch->GetSurfaceKey().Size()) {
422 // The best factor of 2 match is still decoding, but the best we've got.
423 MOZ_ASSERT(suggestedSize != aIdealKey.Size());
424 MOZ_ASSERT(mFactor2Mode || mIsVectorImage);
425 matchType = MatchType::SUBSTITUTE_BECAUSE_BEST;
426 } else {
427 // The exact match is still decoding, but it's the best we've got.
428 matchType = MatchType::EXACT;
430 } else {
431 if (exactMatch) {
432 // We found an "exact match"; it must have been a placeholder.
433 MOZ_ASSERT(exactMatch->IsPlaceholder());
434 matchType = MatchType::PENDING;
435 } else {
436 // We couldn't find an exact match *or* a substitute.
437 matchType = MatchType::NOT_FOUND;
441 return MakeTuple(bestMatch.forget(), matchType, suggestedSize);
444 void MaybeSetFactor2Mode() {
445 MOZ_ASSERT(!mFactor2Mode);
447 // Typically an image cache will not have too many size-varying surfaces, so
448 // if we exceed the given threshold, we should consider using a subset.
449 int32_t thresholdSurfaces =
450 StaticPrefs::image_cache_factor2_threshold_surfaces();
451 if (thresholdSurfaces < 0 ||
452 mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
453 return;
456 // Determine how many native surfaces this image has. If it is zero, and it
457 // is a vector image, then we should impute a single native size. Otherwise,
458 // it may be zero because we don't know yet, or the image has an error, or
459 // it isn't supported.
460 NotNull<CachedSurface*> current =
461 WrapNotNull(mSurfaces.ConstIter().UserData());
462 Image* image = static_cast<Image*>(current->GetImageKey());
463 size_t nativeSizes = image->GetNativeSizesLength();
464 if (mIsVectorImage) {
465 MOZ_ASSERT(nativeSizes == 0);
466 nativeSizes = 1;
467 } else if (nativeSizes == 0) {
468 return;
471 // Increase the threshold by the number of native sizes. This ensures that
472 // we do not prevent decoding of the image at all its native sizes. It does
473 // not guarantee we will provide a surface at every native size, however
474 // (e.g. when many other sized surfaces are requested besides the native sizes).
475 thresholdSurfaces += nativeSizes;
476 if (mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
477 return;
480 // We have a valid size, we can change modes.
481 mFactor2Mode = true;
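// Illustration: with image.cache.factor2.threshold-surfaces set to 4 and an
// image reporting 3 native sizes, factor of 2 mode is entered once more than
// 7 surfaces have been cached for that image.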
484 template <typename Function>
485 void Prune(Function&& aRemoveCallback) {
486 if (!mFactor2Mode || mFactor2Pruned) {
487 return;
490 // Attempt to discard any surfaces which are not factor of 2 and the best
491 // factor of 2 match exists.
492 bool hasNotFactorSize = false;
493 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
494 NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
495 const SurfaceKey& currentKey = current->GetSurfaceKey();
496 const IntSize& currentSize = currentKey.Size();
498 // First we check if someone requested this size and would not accept
499 // an alternatively sized surface.
500 if (current->CannotSubstitute()) {
501 continue;
504 // Next we find the best factor of 2 size for this surface. If this
505 // surface is a factor of 2 size, then we want to keep it.
506 IntSize bestSize = SuggestedSize(currentSize);
507 if (bestSize == currentSize) {
508 continue;
511 // Check the cache for a surface with the same parameters except for the
512 // size which uses the closest factor of 2 size.
513 SurfaceKey compactKey = currentKey.CloneWithSize(bestSize);
514 RefPtr<CachedSurface> compactMatch;
515 mSurfaces.Get(compactKey, getter_AddRefs(compactMatch));
516 if (compactMatch && compactMatch->IsDecoded()) {
517 aRemoveCallback(current);
518 iter.Remove();
519 } else {
520 hasNotFactorSize = true;
524 // We have no surfaces that are not factor of 2 sized, so we can stop
525 // pruning from now on, because we avoid inserting new surfaces that don't
526 // match our sizing set (unless the caller won't accept a substitute).
528 if (!hasNotFactorSize) {
529 mFactor2Pruned = true;
532 // We should never leave factor of 2 mode due to pruning in and of itself, but
533 // if we discarded surfaces due to the volatile buffers getting released,
534 // it is possible.
535 AfterMaybeRemove();
538 template <typename Function>
539 bool Invalidate(Function&& aRemoveCallback) {
540 // Remove all non-blob recordings from the cache. Invalidate any blob
541 // recordings.
542 bool foundRecording = false;
543 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
544 NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
546 if (current->GetSurfaceKey().Flags() & SurfaceFlags::RECORD_BLOB) {
547 foundRecording = true;
548 current->InvalidateRecording();
549 continue;
552 aRemoveCallback(current);
553 iter.Remove();
556 AfterMaybeRemove();
557 return foundRecording;
560 IntSize SuggestedSize(const IntSize& aSize) const {
561 IntSize suggestedSize = SuggestedSizeInternal(aSize);
562 if (mIsVectorImage) {
563 suggestedSize = SurfaceCache::ClampVectorSize(suggestedSize);
565 return suggestedSize;
568 IntSize SuggestedSizeInternal(const IntSize& aSize) const {
569 // When not in factor of 2 mode, we can always decode at the given size.
570 if (!mFactor2Mode) {
571 return aSize;
574 // We cannot enter factor of 2 mode unless we have a minimum number of
575 // surfaces, and we should have left it if the cache was emptied.
576 if (MOZ_UNLIKELY(IsEmpty())) {
577 MOZ_ASSERT_UNREACHABLE("Should not be empty and in factor of 2 mode!");
578 return aSize;
581 // This bit of awkwardness gets the largest native size of the image.
582 NotNull<CachedSurface*> firstSurface =
583 WrapNotNull(mSurfaces.ConstIter().UserData());
584 Image* image = static_cast<Image*>(firstSurface->GetImageKey());
585 IntSize factorSize;
586 if (NS_FAILED(image->GetWidth(&factorSize.width)) ||
587 NS_FAILED(image->GetHeight(&factorSize.height)) ||
588 factorSize.IsEmpty()) {
589 // Valid vector images may have a default size of 0x0. In that case, just
590 // assume a default size of 100x100 and apply the intrinsic ratio if
591 // available. If our guess was too small, don't use factor of 2 sizing.
592 MOZ_ASSERT(mIsVectorImage);
593 factorSize = IntSize(100, 100);
594 Maybe<AspectRatio> aspectRatio = image->GetIntrinsicRatio();
595 if (aspectRatio && *aspectRatio) {
596 factorSize.width =
597 NSToIntRound(aspectRatio->ApplyToFloat(float(factorSize.height)));
598 if (factorSize.IsEmpty()) {
599 return aSize;
604 if (mIsVectorImage) {
605 // Ensure the aspect ratio matches the native size before forcing the
606 // caller to accept a factor of 2 size. The difference between the aspect
607 // ratios is:
609 // delta = nativeWidth/nativeHeight - desiredWidth/desiredHeight
611 // delta*nativeHeight*desiredHeight = nativeWidth*desiredHeight
612 // - desiredWidth*nativeHeight
614 // Using the maximum accepted delta as a constant, we can avoid the
615 // floating point division and just compare after some integer ops.
616 int32_t delta =
617 factorSize.width * aSize.height - aSize.width * factorSize.height;
618 int32_t maxDelta = (factorSize.height * aSize.height) >> 4;
619 if (delta > maxDelta || delta < -maxDelta) {
620 return aSize;
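// For instance, with a native size of 200x100 and a requested size of 100x51:
// delta = 200*51 - 100*100 = 200 and maxDelta = (100*51) >> 4 = 318, so the
// request is treated as matching the native aspect ratio.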
623 // If the requested size is bigger than the native size, we actually need
624 // to grow the native size instead of shrinking it.
625 if (factorSize.width < aSize.width) {
626 do {
627 IntSize candidate(factorSize.width * 2, factorSize.height * 2);
628 if (!SurfaceCache::IsLegalSize(candidate)) {
629 break;
632 factorSize = candidate;
633 } while (factorSize.width < aSize.width);
635 return factorSize;
638 // Otherwise we can find the best fit as normal.
641 // Start with the native size as the best first guess.
642 IntSize bestSize = factorSize;
643 factorSize.width /= 2;
644 factorSize.height /= 2;
646 while (!factorSize.IsEmpty()) {
647 if (!CompareArea(aSize, bestSize, factorSize)) {
648 // This size is not better than the last. Since we proceed from largest
649 // to smallest, we know that the next size will not be better if the
650 // previous size was rejected. Break early.
651 break;
654 // The current factor of 2 size is better than the last selected size.
655 bestSize = factorSize;
656 factorSize.width /= 2;
657 factorSize.height /= 2;
660 return bestSize;
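// For example, with a 512x512 native size and a 120x120 request, the loop
// walks 512x512 -> 256x256 -> 128x128 and stops at 128x128, the smallest
// factor of 2 size whose area is still not below the requested area.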
663 bool CompareArea(const IntSize& aIdealSize, const IntSize& aBestSize,
664 const IntSize& aSize) const {
665 // Compare sizes. We use an area-based heuristic here instead of computing a
666 // truly optimal answer, since it seems very unlikely to make a difference
667 // for realistic sizes.
668 int64_t idealArea = AreaOfIntSize(aIdealSize);
669 int64_t currentArea = AreaOfIntSize(aSize);
670 int64_t bestMatchArea = AreaOfIntSize(aBestSize);
672 // If the best match is smaller than the ideal size, prefer bigger sizes.
673 if (bestMatchArea < idealArea) {
674 if (currentArea > bestMatchArea) {
675 return true;
677 return false;
680 // Otherwise, prefer sizes closer to the ideal size, but still not smaller.
681 if (idealArea <= currentArea && currentArea < bestMatchArea) {
682 return true;
685 // This surface isn't an improvement over the current best match.
686 return false;
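// Example: with an ideal size of 300x300, a 512x512 candidate beats a 256x256
// best match (bigger wins over smaller-than-ideal), and a 400x400 candidate
// then beats 512x512 because it is closer to the ideal without going below it.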
689 template <typename Function>
690 void CollectSizeOfSurfaces(nsTArray<SurfaceMemoryCounter>& aCounters,
691 MallocSizeOf aMallocSizeOf,
692 Function&& aRemoveCallback) {
693 CachedSurface::SurfaceMemoryReport report(aCounters, aMallocSizeOf);
694 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
695 NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());
697 // We don't need the drawable surface for ourselves, but adding a surface
698 // to the report will trigger this indirectly. If the surface was
699 // discarded by the OS because it was in volatile memory, we should remove
700 // it from the cache immediately rather than include it in the report.
701 DrawableSurface drawableSurface;
702 if (!surface->IsPlaceholder()) {
703 drawableSurface = surface->GetDrawableSurface();
704 if (!drawableSurface) {
705 aRemoveCallback(surface);
706 iter.Remove();
707 continue;
711 const IntSize& size = surface->GetSurfaceKey().Size();
712 bool factor2Size = false;
713 if (mFactor2Mode) {
714 factor2Size = (size == SuggestedSize(size));
716 report.Add(surface, factor2Size);
719 AfterMaybeRemove();
722 void SetLocked(bool aLocked) { mLocked = aLocked; }
723 bool IsLocked() const { return mLocked; }
725 private:
726 void AfterMaybeRemove() {
727 if (IsEmpty() && mFactor2Mode) {
728 // The last surface for this cache was removed. This can happen if the
729 // surface was stored in a volatile buffer and got purged, or the surface
730 // expired from the cache. If the cache itself lingers for some reason
731 // (e.g. in the process of performing a lookup, the cache itself is
732 // locked), then we need to reset the factor of 2 state because it
733 // requires at least one surface present to get the native size
734 // information from the image.
735 mFactor2Mode = mFactor2Pruned = false;
739 SurfaceTable mSurfaces;
741 bool mLocked;
743 // True in "factor of 2" mode.
744 bool mFactor2Mode;
746 // True if all non-factor of 2 surfaces have been removed from the cache. Note
747 // that this excludes unsubstitutable sizes.
748 bool mFactor2Pruned;
750 // True if the surfaces are produced from a vector image. If so, it must match
751 // the aspect ratio when using factor of 2 mode.
752 bool mIsVectorImage;
756 * SurfaceCacheImpl is responsible for determining which surfaces will be cached
757 * and managing the surface cache data structures. Rather than interact with
758 * SurfaceCacheImpl directly, client code interacts with SurfaceCache, which
759 * maintains high-level invariants and encapsulates the details of the surface
760 * cache's implementation.
762 class SurfaceCacheImpl final : public nsIMemoryReporter {
763 public:
764 NS_DECL_ISUPPORTS
766 SurfaceCacheImpl(uint32_t aSurfaceCacheExpirationTimeMS,
767 uint32_t aSurfaceCacheDiscardFactor,
768 uint32_t aSurfaceCacheSize)
769 : mExpirationTracker(aSurfaceCacheExpirationTimeMS),
770 mMemoryPressureObserver(new MemoryPressureObserver),
771 mDiscardFactor(aSurfaceCacheDiscardFactor),
772 mMaxCost(aSurfaceCacheSize),
773 mAvailableCost(aSurfaceCacheSize),
774 mLockedCost(0),
775 mOverflowCount(0),
776 mAlreadyPresentCount(0),
777 mTableFailureCount(0),
778 mTrackingFailureCount(0) {
779 nsCOMPtr<nsIObserverService> os = services::GetObserverService();
780 if (os) {
781 os->AddObserver(mMemoryPressureObserver, "memory-pressure", false);
785 private:
786 virtual ~SurfaceCacheImpl() {
787 nsCOMPtr<nsIObserverService> os = services::GetObserverService();
788 if (os) {
789 os->RemoveObserver(mMemoryPressureObserver, "memory-pressure");
792 UnregisterWeakMemoryReporter(this);
795 public:
796 void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
798 InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider, bool aSetAvailable,
799 const StaticMutexAutoLock& aAutoLock) {
800 // If this is a duplicate surface, refuse to replace the original.
801 // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
802 // twice. We'll make this more efficient in bug 1185137.
803 LookupResult result =
804 Lookup(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock,
805 /* aMarkUsed = */ false);
806 if (MOZ_UNLIKELY(result)) {
807 mAlreadyPresentCount++;
808 return InsertOutcome::FAILURE_ALREADY_PRESENT;
811 if (result.Type() == MatchType::PENDING) {
812 RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(),
813 aAutoLock);
816 MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND ||
817 result.Type() == MatchType::PENDING,
818 "A LookupResult with no surface should be NOT_FOUND or PENDING");
820 // If this is bigger than we can hold after discarding everything we can,
821 // refuse to cache it.
822 Cost cost = aProvider->LogicalSizeInBytes();
823 if (MOZ_UNLIKELY(!CanHoldAfterDiscarding(cost))) {
824 mOverflowCount++;
825 return InsertOutcome::FAILURE;
828 // Remove elements in order of cost until we can fit this in the cache. Note
829 // that locked surfaces aren't in mCosts, so we never remove them here.
830 while (cost > mAvailableCost) {
831 MOZ_ASSERT(!mCosts.IsEmpty(),
832 "Removed everything and it still won't fit");
833 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
834 aAutoLock);
837 // Locate the appropriate per-image cache. If there's not an existing cache
838 // for this image, create it.
839 const ImageKey imageKey = aProvider->GetImageKey();
840 RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
841 if (!cache) {
842 cache = new ImageSurfaceCache(imageKey);
843 if (!mImageCaches.InsertOrUpdate(aProvider->GetImageKey(), RefPtr{cache},
844 fallible)) {
845 mTableFailureCount++;
846 return InsertOutcome::FAILURE;
850 // If we were asked to mark the cache entry available, do so.
851 if (aSetAvailable) {
852 aProvider->Availability().SetAvailable();
855 auto surface = MakeNotNull<RefPtr<CachedSurface>>(aProvider);
857 // We require that locking succeed if the image is locked and we're not
858 // inserting a placeholder; the caller may need to know this to handle
859 // errors correctly.
860 bool mustLock = cache->IsLocked() && !surface->IsPlaceholder();
861 if (mustLock) {
862 surface->SetLocked(true);
863 if (!surface->IsLocked()) {
864 return InsertOutcome::FAILURE;
868 // Insert.
869 MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost");
870 if (!cache->Insert(surface)) {
871 mTableFailureCount++;
872 if (mustLock) {
873 surface->SetLocked(false);
875 return InsertOutcome::FAILURE;
878 if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
879 MOZ_ASSERT(!mustLock);
880 Remove(surface, /* aStopTracking */ false, aAutoLock);
881 return InsertOutcome::FAILURE;
884 return InsertOutcome::SUCCESS;
887 void Remove(NotNull<CachedSurface*> aSurface, bool aStopTracking,
888 const StaticMutexAutoLock& aAutoLock) {
889 ImageKey imageKey = aSurface->GetImageKey();
891 RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
892 MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache");
894 // If the surface was not a placeholder, tell its image that we discarded
895 // it.
896 if (!aSurface->IsPlaceholder()) {
897 static_cast<Image*>(imageKey)->OnSurfaceDiscarded(
898 aSurface->GetSurfaceKey());
901 // If we failed during StartTracking, we can skip this step.
902 if (aStopTracking) {
903 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
906 // Individual surfaces must be freed outside the lock.
907 mCachedSurfacesDiscard.AppendElement(cache->Remove(aSurface));
909 MaybeRemoveEmptyCache(imageKey, cache);
912 bool StartTracking(NotNull<CachedSurface*> aSurface,
913 const StaticMutexAutoLock& aAutoLock) {
914 CostEntry costEntry = aSurface->GetCostEntry();
915 MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost,
916 "Cost too large and the caller didn't catch it");
918 if (aSurface->IsLocked()) {
919 mLockedCost += costEntry.GetCost();
920 MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?");
921 } else {
922 if (NS_WARN_IF(!mCosts.InsertElementSorted(costEntry, fallible))) {
923 mTrackingFailureCount++;
924 return false;
927 // This may fail during XPCOM shutdown, so we need to ensure the object is
928 // tracked before calling RemoveObject in StopTracking.
929 nsresult rv = mExpirationTracker.AddObjectLocked(aSurface, aAutoLock);
930 if (NS_WARN_IF(NS_FAILED(rv))) {
931 DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
932 MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
933 mTrackingFailureCount++;
934 return false;
938 mAvailableCost -= costEntry.GetCost();
939 return true;
942 void StopTracking(NotNull<CachedSurface*> aSurface, bool aIsTracked,
943 const StaticMutexAutoLock& aAutoLock) {
944 CostEntry costEntry = aSurface->GetCostEntry();
946 if (aSurface->IsLocked()) {
947 MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance");
948 mLockedCost -= costEntry.GetCost();
949 // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n).
950 MOZ_ASSERT(!mCosts.Contains(costEntry),
951 "Shouldn't have a cost entry for a locked surface");
952 } else {
953 if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) {
954 MOZ_ASSERT(aIsTracked, "Expiration-tracking a surface unexpectedly!");
955 mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock);
956 } else {
957 // Our call to AddObject must have failed in StartTracking; most likely
958 // we're in XPCOM shutdown right now.
959 MOZ_ASSERT(!aIsTracked, "Not expiration-tracking an unlocked surface!");
962 DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
963 MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
966 mAvailableCost += costEntry.GetCost();
967 MOZ_ASSERT(mAvailableCost <= mMaxCost,
968 "More available cost than we started with");
971 LookupResult Lookup(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
972 const StaticMutexAutoLock& aAutoLock, bool aMarkUsed) {
973 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
974 if (!cache) {
975 // No cached surfaces for this image.
976 return LookupResult(MatchType::NOT_FOUND);
979 RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey, aMarkUsed);
980 if (!surface) {
981 // Lookup in the per-image cache missed.
982 return LookupResult(MatchType::NOT_FOUND);
985 if (surface->IsPlaceholder()) {
986 return LookupResult(MatchType::PENDING);
989 DrawableSurface drawableSurface = surface->GetDrawableSurface();
990 if (!drawableSurface) {
991 // The surface was released by the operating system. Remove the cache
992 // entry as well.
993 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
994 return LookupResult(MatchType::NOT_FOUND);
997 if (aMarkUsed &&
998 !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
999 Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
1000 return LookupResult(MatchType::NOT_FOUND);
1003 MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
1004 "Lookup() not returning an exact match?");
1005 return LookupResult(std::move(drawableSurface), MatchType::EXACT);
1008 LookupResult LookupBestMatch(const ImageKey aImageKey,
1009 const SurfaceKey& aSurfaceKey,
1010 const StaticMutexAutoLock& aAutoLock,
1011 bool aMarkUsed) {
1012 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1013 if (!cache) {
1014 // No cached surfaces for this image.
1015 return LookupResult(
1016 MatchType::NOT_FOUND,
1017 SurfaceCache::ClampSize(aImageKey, aSurfaceKey.Size()));
1020 // Repeatedly look up the best match, trying again if the resulting surface
1021 // has been freed by the operating system, until we can either lock a
1022 // surface for drawing or there are no matching surfaces left.
1023 // XXX(seth): This is O(N^2), but N is expected to be very small. If we
1024 // encounter a performance problem here we can revisit this.
1026 RefPtr<CachedSurface> surface;
1027 DrawableSurface drawableSurface;
1028 MatchType matchType = MatchType::NOT_FOUND;
1029 IntSize suggestedSize;
1030 while (true) {
1031 Tie(surface, matchType, suggestedSize) =
1032 cache->LookupBestMatch(aSurfaceKey);
1034 if (!surface) {
1035 return LookupResult(
1036 matchType, suggestedSize); // Lookup in the per-image cache missed.
1039 drawableSurface = surface->GetDrawableSurface();
1040 if (drawableSurface) {
1041 break;
1044 // The surface was released by the operating system. Remove the cache
1045 // entry as well.
1046 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
1049 MOZ_ASSERT_IF(matchType == MatchType::EXACT,
1050 surface->GetSurfaceKey() == aSurfaceKey);
1051 MOZ_ASSERT_IF(
1052 matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND ||
1053 matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING,
1054 surface->GetSurfaceKey().Region() == aSurfaceKey.Region() &&
1055 surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() &&
1056 surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() &&
1057 surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags());
1059 if (matchType == MatchType::EXACT ||
1060 matchType == MatchType::SUBSTITUTE_BECAUSE_BEST) {
1061 if (aMarkUsed &&
1062 !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
1063 Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
1067 return LookupResult(std::move(drawableSurface), matchType, suggestedSize);
1070 bool CanHold(const Cost aCost) const { return aCost <= mMaxCost; }
1072 size_t MaximumCapacity() const { return size_t(mMaxCost); }
1074 void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
1075 const StaticMutexAutoLock& aAutoLock) {
1076 if (!aProvider->Availability().IsPlaceholder()) {
1077 MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
1078 return;
1081 // Reinsert the provider, requesting that Insert() mark it available. This
1082 // may or may not succeed, depending on whether some other decoder has
1083 // beaten us to the punch and inserted a non-placeholder version of this
1084 // surface first, but it's fine either way.
1085 // XXX(seth): This could be implemented more efficiently; we should be able
1086 // to just update our data structures without reinserting.
1087 Insert(aProvider, /* aSetAvailable = */ true, aAutoLock);
1090 void LockImage(const ImageKey aImageKey) {
1091 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1092 if (!cache) {
1093 cache = new ImageSurfaceCache(aImageKey);
1094 mImageCaches.InsertOrUpdate(aImageKey, RefPtr{cache});
1097 cache->SetLocked(true);
1099 // We don't relock this image's existing surfaces right away; instead, the
1100 // image should arrange for Lookup() to touch them if they are still useful.
1103 void UnlockImage(const ImageKey aImageKey,
1104 const StaticMutexAutoLock& aAutoLock) {
1105 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1106 if (!cache || !cache->IsLocked()) {
1107 return; // Already unlocked.
1110 cache->SetLocked(false);
1111 DoUnlockSurfaces(WrapNotNull(cache), /* aStaticOnly = */ false, aAutoLock);
1114 void UnlockEntries(const ImageKey aImageKey,
1115 const StaticMutexAutoLock& aAutoLock) {
1116 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1117 if (!cache || !cache->IsLocked()) {
1118 return; // Already unlocked.
1121 // (Note that we *don't* unlock the per-image cache here; that's the
1122 // difference between this and UnlockImage.)
1123 DoUnlockSurfaces(WrapNotNull(cache),
1124 /* aStaticOnly = */
1125 !StaticPrefs::image_mem_animated_discardable_AtStartup(),
1126 aAutoLock);
1129 already_AddRefed<ImageSurfaceCache> RemoveImage(
1130 const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock) {
1131 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1132 if (!cache) {
1133 return nullptr; // No cached surfaces for this image, so nothing to do.
1136 // Discard all of the cached surfaces for this image.
1137 // XXX(seth): This is O(n^2) since for each item in the cache we are
1138 // removing an element from the costs array. Since n is expected to be
1139 // small, performance should be good, but if usage patterns change we should
1140 // change the data structure used for mCosts.
1141 for (const auto& value : cache->Values()) {
1142 StopTracking(WrapNotNull(value),
1143 /* aIsTracked */ true, aAutoLock);
1146 // The per-image cache isn't needed anymore, so remove it as well.
1147 // This implicitly unlocks the image if it was locked.
1148 mImageCaches.Remove(aImageKey);
1150 // Since we did not actually remove any of the surfaces from the cache
1151 // itself, only stopped tracking them, we should free it outside the lock.
1152 return cache.forget();
1155 void PruneImage(const ImageKey aImageKey,
1156 const StaticMutexAutoLock& aAutoLock) {
1157 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1158 if (!cache) {
1159 return; // No cached surfaces for this image, so nothing to do.
1162 cache->Prune([this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
1163 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1164 // Individual surfaces must be freed outside the lock.
1165 mCachedSurfacesDiscard.AppendElement(aSurface);
1168 MaybeRemoveEmptyCache(aImageKey, cache);
1171 bool InvalidateImage(const ImageKey aImageKey,
1172 const StaticMutexAutoLock& aAutoLock) {
1173 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1174 if (!cache) {
1175 return false; // No cached surfaces for this image, so nothing to do.
1178 bool rv = cache->Invalidate(
1179 [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
1180 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1181 // Individual surfaces must be freed outside the lock.
1182 mCachedSurfacesDiscard.AppendElement(aSurface);
1185 MaybeRemoveEmptyCache(aImageKey, cache);
1186 return rv;
1189 void DiscardAll(const StaticMutexAutoLock& aAutoLock) {
1190 // Remove in order of cost because mCosts is an array and the other data
1191 // structures are all hash tables. Note that locked surfaces are not
1192 // removed, since they aren't present in mCosts.
1193 while (!mCosts.IsEmpty()) {
1194 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
1195 aAutoLock);
1199 void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock) {
1200 // Compute our discardable cost. Since locked surfaces aren't discardable,
1201 // we exclude them.
1202 const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost;
1203 MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up");
1205 // Our target is to raise our available cost by (1 / mDiscardFactor) of our
1206 // discardable cost - in other words, we want to end up with about
1207 // (discardableCost / mDiscardFactor) fewer bytes stored in the surface
1208 // cache after we're done.
1209 const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor);
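// For example, with 80 MB of discardable surfaces and a discard factor of 4,
// the target is to free roughly 20 MB.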
1211 if (targetCost > mMaxCost - mLockedCost) {
1212 MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard");
1213 DiscardAll(aAutoLock);
1214 return;
1217 // Discard surfaces until we've reduced our cost to our target cost.
1218 while (mAvailableCost < targetCost) {
1219 MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done");
1220 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
1221 aAutoLock);
1225 void TakeDiscard(nsTArray<RefPtr<CachedSurface>>& aDiscard,
1226 const StaticMutexAutoLock& aAutoLock) {
1227 MOZ_ASSERT(aDiscard.IsEmpty());
1228 aDiscard = std::move(mCachedSurfacesDiscard);
1231 already_AddRefed<CachedSurface> GetSurfaceForResetAnimation(
1232 const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
1233 const StaticMutexAutoLock& aAutoLock) {
1234 RefPtr<CachedSurface> surface;
1236 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1237 if (!cache) {
1238 // No cached surfaces for this image.
1239 return surface.forget();
1242 surface = cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
1243 return surface.forget();
1246 void LockSurface(NotNull<CachedSurface*> aSurface,
1247 const StaticMutexAutoLock& aAutoLock) {
1248 if (aSurface->IsPlaceholder() || aSurface->IsLocked()) {
1249 return;
1252 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1254 // Lock the surface. This can fail.
1255 aSurface->SetLocked(true);
1256 DebugOnly<bool> tracked = StartTracking(aSurface, aAutoLock);
1257 MOZ_ASSERT(tracked);
1260 size_t ShallowSizeOfIncludingThis(
1261 MallocSizeOf aMallocSizeOf, const StaticMutexAutoLock& aAutoLock) const {
1262 size_t bytes =
1263 aMallocSizeOf(this) + mCosts.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1264 mImageCaches.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1265 mCachedSurfacesDiscard.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1266 mExpirationTracker.ShallowSizeOfExcludingThis(aMallocSizeOf);
1267 for (const auto& data : mImageCaches.Values()) {
1268 bytes += data->ShallowSizeOfIncludingThis(aMallocSizeOf);
1270 return bytes;
1273 NS_IMETHOD
1274 CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
1275 bool aAnonymize) override {
1276 StaticMutexAutoLock lock(sInstanceMutex);
1278 uint32_t lockedImageCount = 0;
1279 uint32_t totalSurfaceCount = 0;
1280 uint32_t lockedSurfaceCount = 0;
1281 for (const auto& cache : mImageCaches.Values()) {
1282 totalSurfaceCount += cache->Count();
1283 if (cache->IsLocked()) {
1284 ++lockedImageCount;
1286 for (const auto& value : cache->Values()) {
1287 if (value->IsLocked()) {
1288 ++lockedSurfaceCount;
1293 // clang-format off
1294 // We have explicit memory reporting for the surface cache which is more
1295 // accurate than the cost metrics we report here, but these metrics are
1296 // still useful to report, since they control the cache's behavior.
1297 MOZ_COLLECT_REPORT(
1298 "explicit/images/cache/overhead", KIND_HEAP, UNITS_BYTES,
1299 ShallowSizeOfIncludingThis(SurfaceCacheMallocSizeOf, lock),
1300 "Memory used by the surface cache data structures, excluding surface data.");
1302 MOZ_COLLECT_REPORT(
1303 "imagelib-surface-cache-estimated-total",
1304 KIND_OTHER, UNITS_BYTES, (mMaxCost - mAvailableCost),
1305 "Estimated total memory used by the imagelib surface cache.");
1307 MOZ_COLLECT_REPORT(
1308 "imagelib-surface-cache-estimated-locked",
1309 KIND_OTHER, UNITS_BYTES, mLockedCost,
1310 "Estimated memory used by locked surfaces in the imagelib surface cache.");
1312 MOZ_COLLECT_REPORT(
1313 "imagelib-surface-cache-tracked-cost-count",
1314 KIND_OTHER, UNITS_COUNT, mCosts.Length(),
1315 "Total number of surfaces tracked for cost (and expiry) in the imagelib surface cache.");
1317 MOZ_COLLECT_REPORT(
1318 "imagelib-surface-cache-tracked-expiry-count",
1319 KIND_OTHER, UNITS_COUNT, mExpirationTracker.Length(lock),
1320 "Total number of surfaces tracked for expiry (and cost) in the imagelib surface cache.");
1322 MOZ_COLLECT_REPORT(
1323 "imagelib-surface-cache-image-count",
1324 KIND_OTHER, UNITS_COUNT, mImageCaches.Count(),
1325 "Total number of images in the imagelib surface cache.");
1327 MOZ_COLLECT_REPORT(
1328 "imagelib-surface-cache-locked-image-count",
1329 KIND_OTHER, UNITS_COUNT, lockedImageCount,
1330 "Total number of locked images in the imagelib surface cache.");
1332 MOZ_COLLECT_REPORT(
1333 "imagelib-surface-cache-image-surface-count",
1334 KIND_OTHER, UNITS_COUNT, totalSurfaceCount,
1335 "Total number of surfaces in the imagelib surface cache.");
1337 MOZ_COLLECT_REPORT(
1338 "imagelib-surface-cache-locked-surfaces-count",
1339 KIND_OTHER, UNITS_COUNT, lockedSurfaceCount,
1340 "Total number of locked surfaces in the imagelib surface cache.");
1342 MOZ_COLLECT_REPORT(
1343 "imagelib-surface-cache-overflow-count",
1344 KIND_OTHER, UNITS_COUNT, mOverflowCount,
1345 "Count of how many times the surface cache has hit its capacity and been "
1346 "unable to insert a new surface.");
1348 MOZ_COLLECT_REPORT(
1349 "imagelib-surface-cache-tracking-failure-count",
1350 KIND_OTHER, UNITS_COUNT, mTrackingFailureCount,
1351 "Count of how many times the surface cache has failed to begin tracking a "
1352 "given surface.");
1354 MOZ_COLLECT_REPORT(
1355 "imagelib-surface-cache-already-present-count",
1356 KIND_OTHER, UNITS_COUNT, mAlreadyPresentCount,
1357 "Count of how many times the surface cache has failed to insert a surface "
1358 "because it is already present.");
1360 MOZ_COLLECT_REPORT(
1361 "imagelib-surface-cache-table-failure-count",
1362 KIND_OTHER, UNITS_COUNT, mTableFailureCount,
1363 "Count of how many times the surface cache has failed to insert a surface "
1364 "because a hash table could not accept an entry.");
1365 // clang-format on
1367 return NS_OK;
1370 void CollectSizeOfSurfaces(const ImageKey aImageKey,
1371 nsTArray<SurfaceMemoryCounter>& aCounters,
1372 MallocSizeOf aMallocSizeOf,
1373 const StaticMutexAutoLock& aAutoLock) {
1374 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1375 if (!cache) {
1376 return; // No surfaces for this image.
1379 // Report all surfaces in the per-image cache.
1380 cache->CollectSizeOfSurfaces(
1381 aCounters, aMallocSizeOf,
1382 [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
1383 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1384 // Individual surfaces must be freed outside the lock.
1385 mCachedSurfacesDiscard.AppendElement(aSurface);
1388 MaybeRemoveEmptyCache(aImageKey, cache);
1391 void ReleaseImageOnMainThread(already_AddRefed<image::Image>&& aImage,
1392 const StaticMutexAutoLock& aAutoLock) {
1393 RefPtr<image::Image> image = aImage;
1394 if (!image) {
1395 return;
1398 bool needsDispatch = mReleasingImagesOnMainThread.IsEmpty();
1399 mReleasingImagesOnMainThread.AppendElement(image);
1401 if (!needsDispatch || gXPCOMThreadsShutDown) {
1402 // Either there is already an ongoing task for ClearReleasingImages() or
1403 // it's too late in shutdown to dispatch.
1404 return;
1407 NS_DispatchToMainThread(NS_NewRunnableFunction(
1408 "SurfaceCacheImpl::ReleaseImageOnMainThread",
1409 []() -> void { SurfaceCache::ClearReleasingImages(); }));
1412 void TakeReleasingImages(nsTArray<RefPtr<image::Image>>& aImage,
1413 const StaticMutexAutoLock& aAutoLock) {
1414 MOZ_ASSERT(NS_IsMainThread());
1415 aImage.SwapElements(mReleasingImagesOnMainThread);
1418 private:
1419 already_AddRefed<ImageSurfaceCache> GetImageCache(const ImageKey aImageKey) {
1420 RefPtr<ImageSurfaceCache> imageCache;
1421 mImageCaches.Get(aImageKey, getter_AddRefs(imageCache));
1422 return imageCache.forget();
1425 void MaybeRemoveEmptyCache(const ImageKey aImageKey,
1426 ImageSurfaceCache* aCache) {
1427 // Remove the per-image cache if it's unneeded now. Keep it if the image is
1428 // locked, since the per-image cache is where we store that state. Note that
1429 // we don't push it into mImageCachesDiscard because all of its surfaces
1430 // have been removed, so it is safe to free while holding the lock.
1431 if (aCache->IsEmpty() && !aCache->IsLocked()) {
1432 mImageCaches.Remove(aImageKey);
1436 // This is similar to CanHold() except that it takes into account the costs of
1437 // locked surfaces. It's used internally in Insert(), but it's not exposed
1438 // publicly because we permit multithreaded access to the surface cache, which
1439 // means that the result would be meaningless: another thread could insert a
1440 // surface or lock an image at any time.
1441 bool CanHoldAfterDiscarding(const Cost aCost) const {
1442 return aCost <= mMaxCost - mLockedCost;
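// For example, with a 512 MB mMaxCost and 128 MB of locked surfaces, a new
// surface can be accommodated only if it costs at most 384 MB, no matter how
// much unlocked data could be discarded.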
1445 bool MarkUsed(NotNull<CachedSurface*> aSurface,
1446 NotNull<ImageSurfaceCache*> aCache,
1447 const StaticMutexAutoLock& aAutoLock) {
1448 if (aCache->IsLocked()) {
1449 LockSurface(aSurface, aAutoLock);
1450 return true;
1453 nsresult rv = mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock);
1454 if (NS_WARN_IF(NS_FAILED(rv))) {
1455 // If mark used fails, it is because it failed to reinsert the surface
1456 // after removing it from the tracker. Thus we need to update our
1457 // own accounting but otherwise expect it to be untracked.
1458 StopTracking(aSurface, /* aIsTracked */ false, aAutoLock);
1459 return false;
1461 return true;
1464 void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache, bool aStaticOnly,
1465 const StaticMutexAutoLock& aAutoLock) {
1466 AutoTArray<NotNull<CachedSurface*>, 8> discard;
1468 // Unlock all the surfaces the per-image cache is holding.
1469 for (const auto& value : aCache->Values()) {
1470 NotNull<CachedSurface*> surface = WrapNotNull(value);
1471 if (surface->IsPlaceholder() || !surface->IsLocked()) {
1472 continue;
1474 if (aStaticOnly &&
1475 surface->GetSurfaceKey().Playback() != PlaybackType::eStatic) {
1476 continue;
1478 StopTracking(surface, /* aIsTracked */ true, aAutoLock);
1479 surface->SetLocked(false);
1480 if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
1481 discard.AppendElement(surface);
1485 // Discard any that we failed to track.
1486 for (auto iter = discard.begin(); iter != discard.end(); ++iter) {
1487 Remove(*iter, /* aStopTracking */ false, aAutoLock);
1491 void RemoveEntry(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
1492 const StaticMutexAutoLock& aAutoLock) {
1493 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1494 if (!cache) {
1495 return; // No cached surfaces for this image.
1498 RefPtr<CachedSurface> surface =
1499 cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
1500 if (!surface) {
1501 return; // Lookup in the per-image cache missed.
1504 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
1507 class SurfaceTracker final
1508 : public ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
1509 StaticMutexAutoLock> {
1510 public:
1511 explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS)
1512 : ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
1513 StaticMutexAutoLock>(
1514 aSurfaceCacheExpirationTimeMS, "SurfaceTracker") {}
1516 protected:
1517 void NotifyExpiredLocked(CachedSurface* aSurface,
1518 const StaticMutexAutoLock& aAutoLock) override {
1519 sInstance->Remove(WrapNotNull(aSurface), /* aStopTracking */ true,
1520 aAutoLock);
1523 void NotifyHandlerEndLocked(const StaticMutexAutoLock& aAutoLock) override {
1524 sInstance->TakeDiscard(mDiscard, aAutoLock);
1527 void NotifyHandlerEnd() override {
1528 nsTArray<RefPtr<CachedSurface>> discard(std::move(mDiscard));
1531 StaticMutex& GetMutex() override { return sInstanceMutex; }
1533 nsTArray<RefPtr<CachedSurface>> mDiscard;
1536 class MemoryPressureObserver final : public nsIObserver {
1537 public:
1538 NS_DECL_ISUPPORTS
1540 NS_IMETHOD Observe(nsISupports*, const char* aTopic,
1541 const char16_t*) override {
1542 nsTArray<RefPtr<CachedSurface>> discard;
1544 StaticMutexAutoLock lock(sInstanceMutex);
1545 if (sInstance && strcmp(aTopic, "memory-pressure") == 0) {
1546 sInstance->DiscardForMemoryPressure(lock);
1547 sInstance->TakeDiscard(discard, lock);
1550 return NS_OK;
1553 private:
1554 virtual ~MemoryPressureObserver() {}
1557 nsTArray<CostEntry> mCosts;
1558 nsRefPtrHashtable<nsPtrHashKey<Image>, ImageSurfaceCache> mImageCaches;
1559 nsTArray<RefPtr<CachedSurface>> mCachedSurfacesDiscard;
1560 SurfaceTracker mExpirationTracker;
1561 RefPtr<MemoryPressureObserver> mMemoryPressureObserver;
1562 nsTArray<RefPtr<image::Image>> mReleasingImagesOnMainThread;
1563 const uint32_t mDiscardFactor;
1564 const Cost mMaxCost;
1565 Cost mAvailableCost;
1566 Cost mLockedCost;
1567 size_t mOverflowCount;
1568 size_t mAlreadyPresentCount;
1569 size_t mTableFailureCount;
1570 size_t mTrackingFailureCount;
1573 NS_IMPL_ISUPPORTS(SurfaceCacheImpl, nsIMemoryReporter)
1574 NS_IMPL_ISUPPORTS(SurfaceCacheImpl::MemoryPressureObserver, nsIObserver)
1576 ///////////////////////////////////////////////////////////////////////////////
1577 // Public API
1578 ///////////////////////////////////////////////////////////////////////////////

/* static */
void SurfaceCache::Initialize() {
  // Initialize preferences.
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(!sInstance, "Shouldn't initialize more than once");

  // See StaticPrefs for the default values of these preferences.

  // Length of time before an unused surface is removed from the cache, in
  // milliseconds.
  uint32_t surfaceCacheExpirationTimeMS =
      StaticPrefs::image_mem_surfacecache_min_expiration_ms_AtStartup();

  // What fraction of the memory used by the surface cache we should discard
  // when we get a memory pressure notification. This value is interpreted as
  // 1/N, so 1 means to discard everything, 2 means to discard about half of
  // the memory we're using, and so forth. We clamp it to avoid division by
  // zero.
  uint32_t surfaceCacheDiscardFactor =
      max(StaticPrefs::image_mem_surfacecache_discard_factor_AtStartup(), 1u);

  // Maximum size of the surface cache, in kilobytes.
  uint64_t surfaceCacheMaxSizeKB =
      StaticPrefs::image_mem_surfacecache_max_size_kb_AtStartup();

  if (sizeof(uintptr_t) <= 4) {
    // Limit surface cache to 1 GB if our address space is 32 bit.
    surfaceCacheMaxSizeKB = 1024 * 1024;
  }

  // A knob determining the actual size of the surface cache. Currently the
  // cache is (size of main memory) / (surface cache size factor) KB
  // or (surface cache max size) KB, whichever is smaller. The formula
  // may change in the future, though.
  // For example, a value of 4 would yield a 256MB cache on a 1GB machine.
  // The smallest machines we are likely to run this code on have 256MB
  // of memory, which would yield a 64MB cache on this setting.
  // We clamp this value to avoid division by zero.
  uint32_t surfaceCacheSizeFactor =
      max(StaticPrefs::image_mem_surfacecache_size_factor_AtStartup(), 1u);

  // Compute the size of the surface cache.
  uint64_t memorySize = PR_GetPhysicalMemorySize();
  if (memorySize == 0) {
    MOZ_ASSERT_UNREACHABLE("PR_GetPhysicalMemorySize not implemented here");
    memorySize = 256 * 1024 * 1024;  // Fall back to 256MB.
  }
  uint64_t proposedSize = memorySize / surfaceCacheSizeFactor;
  uint64_t surfaceCacheSizeBytes =
      min(proposedSize, surfaceCacheMaxSizeKB * 1024);
  uint32_t finalSurfaceCacheSizeBytes =
      min(surfaceCacheSizeBytes, uint64_t(UINT32_MAX));
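  // For illustration (example numbers only, not pref defaults): with 8 GiB of
  // physical memory and a size factor of 4, proposedSize is 2 GiB; the final
  // capacity is then the smaller of that and surfaceCacheMaxSizeKB * 1024,
  // clamped to UINT32_MAX.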

  // Create the surface cache singleton with the requested settings. Note that
  // the size is a limit that the cache may not grow beyond, but we do not
  // actually allocate any storage for surfaces at this time.
  sInstance = new SurfaceCacheImpl(surfaceCacheExpirationTimeMS,
                                   surfaceCacheDiscardFactor,
                                   finalSurfaceCacheSizeBytes);
  sInstance->InitMemoryReporter();
}

/* static */
void SurfaceCache::Shutdown() {
  RefPtr<SurfaceCacheImpl> cache;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?");
    cache = sInstance.forget();
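    // The reference now held by |cache| outlives this scope, so the
    // SurfaceCacheImpl is destroyed only after sInstanceMutex has been
    // released, not while holding it.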
  }
}

/* static */
LookupResult SurfaceCache::Lookup(const ImageKey aImageKey,
                                  const SurfaceKey& aSurfaceKey,
                                  bool aMarkUsed) {
  nsTArray<RefPtr<CachedSurface>> discard;
  LookupResult rv(MatchType::NOT_FOUND);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->Lookup(aImageKey, aSurfaceKey, lock, aMarkUsed);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
LookupResult SurfaceCache::LookupBestMatch(const ImageKey aImageKey,
                                           const SurfaceKey& aSurfaceKey,
                                           bool aMarkUsed) {
  nsTArray<RefPtr<CachedSurface>> discard;
  LookupResult rv(MatchType::NOT_FOUND);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock, aMarkUsed);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
InsertOutcome SurfaceCache::Insert(NotNull<ISurfaceProvider*> aProvider) {
  nsTArray<RefPtr<CachedSurface>> discard;
  InsertOutcome rv(InsertOutcome::FAILURE);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
bool SurfaceCache::CanHold(const IntSize& aSize,
                           uint32_t aBytesPerPixel /* = 4 */) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return false;
  }

  Cost cost = ComputeCost(aSize, aBytesPerPixel);
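  // For a sense of scale: a 1000x1000 surface at 4 bytes per pixel works out
  // to a cost of 4,000,000 bytes under this model.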
  return sInstance->CanHold(cost);
}

/* static */
bool SurfaceCache::CanHold(size_t aSize) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return false;
  }

  return sInstance->CanHold(aSize);
}

/* static */
void SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return;
  }

  sInstance->SurfaceAvailable(aProvider, lock);
}

/* static */
void SurfaceCache::LockImage(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->LockImage(aImageKey);
  }
}

/* static */
void SurfaceCache::UnlockImage(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->UnlockImage(aImageKey, lock);
  }
}

/* static */
void SurfaceCache::UnlockEntries(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->UnlockEntries(aImageKey, lock);
  }
}

/* static */
void SurfaceCache::RemoveImage(const ImageKey aImageKey) {
  RefPtr<ImageSurfaceCache> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      discard = sInstance->RemoveImage(aImageKey, lock);
    }
  }
}

/* static */
void SurfaceCache::PruneImage(const ImageKey aImageKey) {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->PruneImage(aImageKey, lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
}

/* static */
bool SurfaceCache::InvalidateImage(const ImageKey aImageKey) {
  nsTArray<RefPtr<CachedSurface>> discard;
  bool rv = false;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      rv = sInstance->InvalidateImage(aImageKey, lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
  return rv;
}

/* static */
void SurfaceCache::DiscardAll() {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->DiscardAll(lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
}

/* static */
void SurfaceCache::ResetAnimation(const ImageKey aImageKey,
                                  const SurfaceKey& aSurfaceKey) {
  RefPtr<CachedSurface> surface;
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return;
    }

    surface =
        sInstance->GetSurfaceForResetAnimation(aImageKey, aSurfaceKey, lock);
    sInstance->TakeDiscard(discard, lock);
  }

  // Calling Reset will acquire AnimationSurfaceProvider::mFramesMutex. In
  // other places we acquire mFramesMutex and then call into the surface cache
  // (acquiring the surface cache mutex), so that establishes a lock order we
  // must obey: Reset may only be called after the surface cache mutex has
  // been released.
  if (surface) {
    DrawableSurface drawableSurface =
        surface->GetDrawableSurfaceEvenIfPlaceholder();
    if (drawableSurface) {
      MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
                 "ResetAnimation() not returning an exact match?");

      drawableSurface.Reset();
    }
  }
}

/* static */
void SurfaceCache::CollectSizeOfSurfaces(
    const ImageKey aImageKey, nsTArray<SurfaceMemoryCounter>& aCounters,
    MallocSizeOf aMallocSizeOf) {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return;
    }

    sInstance->CollectSizeOfSurfaces(aImageKey, aCounters, aMallocSizeOf,
                                     lock);
    sInstance->TakeDiscard(discard, lock);
  }
}

/* static */
size_t SurfaceCache::MaximumCapacity() {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return 0;
  }

  return sInstance->MaximumCapacity();
}

/* static */
bool SurfaceCache::IsLegalSize(const IntSize& aSize) {
  // reject over-wide or over-tall images
  const int32_t k64KLimit = 0x0000FFFF;
  if (MOZ_UNLIKELY(aSize.width > k64KLimit || aSize.height > k64KLimit)) {
    NS_WARNING("image too big");
    return false;
  }

  // protect against invalid sizes
  if (MOZ_UNLIKELY(aSize.height <= 0 || aSize.width <= 0)) {
    return false;
  }

  // Check that the required buffer size doesn't overflow a 32-bit integer.
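  // For illustration: a 40000x40000 request passes the 64K dimension checks
  // above, but 40000 * 40000 * 4 = 6,400,000,000 bytes does not fit in an
  // int32_t, so it is rejected here.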
  CheckedInt32 requiredBytes =
      CheckedInt32(aSize.width) * CheckedInt32(aSize.height) * 4;
  if (MOZ_UNLIKELY(!requiredBytes.isValid())) {
    NS_WARNING("width or height too large");
    return false;
  }
  return true;
}

IntSize SurfaceCache::ClampVectorSize(const IntSize& aSize) {
  // If we exceed the maximum, we need to scale the size downwards to fit.
  // We shouldn't get here with a size that is significantly larger, because
  // VectorImage::UseSurfaceCacheForSize should prevent us from requesting a
  // rasterized version of a surface greater than 4x the maximum.
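  // Illustrative numbers (not a real pref value): with maxSizeKB = 5120 and a
  // requested size of 4000x4000 (proposedKB = 4000 * 4000 / 256 = 62500),
  // scale = sqrt(5120 / 62500) ~= 0.286, so the result is about 1144x1144.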
  int32_t maxSizeKB =
      StaticPrefs::image_cache_max_rasterized_svg_threshold_kb();
  if (maxSizeKB <= 0) {
    return aSize;
  }

  int64_t proposedKB = int64_t(aSize.width) * aSize.height / 256;
  if (maxSizeKB >= proposedKB) {
    return aSize;
  }

  double scale = sqrt(double(maxSizeKB) / proposedKB);
  return IntSize(int32_t(scale * aSize.width), int32_t(scale * aSize.height));
}

IntSize SurfaceCache::ClampSize(ImageKey aImageKey, const IntSize& aSize) {
  if (aImageKey->GetType() != imgIContainer::TYPE_VECTOR) {
    return aSize;
  }

  return ClampVectorSize(aSize);
}

/* static */
void SurfaceCache::ReleaseImageOnMainThread(
    already_AddRefed<image::Image> aImage, bool aAlwaysProxy) {
  if (NS_IsMainThread() && !aAlwaysProxy) {
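    // Already on the main thread and not required to proxy: adopt the
    // reference into a local RefPtr so it is released right here when |image|
    // goes out of scope.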
    RefPtr<image::Image> image = std::move(aImage);
    return;
  }

  // Don't try to dispatch the release after shutdown; we'll just leak the
  // runnable.
  if (gXPCOMThreadsShutDown) {
    return;
  }

  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    sInstance->ReleaseImageOnMainThread(std::move(aImage), lock);
  } else {
    NS_ReleaseOnMainThread("SurfaceCache::ReleaseImageOnMainThread",
                           std::move(aImage), /* aAlwaysProxy */ true);
  }
}

/* static */
void SurfaceCache::ClearReleasingImages() {
  MOZ_ASSERT(NS_IsMainThread());

  nsTArray<RefPtr<image::Image>> images;
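  // |images| is filled under the lock below but destroyed here on the main
  // thread after the lock has been released, which is where the image
  // references are actually dropped.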
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->TakeReleasingImages(images, lock);
    }
  }
}

}  // namespace image
}  // namespace mozilla