1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 /**
7 * SurfaceCache is a service for caching temporary surfaces in imagelib.
8 */
10 #include "SurfaceCache.h"
12 #include <algorithm>
13 #include <utility>
15 #include "ISurfaceProvider.h"
16 #include "Image.h"
17 #include "LookupResult.h"
18 #include "ShutdownTracker.h"
19 #include "gfx2DGlue.h"
20 #include "gfxPlatform.h"
21 #include "imgFrame.h"
22 #include "mozilla/AppShutdown.h"
23 #include "mozilla/Assertions.h"
24 #include "mozilla/Attributes.h"
25 #include "mozilla/CheckedInt.h"
26 #include "mozilla/DebugOnly.h"
27 #include "mozilla/Likely.h"
28 #include "mozilla/RefPtr.h"
29 #include "mozilla/StaticMutex.h"
30 #include "mozilla/StaticPrefs_image.h"
31 #include "mozilla/StaticPtr.h"
32 #include "mozilla/Tuple.h"
33 #include "nsExpirationTracker.h"
34 #include "nsHashKeys.h"
35 #include "nsIMemoryReporter.h"
36 #include "nsRefPtrHashtable.h"
37 #include "nsSize.h"
38 #include "nsTArray.h"
39 #include "Orientation.h"
40 #include "prsystem.h"
42 using std::max;
43 using std::min;
45 namespace mozilla {
47 using namespace gfx;
49 namespace image {
51 MOZ_DEFINE_MALLOC_SIZE_OF(SurfaceCacheMallocSizeOf)
53 class CachedSurface;
54 class SurfaceCacheImpl;
56 ///////////////////////////////////////////////////////////////////////////////
57 // Static Data
58 ///////////////////////////////////////////////////////////////////////////////
60 // The single surface cache instance.
61 static StaticRefPtr<SurfaceCacheImpl> sInstance;
63 // The mutex protecting the surface cache.
64 static StaticMutex sInstanceMutex MOZ_UNANNOTATED;
66 ///////////////////////////////////////////////////////////////////////////////
67 // SurfaceCache Implementation
68 ///////////////////////////////////////////////////////////////////////////////
70 /**
71 * Cost models the cost of storing a surface in the cache. Right now, this is
72 * simply an estimate of the size of the surface in bytes, but in the future it
73 * may be worth taking into account the cost of rematerializing the surface as
74 * well.
76 typedef size_t Cost;
78 static Cost ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel) {
79 MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4);
80 return aSize.width * aSize.height * aBytesPerPixel;
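// A quick sanity check of the cost arithmetic above (illustrative numbers; the
// format names are assumptions, only the byte counts follow from the code): a
// 1920x1080 surface at 4 bytes per pixel costs 1920 * 1080 * 4 = 8,294,400
// bytes (~7.9 MiB), while the same surface at 1 byte per pixel (e.g. an
// alpha-only mask) costs 2,073,600 bytes (~2 MiB).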
83 /**
84 * Since we want to be able to make eviction decisions based on cost, we need to
85 * be able to look up the CachedSurface which has a certain cost as well as the
86 * cost associated with a certain CachedSurface. To make this possible, in data
87 * structures we actually store a CostEntry, which contains a weak pointer to
88 * its associated surface.
90 * To make usage of the weak pointer safe, SurfaceCacheImpl always calls
91 * StartTracking after a surface is stored in the cache and StopTracking before
92 * it is removed.
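// A minimal sketch of the intended call order (the names refer to the
// SurfaceCacheImpl methods defined later in this file; the surrounding calls
// are simplified):
//
//   cache->Insert(surface);                              // stored in the cache
//   StartTracking(surface, lock);                        // weak pointer now safe
//   ...
//   StopTracking(surface, /* aIsTracked */ true, lock);  // weak pointer retired
//   cache->Remove(surface);                              // surface may now die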
94 class CostEntry {
95 public:
96 CostEntry(NotNull<CachedSurface*> aSurface, Cost aCost)
97 : mSurface(aSurface), mCost(aCost) {}
99 NotNull<CachedSurface*> Surface() const { return mSurface; }
100 Cost GetCost() const { return mCost; }
102 bool operator==(const CostEntry& aOther) const {
103 return mSurface == aOther.mSurface && mCost == aOther.mCost;
106 bool operator<(const CostEntry& aOther) const {
107 return mCost < aOther.mCost ||
108 (mCost == aOther.mCost && mSurface < aOther.mSurface);
111 private:
112 NotNull<CachedSurface*> mSurface;
113 Cost mCost;
117 * A CachedSurface associates a surface with a key that uniquely identifies that
118 * surface.
120 class CachedSurface {
121 ~CachedSurface() {}
123 public:
124 MOZ_DECLARE_REFCOUNTED_TYPENAME(CachedSurface)
125 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface)
127 explicit CachedSurface(NotNull<ISurfaceProvider*> aProvider)
128 : mProvider(aProvider), mIsLocked(false) {}
130 DrawableSurface GetDrawableSurface() const {
131 if (MOZ_UNLIKELY(IsPlaceholder())) {
132 MOZ_ASSERT_UNREACHABLE("Called GetDrawableSurface() on a placeholder");
133 return DrawableSurface();
136 return mProvider->Surface();
139 DrawableSurface GetDrawableSurfaceEvenIfPlaceholder() const {
140 return mProvider->Surface();
143 void SetLocked(bool aLocked) {
144 if (IsPlaceholder()) {
145 return; // Can't lock a placeholder.
148 // Update both our state and our provider's state. Some surface providers
149 // are permanently locked; maintaining our own locking state enables us to
150 // respect SetLocked() even when it's meaningless from the provider's
151 // perspective.
152 mIsLocked = aLocked;
153 mProvider->SetLocked(aLocked);
156 bool IsLocked() const {
157 return !IsPlaceholder() && mIsLocked && mProvider->IsLocked();
160 void SetCannotSubstitute() {
161 mProvider->Availability().SetCannotSubstitute();
163 bool CannotSubstitute() const {
164 return mProvider->Availability().CannotSubstitute();
167 bool IsPlaceholder() const {
168 return mProvider->Availability().IsPlaceholder();
170 bool IsDecoded() const { return !IsPlaceholder() && mProvider->IsFinished(); }
172 ImageKey GetImageKey() const { return mProvider->GetImageKey(); }
173 const SurfaceKey& GetSurfaceKey() const { return mProvider->GetSurfaceKey(); }
174 nsExpirationState* GetExpirationState() { return &mExpirationState; }
176 CostEntry GetCostEntry() {
177 return image::CostEntry(WrapNotNull(this), mProvider->LogicalSizeInBytes());
180 size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
181 return aMallocSizeOf(this) + aMallocSizeOf(mProvider.get());
184 void InvalidateRecording() { mProvider->InvalidateRecording(); }
186 // A helper type used by SurfaceCacheImpl::CollectSizeOfSurfaces.
187 struct MOZ_STACK_CLASS SurfaceMemoryReport {
188 SurfaceMemoryReport(nsTArray<SurfaceMemoryCounter>& aCounters,
189 MallocSizeOf aMallocSizeOf)
190 : mCounters(aCounters), mMallocSizeOf(aMallocSizeOf) {}
192 void Add(NotNull<CachedSurface*> aCachedSurface, bool aIsFactor2) {
193 if (aCachedSurface->IsPlaceholder()) {
194 return;
197 // Record the memory used by the ISurfaceProvider. This may not have a
198 // straightforward relationship to the size of the surface that
199 // DrawableRef() returns if the surface is generated dynamically. (i.e.,
200 // for surfaces with PlaybackType::eAnimated.)
201 aCachedSurface->mProvider->AddSizeOfExcludingThis(
202 mMallocSizeOf, [&](ISurfaceProvider::AddSizeOfCbData& aMetadata) {
203 SurfaceMemoryCounter counter(aCachedSurface->GetSurfaceKey(),
204 aCachedSurface->IsLocked(),
205 aCachedSurface->CannotSubstitute(),
206 aIsFactor2, aMetadata.mFinished);
208 counter.Values().SetDecodedHeap(aMetadata.mHeapBytes);
209 counter.Values().SetDecodedNonHeap(aMetadata.mNonHeapBytes);
210 counter.Values().SetDecodedUnknown(aMetadata.mUnknownBytes);
211 counter.Values().SetExternalHandles(aMetadata.mExternalHandles);
212 counter.Values().SetFrameIndex(aMetadata.mIndex);
213 counter.Values().SetExternalId(aMetadata.mExternalId);
214 counter.Values().SetSurfaceTypes(aMetadata.mTypes);
216 mCounters.AppendElement(counter);
220 private:
221 nsTArray<SurfaceMemoryCounter>& mCounters;
222 MallocSizeOf mMallocSizeOf;
225 private:
226 nsExpirationState mExpirationState;
227 NotNull<RefPtr<ISurfaceProvider>> mProvider;
228 bool mIsLocked;
231 static int64_t AreaOfIntSize(const IntSize& aSize) {
232 return static_cast<int64_t>(aSize.width) * static_cast<int64_t>(aSize.height);
236 * An ImageSurfaceCache is a per-image surface cache. For correctness we must be
237 * able to remove all surfaces associated with an image when the image is
238 * destroyed or invalidated. Since this will happen frequently, it makes sense
239 * to make it cheap by storing the surfaces for each image separately.
241 * ImageSurfaceCache also keeps track of whether its associated image is locked
242 * or unlocked.
244 * The cache may also enter "factor of 2" mode which occurs when the number of
245 * surfaces in the cache exceeds the "image.cache.factor2.threshold-surfaces"
246 * pref plus the number of native sizes of the image. When in "factor of 2"
247 * mode, the cache will strongly favour sizes which are a factor of 2 of the
248 * largest native size. It accomplishes this by suggesting a factor of 2 size
249 * when lookups fail and substituting the nearest factor of 2 surface to the
250  * ideal size as the "best" available (a "substitute because best" rather than
251  * a "substitute because not found" result). This allows us to minimize memory
252  * consumption and CPU time spent decoding when a website requires many variants of the same surface.
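// An illustrative walk-through (hypothetical sizes; the selection logic lives
// in SuggestedSizeInternal() and CompareArea() below). Assume a raster image
// whose largest native size is 512x512:
//
//   - A request for 300x300 suggests 512x512: halving to 256x256 would fall
//     below the ideal area, so the native size wins.
//   - A request for 100x100 suggests 128x128: 512 -> 256 -> 128 each stay at or
//     above the ideal area while getting closer to it, but 64x64 is too small.
//
// Requests for 300x300, 305x305, 310x310, ... therefore all share a single
// 512x512 decode instead of producing several nearly identical surfaces.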
254 class ImageSurfaceCache {
255 ~ImageSurfaceCache() {}
257 public:
258 explicit ImageSurfaceCache(const ImageKey aImageKey)
259 : mLocked(false),
260 mFactor2Mode(false),
261 mFactor2Pruned(false),
262 mIsVectorImage(aImageKey->GetType() == imgIContainer::TYPE_VECTOR) {}
264 MOZ_DECLARE_REFCOUNTED_TYPENAME(ImageSurfaceCache)
265 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageSurfaceCache)
267 typedef nsRefPtrHashtable<nsGenericHashKey<SurfaceKey>, CachedSurface>
268 SurfaceTable;
270 auto Values() const { return mSurfaces.Values(); }
271 uint32_t Count() const { return mSurfaces.Count(); }
272 bool IsEmpty() const { return mSurfaces.Count() == 0; }
274 size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
275 size_t bytes = aMallocSizeOf(this) +
276 mSurfaces.ShallowSizeOfExcludingThis(aMallocSizeOf);
277 for (const auto& value : Values()) {
278 bytes += value->ShallowSizeOfIncludingThis(aMallocSizeOf);
280 return bytes;
283 [[nodiscard]] bool Insert(NotNull<CachedSurface*> aSurface) {
284 MOZ_ASSERT(!mLocked || aSurface->IsPlaceholder() || aSurface->IsLocked(),
285 "Inserting an unlocked surface for a locked image");
286 const auto& surfaceKey = aSurface->GetSurfaceKey();
287 if (surfaceKey.Region()) {
288 // We don't allow substitutes for surfaces with regions, so we don't want
289 // to allow factor of 2 mode pruning to release these surfaces.
290 aSurface->SetCannotSubstitute();
292 return mSurfaces.InsertOrUpdate(surfaceKey, RefPtr<CachedSurface>{aSurface},
293 fallible);
296 already_AddRefed<CachedSurface> Remove(NotNull<CachedSurface*> aSurface) {
297 MOZ_ASSERT(mSurfaces.GetWeak(aSurface->GetSurfaceKey()),
298 "Should not be removing a surface we don't have");
300 RefPtr<CachedSurface> surface;
301 mSurfaces.Remove(aSurface->GetSurfaceKey(), getter_AddRefs(surface));
302 AfterMaybeRemove();
303 return surface.forget();
306 already_AddRefed<CachedSurface> Lookup(const SurfaceKey& aSurfaceKey,
307 bool aForAccess) {
308 RefPtr<CachedSurface> surface;
309 mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface));
311 if (aForAccess) {
312 if (surface) {
313 // We don't want to allow factor of 2 mode pruning to release surfaces
314 // for which the callers will accept no substitute.
315 surface->SetCannotSubstitute();
316 } else if (!mFactor2Mode) {
317 // If no exact match is found, and this is for use rather than internal
318 // accounting (i.e. insert and removal), we know this will trigger a
319 // decode. Make sure we switch now to factor of 2 mode if necessary.
320 MaybeSetFactor2Mode();
324 return surface.forget();
328 * @returns A tuple containing the best matching CachedSurface if available,
329 * a MatchType describing how the CachedSurface was selected, and
330 * an IntSize which is the size the caller should choose to decode
331 * at should it attempt to do so.
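  // A sketch of how a caller consumes this tuple, mirroring
  // SurfaceCacheImpl::LookupBestMatch() further down in this file:
  //
  //   RefPtr<CachedSurface> surface;
  //   MatchType matchType = MatchType::NOT_FOUND;
  //   IntSize suggestedSize;
  //   Tie(surface, matchType, suggestedSize) =
  //       cache->LookupBestMatch(aSurfaceKey);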
333 Tuple<already_AddRefed<CachedSurface>, MatchType, IntSize> LookupBestMatch(
334 const SurfaceKey& aIdealKey) {
335 // Try for an exact match first.
336 RefPtr<CachedSurface> exactMatch;
337 mSurfaces.Get(aIdealKey, getter_AddRefs(exactMatch));
338 if (exactMatch) {
339 if (exactMatch->IsDecoded()) {
340 return MakeTuple(exactMatch.forget(), MatchType::EXACT, IntSize());
342 } else if (aIdealKey.Region()) {
343 // We cannot substitute if we have a region. Allow it to create an exact
344 // match.
345 return MakeTuple(exactMatch.forget(), MatchType::NOT_FOUND, IntSize());
346 } else if (!mFactor2Mode) {
347 // If no exact match is found, and we are not in factor of 2 mode, then
348 // we know that we will trigger a decode because at best we will provide
349 // a substitute. Make sure we switch now to factor of 2 mode if necessary.
350 MaybeSetFactor2Mode();
353     // Second, try the suggested "compact" (factor of 2) size, if it differs.
354 IntSize suggestedSize = SuggestedSize(aIdealKey.Size());
355 if (suggestedSize != aIdealKey.Size()) {
356 if (!exactMatch) {
357 SurfaceKey compactKey = aIdealKey.CloneWithSize(suggestedSize);
358 mSurfaces.Get(compactKey, getter_AddRefs(exactMatch));
359 if (exactMatch && exactMatch->IsDecoded()) {
360 MOZ_ASSERT(suggestedSize != aIdealKey.Size());
361 return MakeTuple(exactMatch.forget(),
362 MatchType::SUBSTITUTE_BECAUSE_BEST, suggestedSize);
367 // There's no perfect match, so find the best match we can.
368 RefPtr<CachedSurface> bestMatch;
369 for (const auto& value : Values()) {
370 NotNull<CachedSurface*> current = WrapNotNull(value);
371 const SurfaceKey& currentKey = current->GetSurfaceKey();
373 // We never match a placeholder or a surface with a region.
374 if (current->IsPlaceholder() || currentKey.Region()) {
375 continue;
377 // Matching the playback type and SVG context is required.
378 if (currentKey.Playback() != aIdealKey.Playback() ||
379 currentKey.SVGContext() != aIdealKey.SVGContext()) {
380 continue;
382 // Matching the flags is required.
383 if (currentKey.Flags() != aIdealKey.Flags()) {
384 continue;
386 // Anything is better than nothing! (Within the constraints we just
387 // checked, of course.)
388 if (!bestMatch) {
389 bestMatch = current;
390 continue;
393 MOZ_ASSERT(bestMatch, "Should have a current best match");
395 // Always prefer completely decoded surfaces.
396 bool bestMatchIsDecoded = bestMatch->IsDecoded();
397 if (bestMatchIsDecoded && !current->IsDecoded()) {
398 continue;
400 if (!bestMatchIsDecoded && current->IsDecoded()) {
401 bestMatch = current;
402 continue;
405 SurfaceKey bestMatchKey = bestMatch->GetSurfaceKey();
406 if (CompareArea(aIdealKey.Size(), bestMatchKey.Size(),
407 currentKey.Size())) {
408 bestMatch = current;
412 MatchType matchType;
413 if (bestMatch) {
414 if (!exactMatch) {
415 // No exact match, neither ideal nor factor of 2.
416 MOZ_ASSERT(suggestedSize != bestMatch->GetSurfaceKey().Size(),
417 "No exact match despite the fact the sizes match!");
418 matchType = MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND;
419 } else if (exactMatch != bestMatch) {
420 // The exact match is still decoding, but we found a substitute.
421 matchType = MatchType::SUBSTITUTE_BECAUSE_PENDING;
422 } else if (aIdealKey.Size() != bestMatch->GetSurfaceKey().Size()) {
423 // The best factor of 2 match is still decoding, but the best we've got.
424 MOZ_ASSERT(suggestedSize != aIdealKey.Size());
425 MOZ_ASSERT(mFactor2Mode || mIsVectorImage);
426 matchType = MatchType::SUBSTITUTE_BECAUSE_BEST;
427 } else {
428 // The exact match is still decoding, but it's the best we've got.
429 matchType = MatchType::EXACT;
431 } else {
432 if (exactMatch) {
433 // We found an "exact match"; it must have been a placeholder.
434 MOZ_ASSERT(exactMatch->IsPlaceholder());
435 matchType = MatchType::PENDING;
436 } else {
437 // We couldn't find an exact match *or* a substitute.
438 matchType = MatchType::NOT_FOUND;
442 return MakeTuple(bestMatch.forget(), matchType, suggestedSize);
445 void MaybeSetFactor2Mode() {
446 MOZ_ASSERT(!mFactor2Mode);
448 // Typically an image cache will not have too many size-varying surfaces, so
449 // if we exceed the given threshold, we should consider using a subset.
450 int32_t thresholdSurfaces =
451 StaticPrefs::image_cache_factor2_threshold_surfaces();
452 if (thresholdSurfaces < 0 ||
453 mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
454 return;
457 // Determine how many native surfaces this image has. If it is zero, and it
458 // is a vector image, then we should impute a single native size. Otherwise,
459 // it may be zero because we don't know yet, or the image has an error, or
460 // it isn't supported.
461 NotNull<CachedSurface*> current =
462 WrapNotNull(mSurfaces.ConstIter().UserData());
463 Image* image = static_cast<Image*>(current->GetImageKey());
464 size_t nativeSizes = image->GetNativeSizesLength();
465 if (mIsVectorImage) {
466 MOZ_ASSERT(nativeSizes == 0);
467 nativeSizes = 1;
468 } else if (nativeSizes == 0) {
469 return;
472 // Increase the threshold by the number of native sizes. This ensures that
473 // we do not prevent decoding of the image at all its native sizes. It does
474     // not guarantee we will provide a surface at that size, however (e.g. when
475     // many other sized surfaces are requested in addition to the native sizes).
476 thresholdSurfaces += nativeSizes;
477 if (mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
478 return;
481 // We have a valid size, we can change modes.
482 mFactor2Mode = true;
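    // Worked example (the threshold value 4 is purely illustrative; the real
    // value comes from the pref above): a raster image with 2 native sizes
    // switches to factor of 2 mode once it holds more than 4 + 2 = 6 surfaces,
    // while a vector image, which is imputed a single native size, switches
    // once it holds more than 4 + 1 = 5.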
485 template <typename Function>
486 void Prune(Function&& aRemoveCallback) {
487 if (!mFactor2Mode || mFactor2Pruned) {
488 return;
491     // Attempt to discard any surfaces which are not factor of 2 sized, provided
492     // the best factor of 2 match for them already exists.
493 bool hasNotFactorSize = false;
494 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
495 NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
496 const SurfaceKey& currentKey = current->GetSurfaceKey();
497 const IntSize& currentSize = currentKey.Size();
499 // First we check if someone requested this size and would not accept
500 // an alternatively sized surface.
501 if (current->CannotSubstitute()) {
502 continue;
505 // Next we find the best factor of 2 size for this surface. If this
506 // surface is a factor of 2 size, then we want to keep it.
507 IntSize bestSize = SuggestedSize(currentSize);
508 if (bestSize == currentSize) {
509 continue;
512 // Check the cache for a surface with the same parameters except for the
513 // size which uses the closest factor of 2 size.
514 SurfaceKey compactKey = currentKey.CloneWithSize(bestSize);
515 RefPtr<CachedSurface> compactMatch;
516 mSurfaces.Get(compactKey, getter_AddRefs(compactMatch));
517 if (compactMatch && compactMatch->IsDecoded()) {
518 aRemoveCallback(current);
519 iter.Remove();
520 } else {
521 hasNotFactorSize = true;
525 // We have no surfaces that are not factor of 2 sized, so we can stop
526 // pruning henceforth, because we avoid the insertion of new surfaces that
527 // don't match our sizing set (unless the caller won't accept a
528     // substitution).
529 if (!hasNotFactorSize) {
530 mFactor2Pruned = true;
533     // We should never leave factor of 2 mode due to pruning in and of itself, but
534 // if we discarded surfaces due to the volatile buffers getting released,
535 // it is possible.
536 AfterMaybeRemove();
539 template <typename Function>
540 bool Invalidate(Function&& aRemoveCallback) {
541 // Remove all non-blob recordings from the cache. Invalidate any blob
542 // recordings.
543 bool foundRecording = false;
544 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
545 NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
547 if (current->GetSurfaceKey().Flags() & SurfaceFlags::RECORD_BLOB) {
548 foundRecording = true;
549 current->InvalidateRecording();
550 continue;
553 aRemoveCallback(current);
554 iter.Remove();
557 AfterMaybeRemove();
558 return foundRecording;
561 IntSize SuggestedSize(const IntSize& aSize) const {
562 IntSize suggestedSize = SuggestedSizeInternal(aSize);
563 if (mIsVectorImage) {
564 suggestedSize = SurfaceCache::ClampVectorSize(suggestedSize);
566 return suggestedSize;
569 IntSize SuggestedSizeInternal(const IntSize& aSize) const {
570 // When not in factor of 2 mode, we can always decode at the given size.
571 if (!mFactor2Mode) {
572 return aSize;
575 // We cannot enter factor of 2 mode unless we have a minimum number of
576 // surfaces, and we should have left it if the cache was emptied.
577 if (MOZ_UNLIKELY(IsEmpty())) {
578 MOZ_ASSERT_UNREACHABLE("Should not be empty and in factor of 2 mode!");
579 return aSize;
582 // This bit of awkwardness gets the largest native size of the image.
583 NotNull<CachedSurface*> firstSurface =
584 WrapNotNull(mSurfaces.ConstIter().UserData());
585 Image* image = static_cast<Image*>(firstSurface->GetImageKey());
586 IntSize factorSize;
587 if (NS_FAILED(image->GetWidth(&factorSize.width)) ||
588 NS_FAILED(image->GetHeight(&factorSize.height)) ||
589 factorSize.IsEmpty()) {
590 // Valid vector images may have a default size of 0x0. In that case, just
591 // assume a default size of 100x100 and apply the intrinsic ratio if
592       // available. If our guess was too small, don't use factor-of-2 scaling.
593 MOZ_ASSERT(mIsVectorImage);
594 factorSize = IntSize(100, 100);
595 Maybe<AspectRatio> aspectRatio = image->GetIntrinsicRatio();
596 if (aspectRatio && *aspectRatio) {
597 factorSize.width =
598 NSToIntRound(aspectRatio->ApplyToFloat(float(factorSize.height)));
599 if (factorSize.IsEmpty()) {
600 return aSize;
605 if (mIsVectorImage) {
606 // Ensure the aspect ratio matches the native size before forcing the
607 // caller to accept a factor of 2 size. The difference between the aspect
608 // ratios is:
610 // delta = nativeWidth/nativeHeight - desiredWidth/desiredHeight
612 // delta*nativeHeight*desiredHeight = nativeWidth*desiredHeight
613 // - desiredWidth*nativeHeight
615 // Using the maximum accepted delta as a constant, we can avoid the
616 // floating point division and just compare after some integer ops.
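      // Numeric example (hypothetical sizes): with a native size of 200x100 and
      // a requested size of 300x160, delta = 200*160 - 300*100 = 2000 and
      // maxDelta = (100*160) >> 4 = 1000, so the aspect ratios differ by more
      // than the ~1/16 tolerance and the requested size is returned unchanged.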
617 int32_t delta =
618 factorSize.width * aSize.height - aSize.width * factorSize.height;
619 int32_t maxDelta = (factorSize.height * aSize.height) >> 4;
620 if (delta > maxDelta || delta < -maxDelta) {
621 return aSize;
624 // If the requested size is bigger than the native size, we actually need
625 // to grow the native size instead of shrinking it.
626 if (factorSize.width < aSize.width) {
627 do {
628 IntSize candidate(factorSize.width * 2, factorSize.height * 2);
629 if (!SurfaceCache::IsLegalSize(candidate)) {
630 break;
633 factorSize = candidate;
634 } while (factorSize.width < aSize.width);
636 return factorSize;
639 // Otherwise we can find the best fit as normal.
642 // Start with the native size as the best first guess.
643 IntSize bestSize = factorSize;
644 factorSize.width /= 2;
645 factorSize.height /= 2;
647 while (!factorSize.IsEmpty()) {
648 if (!CompareArea(aSize, bestSize, factorSize)) {
649 // This size is not better than the last. Since we proceed from largest
650 // to smallest, we know that the next size will not be better if the
651 // previous size was rejected. Break early.
652 break;
655 // The current factor of 2 size is better than the last selected size.
656 bestSize = factorSize;
657 factorSize.width /= 2;
658 factorSize.height /= 2;
661 return bestSize;
664 bool CompareArea(const IntSize& aIdealSize, const IntSize& aBestSize,
665 const IntSize& aSize) const {
666 // Compare sizes. We use an area-based heuristic here instead of computing a
667 // truly optimal answer, since it seems very unlikely to make a difference
668 // for realistic sizes.
669 int64_t idealArea = AreaOfIntSize(aIdealSize);
670 int64_t currentArea = AreaOfIntSize(aSize);
671 int64_t bestMatchArea = AreaOfIntSize(aBestSize);
673 // If the best match is smaller than the ideal size, prefer bigger sizes.
674 if (bestMatchArea < idealArea) {
675 if (currentArea > bestMatchArea) {
676 return true;
678 return false;
681     // Otherwise, prefer sizes closer to the ideal size, but still not smaller.
682 if (idealArea <= currentArea && currentArea < bestMatchArea) {
683 return true;
686 // This surface isn't an improvement over the current best match.
687 return false;
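  // Worked example of the heuristic above (hypothetical sizes): with an ideal
  // size of 300x300 (area 90,000), a best match of 512x512 (area 262,144) and a
  // candidate of 400x400 (area 160,000), the candidate wins because
  // 90,000 <= 160,000 < 262,144; a 256x256 candidate (area 65,536) loses
  // because it is smaller than the ideal size.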
690 template <typename Function>
691 void CollectSizeOfSurfaces(nsTArray<SurfaceMemoryCounter>& aCounters,
692 MallocSizeOf aMallocSizeOf,
693 Function&& aRemoveCallback) {
694 CachedSurface::SurfaceMemoryReport report(aCounters, aMallocSizeOf);
695 for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
696 NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());
698 // We don't need the drawable surface for ourselves, but adding a surface
699 // to the report will trigger this indirectly. If the surface was
700 // discarded by the OS because it was in volatile memory, we should remove
701 // it from the cache immediately rather than include it in the report.
702 DrawableSurface drawableSurface;
703 if (!surface->IsPlaceholder()) {
704 drawableSurface = surface->GetDrawableSurface();
705 if (!drawableSurface) {
706 aRemoveCallback(surface);
707 iter.Remove();
708 continue;
712 const IntSize& size = surface->GetSurfaceKey().Size();
713 bool factor2Size = false;
714 if (mFactor2Mode) {
715 factor2Size = (size == SuggestedSize(size));
717 report.Add(surface, factor2Size);
720 AfterMaybeRemove();
723 void SetLocked(bool aLocked) { mLocked = aLocked; }
724 bool IsLocked() const { return mLocked; }
726 private:
727 void AfterMaybeRemove() {
728 if (IsEmpty() && mFactor2Mode) {
729 // The last surface for this cache was removed. This can happen if the
730 // surface was stored in a volatile buffer and got purged, or the surface
731 // expired from the cache. If the cache itself lingers for some reason
732 // (e.g. in the process of performing a lookup, the cache itself is
733 // locked), then we need to reset the factor of 2 state because it
734 // requires at least one surface present to get the native size
735 // information from the image.
736 mFactor2Mode = mFactor2Pruned = false;
740 SurfaceTable mSurfaces;
742 bool mLocked;
744 // True in "factor of 2" mode.
745 bool mFactor2Mode;
747 // True if all non-factor of 2 surfaces have been removed from the cache. Note
748 // that this excludes unsubstitutable sizes.
749 bool mFactor2Pruned;
751 // True if the surfaces are produced from a vector image. If so, it must match
752 // the aspect ratio when using factor of 2 mode.
753 bool mIsVectorImage;
757 * SurfaceCacheImpl is responsible for determining which surfaces will be cached
758 * and managing the surface cache data structures. Rather than interact with
759 * SurfaceCacheImpl directly, client code interacts with SurfaceCache, which
760 * maintains high-level invariants and encapsulates the details of the surface
761 * cache's implementation.
763 class SurfaceCacheImpl final : public nsIMemoryReporter {
764 public:
765 NS_DECL_ISUPPORTS
767 SurfaceCacheImpl(uint32_t aSurfaceCacheExpirationTimeMS,
768 uint32_t aSurfaceCacheDiscardFactor,
769 uint32_t aSurfaceCacheSize)
770 : mExpirationTracker(aSurfaceCacheExpirationTimeMS),
771 mMemoryPressureObserver(new MemoryPressureObserver),
772 mDiscardFactor(aSurfaceCacheDiscardFactor),
773 mMaxCost(aSurfaceCacheSize),
774 mAvailableCost(aSurfaceCacheSize),
775 mLockedCost(0),
776 mOverflowCount(0),
777 mAlreadyPresentCount(0),
778 mTableFailureCount(0),
779 mTrackingFailureCount(0) {
780 nsCOMPtr<nsIObserverService> os = services::GetObserverService();
781 if (os) {
782 os->AddObserver(mMemoryPressureObserver, "memory-pressure", false);
786 private:
787 virtual ~SurfaceCacheImpl() {
788 nsCOMPtr<nsIObserverService> os = services::GetObserverService();
789 if (os) {
790 os->RemoveObserver(mMemoryPressureObserver, "memory-pressure");
793 UnregisterWeakMemoryReporter(this);
796 public:
797 void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
799 InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider, bool aSetAvailable,
800 const StaticMutexAutoLock& aAutoLock) {
801 // If this is a duplicate surface, refuse to replace the original.
802 // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
803 // twice. We'll make this more efficient in bug 1185137.
804 LookupResult result =
805 Lookup(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock,
806 /* aMarkUsed = */ false);
807 if (MOZ_UNLIKELY(result)) {
808 mAlreadyPresentCount++;
809 return InsertOutcome::FAILURE_ALREADY_PRESENT;
812 if (result.Type() == MatchType::PENDING) {
813 RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(),
814 aAutoLock);
817 MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND ||
818 result.Type() == MatchType::PENDING,
819 "A LookupResult with no surface should be NOT_FOUND or PENDING");
821 // If this is bigger than we can hold after discarding everything we can,
822 // refuse to cache it.
823 Cost cost = aProvider->LogicalSizeInBytes();
824 if (MOZ_UNLIKELY(!CanHoldAfterDiscarding(cost))) {
825 mOverflowCount++;
826 return InsertOutcome::FAILURE;
829 // Remove elements in order of cost until we can fit this in the cache. Note
830 // that locked surfaces aren't in mCosts, so we never remove them here.
831 while (cost > mAvailableCost) {
832 MOZ_ASSERT(!mCosts.IsEmpty(),
833 "Removed everything and it still won't fit");
834 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
835 aAutoLock);
838 // Locate the appropriate per-image cache. If there's not an existing cache
839 // for this image, create it.
840 const ImageKey imageKey = aProvider->GetImageKey();
841 RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
842 if (!cache) {
843 cache = new ImageSurfaceCache(imageKey);
844 if (!mImageCaches.InsertOrUpdate(aProvider->GetImageKey(), RefPtr{cache},
845 fallible)) {
846 mTableFailureCount++;
847 return InsertOutcome::FAILURE;
851 // If we were asked to mark the cache entry available, do so.
852 if (aSetAvailable) {
853 aProvider->Availability().SetAvailable();
856 auto surface = MakeNotNull<RefPtr<CachedSurface>>(aProvider);
858 // We require that locking succeed if the image is locked and we're not
859 // inserting a placeholder; the caller may need to know this to handle
860 // errors correctly.
861 bool mustLock = cache->IsLocked() && !surface->IsPlaceholder();
862 if (mustLock) {
863 surface->SetLocked(true);
864 if (!surface->IsLocked()) {
865 return InsertOutcome::FAILURE;
869 // Insert.
870 MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost");
871 if (!cache->Insert(surface)) {
872 mTableFailureCount++;
873 if (mustLock) {
874 surface->SetLocked(false);
876 return InsertOutcome::FAILURE;
879 if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
880 MOZ_ASSERT(!mustLock);
881 Remove(surface, /* aStopTracking */ false, aAutoLock);
882 return InsertOutcome::FAILURE;
885 return InsertOutcome::SUCCESS;
888 void Remove(NotNull<CachedSurface*> aSurface, bool aStopTracking,
889 const StaticMutexAutoLock& aAutoLock) {
890 ImageKey imageKey = aSurface->GetImageKey();
892 RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
893 MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache");
895 // If the surface was not a placeholder, tell its image that we discarded
896 // it.
897 if (!aSurface->IsPlaceholder()) {
898 static_cast<Image*>(imageKey)->OnSurfaceDiscarded(
899 aSurface->GetSurfaceKey());
902 // If we failed during StartTracking, we can skip this step.
903 if (aStopTracking) {
904 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
907 // Individual surfaces must be freed outside the lock.
908 mCachedSurfacesDiscard.AppendElement(cache->Remove(aSurface));
910 MaybeRemoveEmptyCache(imageKey, cache);
913 bool StartTracking(NotNull<CachedSurface*> aSurface,
914 const StaticMutexAutoLock& aAutoLock) {
915 CostEntry costEntry = aSurface->GetCostEntry();
916 MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost,
917 "Cost too large and the caller didn't catch it");
919 if (aSurface->IsLocked()) {
920 mLockedCost += costEntry.GetCost();
921 MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?");
922 } else {
923 if (NS_WARN_IF(!mCosts.InsertElementSorted(costEntry, fallible))) {
924 mTrackingFailureCount++;
925 return false;
928 // This may fail during XPCOM shutdown, so we need to ensure the object is
929 // tracked before calling RemoveObject in StopTracking.
930 nsresult rv = mExpirationTracker.AddObjectLocked(aSurface, aAutoLock);
931 if (NS_WARN_IF(NS_FAILED(rv))) {
932 DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
933 MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
934 mTrackingFailureCount++;
935 return false;
939 mAvailableCost -= costEntry.GetCost();
940 return true;
943 void StopTracking(NotNull<CachedSurface*> aSurface, bool aIsTracked,
944 const StaticMutexAutoLock& aAutoLock) {
945 CostEntry costEntry = aSurface->GetCostEntry();
947 if (aSurface->IsLocked()) {
948 MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance");
949 mLockedCost -= costEntry.GetCost();
950 // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n).
951 MOZ_ASSERT(!mCosts.Contains(costEntry),
952 "Shouldn't have a cost entry for a locked surface");
953 } else {
954 if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) {
955 MOZ_ASSERT(aIsTracked, "Expiration-tracking a surface unexpectedly!");
956 mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock);
957 } else {
958 // Our call to AddObject must have failed in StartTracking; most likely
959 // we're in XPCOM shutdown right now.
960 MOZ_ASSERT(!aIsTracked, "Not expiration-tracking an unlocked surface!");
963 DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
964 MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
967 mAvailableCost += costEntry.GetCost();
968 MOZ_ASSERT(mAvailableCost <= mMaxCost,
969 "More available cost than we started with");
972 LookupResult Lookup(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
973 const StaticMutexAutoLock& aAutoLock, bool aMarkUsed) {
974 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
975 if (!cache) {
976 // No cached surfaces for this image.
977 return LookupResult(MatchType::NOT_FOUND);
980 RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey, aMarkUsed);
981 if (!surface) {
982 // Lookup in the per-image cache missed.
983 return LookupResult(MatchType::NOT_FOUND);
986 if (surface->IsPlaceholder()) {
987 return LookupResult(MatchType::PENDING);
990 DrawableSurface drawableSurface = surface->GetDrawableSurface();
991 if (!drawableSurface) {
992 // The surface was released by the operating system. Remove the cache
993 // entry as well.
994 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
995 return LookupResult(MatchType::NOT_FOUND);
998 if (aMarkUsed &&
999 !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
1000 Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
1001 return LookupResult(MatchType::NOT_FOUND);
1004 MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
1005 "Lookup() not returning an exact match?");
1006 return LookupResult(std::move(drawableSurface), MatchType::EXACT);
1009 LookupResult LookupBestMatch(const ImageKey aImageKey,
1010 const SurfaceKey& aSurfaceKey,
1011 const StaticMutexAutoLock& aAutoLock,
1012 bool aMarkUsed) {
1013 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1014 if (!cache) {
1015 // No cached surfaces for this image.
1016 return LookupResult(
1017 MatchType::NOT_FOUND,
1018 SurfaceCache::ClampSize(aImageKey, aSurfaceKey.Size()));
1021 // Repeatedly look up the best match, trying again if the resulting surface
1022 // has been freed by the operating system, until we can either lock a
1023 // surface for drawing or there are no matching surfaces left.
1024 // XXX(seth): This is O(N^2), but N is expected to be very small. If we
1025 // encounter a performance problem here we can revisit this.
1027 RefPtr<CachedSurface> surface;
1028 DrawableSurface drawableSurface;
1029 MatchType matchType = MatchType::NOT_FOUND;
1030 IntSize suggestedSize;
1031 while (true) {
1032 Tie(surface, matchType, suggestedSize) =
1033 cache->LookupBestMatch(aSurfaceKey);
1035 if (!surface) {
1036 return LookupResult(
1037 matchType, suggestedSize); // Lookup in the per-image cache missed.
1040 drawableSurface = surface->GetDrawableSurface();
1041 if (drawableSurface) {
1042 break;
1045 // The surface was released by the operating system. Remove the cache
1046 // entry as well.
1047 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
1050 MOZ_ASSERT_IF(matchType == MatchType::EXACT,
1051 surface->GetSurfaceKey() == aSurfaceKey);
1052 MOZ_ASSERT_IF(
1053 matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND ||
1054 matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING,
1055 surface->GetSurfaceKey().Region() == aSurfaceKey.Region() &&
1056 surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() &&
1057 surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() &&
1058 surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags());
1060 if (matchType == MatchType::EXACT ||
1061 matchType == MatchType::SUBSTITUTE_BECAUSE_BEST) {
1062 if (aMarkUsed &&
1063 !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
1064 Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
1068 return LookupResult(std::move(drawableSurface), matchType, suggestedSize);
1071 bool CanHold(const Cost aCost) const { return aCost <= mMaxCost; }
1073 size_t MaximumCapacity() const { return size_t(mMaxCost); }
1075 void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
1076 const StaticMutexAutoLock& aAutoLock) {
1077 if (!aProvider->Availability().IsPlaceholder()) {
1078 MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
1079 return;
1082 // Reinsert the provider, requesting that Insert() mark it available. This
1083 // may or may not succeed, depending on whether some other decoder has
1084 // beaten us to the punch and inserted a non-placeholder version of this
1085 // surface first, but it's fine either way.
1086 // XXX(seth): This could be implemented more efficiently; we should be able
1087 // to just update our data structures without reinserting.
1088 Insert(aProvider, /* aSetAvailable = */ true, aAutoLock);
1091 void LockImage(const ImageKey aImageKey) {
1092 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1093 if (!cache) {
1094 cache = new ImageSurfaceCache(aImageKey);
1095 mImageCaches.InsertOrUpdate(aImageKey, RefPtr{cache});
1098 cache->SetLocked(true);
1100 // We don't relock this image's existing surfaces right away; instead, the
1101 // image should arrange for Lookup() to touch them if they are still useful.
1104 void UnlockImage(const ImageKey aImageKey,
1105 const StaticMutexAutoLock& aAutoLock) {
1106 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1107 if (!cache || !cache->IsLocked()) {
1108 return; // Already unlocked.
1111 cache->SetLocked(false);
1112 DoUnlockSurfaces(WrapNotNull(cache), /* aStaticOnly = */ false, aAutoLock);
1115 void UnlockEntries(const ImageKey aImageKey,
1116 const StaticMutexAutoLock& aAutoLock) {
1117 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1118 if (!cache || !cache->IsLocked()) {
1119 return; // Already unlocked.
1122 // (Note that we *don't* unlock the per-image cache here; that's the
1123 // difference between this and UnlockImage.)
1124 DoUnlockSurfaces(WrapNotNull(cache),
1125 /* aStaticOnly = */
1126 !StaticPrefs::image_mem_animated_discardable_AtStartup(),
1127 aAutoLock);
1130 already_AddRefed<ImageSurfaceCache> RemoveImage(
1131 const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock) {
1132 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1133 if (!cache) {
1134 return nullptr; // No cached surfaces for this image, so nothing to do.
1137 // Discard all of the cached surfaces for this image.
1138 // XXX(seth): This is O(n^2) since for each item in the cache we are
1139 // removing an element from the costs array. Since n is expected to be
1140 // small, performance should be good, but if usage patterns change we should
1141 // change the data structure used for mCosts.
1142 for (const auto& value : cache->Values()) {
1143 StopTracking(WrapNotNull(value),
1144 /* aIsTracked */ true, aAutoLock);
1147 // The per-image cache isn't needed anymore, so remove it as well.
1148 // This implicitly unlocks the image if it was locked.
1149 mImageCaches.Remove(aImageKey);
1151 // Since we did not actually remove any of the surfaces from the cache
1152 // itself, only stopped tracking them, we should free it outside the lock.
1153 return cache.forget();
1156 void PruneImage(const ImageKey aImageKey,
1157 const StaticMutexAutoLock& aAutoLock) {
1158 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1159 if (!cache) {
1160 return; // No cached surfaces for this image, so nothing to do.
1163 cache->Prune([this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
1164 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1165 // Individual surfaces must be freed outside the lock.
1166 mCachedSurfacesDiscard.AppendElement(aSurface);
1169 MaybeRemoveEmptyCache(aImageKey, cache);
1172 bool InvalidateImage(const ImageKey aImageKey,
1173 const StaticMutexAutoLock& aAutoLock) {
1174 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1175 if (!cache) {
1176 return false; // No cached surfaces for this image, so nothing to do.
1179 bool rv = cache->Invalidate(
1180 [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
1181 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1182 // Individual surfaces must be freed outside the lock.
1183 mCachedSurfacesDiscard.AppendElement(aSurface);
1186 MaybeRemoveEmptyCache(aImageKey, cache);
1187 return rv;
1190 void DiscardAll(const StaticMutexAutoLock& aAutoLock) {
1191 // Remove in order of cost because mCosts is an array and the other data
1192 // structures are all hash tables. Note that locked surfaces are not
1193 // removed, since they aren't present in mCosts.
1194 while (!mCosts.IsEmpty()) {
1195 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
1196 aAutoLock);
1200 void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock) {
1201 // Compute our discardable cost. Since locked surfaces aren't discardable,
1202 // we exclude them.
1203 const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost;
1204 MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up");
1206 // Our target is to raise our available cost by (1 / mDiscardFactor) of our
1207 // discardable cost - in other words, we want to end up with about
1208 // (discardableCost / mDiscardFactor) fewer bytes stored in the surface
1209 // cache after we're done.
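    // Worked example (made-up numbers): with mMaxCost = 100 MB,
    // mAvailableCost = 40 MB and mLockedCost = 10 MB, the discardable cost is
    // (100 - 40) - 10 = 50 MB; with mDiscardFactor = 4 the target becomes
    // 40 + 50/4 = 52.5 MB of available cost, i.e. roughly 12.5 MB of unlocked
    // surfaces are evicted below.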
1210 const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor);
1212 if (targetCost > mMaxCost - mLockedCost) {
1213 MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard");
1214 DiscardAll(aAutoLock);
1215 return;
1218 // Discard surfaces until we've reduced our cost to our target cost.
1219 while (mAvailableCost < targetCost) {
1220 MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done");
1221 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
1222 aAutoLock);
1226 void TakeDiscard(nsTArray<RefPtr<CachedSurface>>& aDiscard,
1227 const StaticMutexAutoLock& aAutoLock) {
1228 MOZ_ASSERT(aDiscard.IsEmpty());
1229 aDiscard = std::move(mCachedSurfacesDiscard);
1232 already_AddRefed<CachedSurface> GetSurfaceForResetAnimation(
1233 const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
1234 const StaticMutexAutoLock& aAutoLock) {
1235 RefPtr<CachedSurface> surface;
1237 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1238 if (!cache) {
1239 // No cached surfaces for this image.
1240 return surface.forget();
1243 surface = cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
1244 return surface.forget();
1247 void LockSurface(NotNull<CachedSurface*> aSurface,
1248 const StaticMutexAutoLock& aAutoLock) {
1249 if (aSurface->IsPlaceholder() || aSurface->IsLocked()) {
1250 return;
1253 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1255 // Lock the surface. This can fail.
1256 aSurface->SetLocked(true);
1257 DebugOnly<bool> tracked = StartTracking(aSurface, aAutoLock);
1258 MOZ_ASSERT(tracked);
1261 size_t ShallowSizeOfIncludingThis(
1262 MallocSizeOf aMallocSizeOf, const StaticMutexAutoLock& aAutoLock) const {
1263 size_t bytes =
1264 aMallocSizeOf(this) + mCosts.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1265 mImageCaches.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1266 mCachedSurfacesDiscard.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1267 mExpirationTracker.ShallowSizeOfExcludingThis(aMallocSizeOf);
1268 for (const auto& data : mImageCaches.Values()) {
1269 bytes += data->ShallowSizeOfIncludingThis(aMallocSizeOf);
1271 return bytes;
1274 NS_IMETHOD
1275 CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
1276 bool aAnonymize) override {
1277 StaticMutexAutoLock lock(sInstanceMutex);
1279 uint32_t lockedImageCount = 0;
1280 uint32_t totalSurfaceCount = 0;
1281 uint32_t lockedSurfaceCount = 0;
1282 for (const auto& cache : mImageCaches.Values()) {
1283 totalSurfaceCount += cache->Count();
1284 if (cache->IsLocked()) {
1285 ++lockedImageCount;
1287 for (const auto& value : cache->Values()) {
1288 if (value->IsLocked()) {
1289 ++lockedSurfaceCount;
1294 // clang-format off
1295 // We have explicit memory reporting for the surface cache which is more
1296 // accurate than the cost metrics we report here, but these metrics are
1297 // still useful to report, since they control the cache's behavior.
1298 MOZ_COLLECT_REPORT(
1299 "explicit/images/cache/overhead", KIND_HEAP, UNITS_BYTES,
1300 ShallowSizeOfIncludingThis(SurfaceCacheMallocSizeOf, lock),
1301 "Memory used by the surface cache data structures, excluding surface data.");
1303 MOZ_COLLECT_REPORT(
1304 "imagelib-surface-cache-estimated-total",
1305 KIND_OTHER, UNITS_BYTES, (mMaxCost - mAvailableCost),
1306 "Estimated total memory used by the imagelib surface cache.");
1308 MOZ_COLLECT_REPORT(
1309 "imagelib-surface-cache-estimated-locked",
1310 KIND_OTHER, UNITS_BYTES, mLockedCost,
1311 "Estimated memory used by locked surfaces in the imagelib surface cache.");
1313 MOZ_COLLECT_REPORT(
1314 "imagelib-surface-cache-tracked-cost-count",
1315 KIND_OTHER, UNITS_COUNT, mCosts.Length(),
1316 "Total number of surfaces tracked for cost (and expiry) in the imagelib surface cache.");
1318 MOZ_COLLECT_REPORT(
1319 "imagelib-surface-cache-tracked-expiry-count",
1320 KIND_OTHER, UNITS_COUNT, mExpirationTracker.Length(lock),
1321 "Total number of surfaces tracked for expiry (and cost) in the imagelib surface cache.");
1323 MOZ_COLLECT_REPORT(
1324 "imagelib-surface-cache-image-count",
1325 KIND_OTHER, UNITS_COUNT, mImageCaches.Count(),
1326 "Total number of images in the imagelib surface cache.");
1328 MOZ_COLLECT_REPORT(
1329 "imagelib-surface-cache-locked-image-count",
1330 KIND_OTHER, UNITS_COUNT, lockedImageCount,
1331 "Total number of locked images in the imagelib surface cache.");
1333 MOZ_COLLECT_REPORT(
1334 "imagelib-surface-cache-image-surface-count",
1335 KIND_OTHER, UNITS_COUNT, totalSurfaceCount,
1336 "Total number of surfaces in the imagelib surface cache.");
1338 MOZ_COLLECT_REPORT(
1339 "imagelib-surface-cache-locked-surfaces-count",
1340 KIND_OTHER, UNITS_COUNT, lockedSurfaceCount,
1341 "Total number of locked surfaces in the imagelib surface cache.");
1343 MOZ_COLLECT_REPORT(
1344 "imagelib-surface-cache-overflow-count",
1345 KIND_OTHER, UNITS_COUNT, mOverflowCount,
1346 "Count of how many times the surface cache has hit its capacity and been "
1347 "unable to insert a new surface.");
1349 MOZ_COLLECT_REPORT(
1350 "imagelib-surface-cache-tracking-failure-count",
1351 KIND_OTHER, UNITS_COUNT, mTrackingFailureCount,
1352 "Count of how many times the surface cache has failed to begin tracking a "
1353 "given surface.");
1355 MOZ_COLLECT_REPORT(
1356 "imagelib-surface-cache-already-present-count",
1357 KIND_OTHER, UNITS_COUNT, mAlreadyPresentCount,
1358 "Count of how many times the surface cache has failed to insert a surface "
1359 "because it is already present.");
1361 MOZ_COLLECT_REPORT(
1362 "imagelib-surface-cache-table-failure-count",
1363 KIND_OTHER, UNITS_COUNT, mTableFailureCount,
1364 "Count of how many times the surface cache has failed to insert a surface "
1365 "because a hash table could not accept an entry.");
1366 // clang-format on
1368 return NS_OK;
1371 void CollectSizeOfSurfaces(const ImageKey aImageKey,
1372 nsTArray<SurfaceMemoryCounter>& aCounters,
1373 MallocSizeOf aMallocSizeOf,
1374 const StaticMutexAutoLock& aAutoLock) {
1375 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1376 if (!cache) {
1377 return; // No surfaces for this image.
1380 // Report all surfaces in the per-image cache.
1381 cache->CollectSizeOfSurfaces(
1382 aCounters, aMallocSizeOf,
1383 [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
1384 StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
1385 // Individual surfaces must be freed outside the lock.
1386 mCachedSurfacesDiscard.AppendElement(aSurface);
1389 MaybeRemoveEmptyCache(aImageKey, cache);
1392 void ReleaseImageOnMainThread(already_AddRefed<image::Image>&& aImage,
1393 const StaticMutexAutoLock& aAutoLock) {
1394 RefPtr<image::Image> image = aImage;
1395 if (!image) {
1396 return;
1399 bool needsDispatch = mReleasingImagesOnMainThread.IsEmpty();
1400 mReleasingImagesOnMainThread.AppendElement(image);
1402 if (!needsDispatch ||
1403 AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownFinal)) {
1404     // Either there is already an ongoing task for ClearReleasingImages() or
1405 // it's too late in shutdown to dispatch.
1406 return;
1409 NS_DispatchToMainThread(NS_NewRunnableFunction(
1410 "SurfaceCacheImpl::ReleaseImageOnMainThread",
1411 []() -> void { SurfaceCache::ClearReleasingImages(); }));
1414 void TakeReleasingImages(nsTArray<RefPtr<image::Image>>& aImage,
1415 const StaticMutexAutoLock& aAutoLock) {
1416 MOZ_ASSERT(NS_IsMainThread());
1417 aImage.SwapElements(mReleasingImagesOnMainThread);
1420 private:
1421 already_AddRefed<ImageSurfaceCache> GetImageCache(const ImageKey aImageKey) {
1422 RefPtr<ImageSurfaceCache> imageCache;
1423 mImageCaches.Get(aImageKey, getter_AddRefs(imageCache));
1424 return imageCache.forget();
1427 void MaybeRemoveEmptyCache(const ImageKey aImageKey,
1428 ImageSurfaceCache* aCache) {
1429 // Remove the per-image cache if it's unneeded now. Keep it if the image is
1430 // locked, since the per-image cache is where we store that state. Note that
1431 // we don't push it into mImageCachesDiscard because all of its surfaces
1432 // have been removed, so it is safe to free while holding the lock.
1433 if (aCache->IsEmpty() && !aCache->IsLocked()) {
1434 mImageCaches.Remove(aImageKey);
1438 // This is similar to CanHold() except that it takes into account the costs of
1439 // locked surfaces. It's used internally in Insert(), but it's not exposed
1440 // publicly because we permit multithreaded access to the surface cache, which
1441 // means that the result would be meaningless: another thread could insert a
1442 // surface or lock an image at any time.
1443 bool CanHoldAfterDiscarding(const Cost aCost) const {
1444 return aCost <= mMaxCost - mLockedCost;
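  // For example (made-up numbers): with mMaxCost = 100 MB and
  // mLockedCost = 30 MB, only surfaces costing at most 70 MB can possibly fit,
  // no matter how much unlocked content is discarded first.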
1447 bool MarkUsed(NotNull<CachedSurface*> aSurface,
1448 NotNull<ImageSurfaceCache*> aCache,
1449 const StaticMutexAutoLock& aAutoLock) {
1450 if (aCache->IsLocked()) {
1451 LockSurface(aSurface, aAutoLock);
1452 return true;
1455 nsresult rv = mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock);
1456 if (NS_WARN_IF(NS_FAILED(rv))) {
1457 // If mark used fails, it is because it failed to reinsert the surface
1458 // after removing it from the tracker. Thus we need to update our
1459 // own accounting but otherwise expect it to be untracked.
1460 StopTracking(aSurface, /* aIsTracked */ false, aAutoLock);
1461 return false;
1463 return true;
1466 void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache, bool aStaticOnly,
1467 const StaticMutexAutoLock& aAutoLock) {
1468 AutoTArray<NotNull<CachedSurface*>, 8> discard;
1470 // Unlock all the surfaces the per-image cache is holding.
1471 for (const auto& value : aCache->Values()) {
1472 NotNull<CachedSurface*> surface = WrapNotNull(value);
1473 if (surface->IsPlaceholder() || !surface->IsLocked()) {
1474 continue;
1476 if (aStaticOnly &&
1477 surface->GetSurfaceKey().Playback() != PlaybackType::eStatic) {
1478 continue;
1480 StopTracking(surface, /* aIsTracked */ true, aAutoLock);
1481 surface->SetLocked(false);
1482 if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
1483 discard.AppendElement(surface);
1487 // Discard any that we failed to track.
1488 for (auto iter = discard.begin(); iter != discard.end(); ++iter) {
1489 Remove(*iter, /* aStopTracking */ false, aAutoLock);
1493 void RemoveEntry(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
1494 const StaticMutexAutoLock& aAutoLock) {
1495 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1496 if (!cache) {
1497 return; // No cached surfaces for this image.
1500 RefPtr<CachedSurface> surface =
1501 cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
1502 if (!surface) {
1503 return; // Lookup in the per-image cache missed.
1506 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
1509 class SurfaceTracker final
1510 : public ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
1511 StaticMutexAutoLock> {
1512 public:
1513 explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS)
1514 : ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
1515 StaticMutexAutoLock>(
1516 aSurfaceCacheExpirationTimeMS, "SurfaceTracker") {}
1518 protected:
1519 void NotifyExpiredLocked(CachedSurface* aSurface,
1520 const StaticMutexAutoLock& aAutoLock) override {
1521 sInstance->Remove(WrapNotNull(aSurface), /* aStopTracking */ true,
1522 aAutoLock);
1525 void NotifyHandlerEndLocked(const StaticMutexAutoLock& aAutoLock) override {
1526 sInstance->TakeDiscard(mDiscard, aAutoLock);
1529 void NotifyHandlerEnd() override {
1530 nsTArray<RefPtr<CachedSurface>> discard(std::move(mDiscard));
1533 StaticMutex& GetMutex() override { return sInstanceMutex; }
1535 nsTArray<RefPtr<CachedSurface>> mDiscard;
1538 class MemoryPressureObserver final : public nsIObserver {
1539 public:
1540 NS_DECL_ISUPPORTS
1542 NS_IMETHOD Observe(nsISupports*, const char* aTopic,
1543 const char16_t*) override {
1544 nsTArray<RefPtr<CachedSurface>> discard;
1546 StaticMutexAutoLock lock(sInstanceMutex);
1547 if (sInstance && strcmp(aTopic, "memory-pressure") == 0) {
1548 sInstance->DiscardForMemoryPressure(lock);
1549 sInstance->TakeDiscard(discard, lock);
1552 return NS_OK;
1555 private:
1556 virtual ~MemoryPressureObserver() {}
1559 nsTArray<CostEntry> mCosts;
1560 nsRefPtrHashtable<nsPtrHashKey<Image>, ImageSurfaceCache> mImageCaches;
1561 nsTArray<RefPtr<CachedSurface>> mCachedSurfacesDiscard;
1562 SurfaceTracker mExpirationTracker;
1563 RefPtr<MemoryPressureObserver> mMemoryPressureObserver;
1564 nsTArray<RefPtr<image::Image>> mReleasingImagesOnMainThread;
1565 const uint32_t mDiscardFactor;
1566 const Cost mMaxCost;
1567 Cost mAvailableCost;
1568 Cost mLockedCost;
1569 size_t mOverflowCount;
1570 size_t mAlreadyPresentCount;
1571 size_t mTableFailureCount;
1572 size_t mTrackingFailureCount;
1575 NS_IMPL_ISUPPORTS(SurfaceCacheImpl, nsIMemoryReporter)
1576 NS_IMPL_ISUPPORTS(SurfaceCacheImpl::MemoryPressureObserver, nsIObserver)
1578 ///////////////////////////////////////////////////////////////////////////////
1579 // Public API
1580 ///////////////////////////////////////////////////////////////////////////////

/* static */
void SurfaceCache::Initialize() {
  // Initialize preferences.
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(!sInstance, "Shouldn't initialize more than once");

  // See StaticPrefs for the default values of these preferences.

  // Length of time before an unused surface is removed from the cache, in
  // milliseconds.
  uint32_t surfaceCacheExpirationTimeMS =
      StaticPrefs::image_mem_surfacecache_min_expiration_ms_AtStartup();

  // What fraction of the memory used by the surface cache we should discard
  // when we get a memory pressure notification. This value is interpreted as
  // 1/N, so 1 means to discard everything, 2 means to discard about half of the
  // memory we're using, and so forth. We clamp it to avoid division by zero.
  uint32_t surfaceCacheDiscardFactor =
      max(StaticPrefs::image_mem_surfacecache_discard_factor_AtStartup(), 1u);

  // Maximum size of the surface cache, in kilobytes.
  uint64_t surfaceCacheMaxSizeKB =
      StaticPrefs::image_mem_surfacecache_max_size_kb_AtStartup();

  if (sizeof(uintptr_t) <= 4) {
    // Limit surface cache to 1 GB if our address space is 32 bit.
    surfaceCacheMaxSizeKB = 1024 * 1024;
  }

  // A knob determining the actual size of the surface cache. Currently the
  // cache is (size of main memory) / (surface cache size factor) KB
  // or (surface cache max size) KB, whichever is smaller. The formula
  // may change in the future, though.
  // For example, a value of 4 would yield a 256MB cache on a 1GB machine.
  // The smallest machines we are likely to run this code on have 256MB
  // of memory, which would yield a 64MB cache on this setting.
  // We clamp this value to avoid division by zero.
  uint32_t surfaceCacheSizeFactor =
      max(StaticPrefs::image_mem_surfacecache_size_factor_AtStartup(), 1u);

  // Compute the size of the surface cache.
  uint64_t memorySize = PR_GetPhysicalMemorySize();
  if (memorySize == 0) {
    MOZ_ASSERT_UNREACHABLE("PR_GetPhysicalMemorySize not implemented here");
    memorySize = 256 * 1024 * 1024;  // Fall back to 256MB.
  }
  uint64_t proposedSize = memorySize / surfaceCacheSizeFactor;
  uint64_t surfaceCacheSizeBytes =
      min(proposedSize, surfaceCacheMaxSizeKB * 1024);
  uint32_t finalSurfaceCacheSizeBytes =
      min(surfaceCacheSizeBytes, uint64_t(UINT32_MAX));
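
  // Worked example (illustrative, not from the original source), using the
  // factor of 4 mentioned in the comment above: on an 8 GiB machine,
  //   proposedSize          = 8 GiB / 4 = 2 GiB
  //   surfaceCacheSizeBytes = min(2 GiB, surfaceCacheMaxSizeKB * 1024)
  // and finalSurfaceCacheSizeBytes additionally clamps the result to
  // UINT32_MAX, so the effective limit is the smallest of the three values.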

  // Create the surface cache singleton with the requested settings. Note that
  // the size is a limit that the cache may not grow beyond, but we do not
  // actually allocate any storage for surfaces at this time.
  sInstance = new SurfaceCacheImpl(surfaceCacheExpirationTimeMS,
                                   surfaceCacheDiscardFactor,
                                   finalSurfaceCacheSizeBytes);
  sInstance->InitMemoryReporter();
}

/* static */
void SurfaceCache::Shutdown() {
  RefPtr<SurfaceCacheImpl> cache;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?");
    cache = sInstance.forget();
  }
}

/* static */
LookupResult SurfaceCache::Lookup(const ImageKey aImageKey,
                                  const SurfaceKey& aSurfaceKey,
                                  bool aMarkUsed) {
  nsTArray<RefPtr<CachedSurface>> discard;
  LookupResult rv(MatchType::NOT_FOUND);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->Lookup(aImageKey, aSurfaceKey, lock, aMarkUsed);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
LookupResult SurfaceCache::LookupBestMatch(const ImageKey aImageKey,
                                           const SurfaceKey& aSurfaceKey,
                                           bool aMarkUsed) {
  nsTArray<RefPtr<CachedSurface>> discard;
  LookupResult rv(MatchType::NOT_FOUND);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock, aMarkUsed);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
InsertOutcome SurfaceCache::Insert(NotNull<ISurfaceProvider*> aProvider) {
  nsTArray<RefPtr<CachedSurface>> discard;
  InsertOutcome rv(InsertOutcome::FAILURE);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
bool SurfaceCache::CanHold(const IntSize& aSize,
                           uint32_t aBytesPerPixel /* = 4 */) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return false;
  }

  Cost cost = ComputeCost(aSize, aBytesPerPixel);
  return sInstance->CanHold(cost);
}

/* static */
bool SurfaceCache::CanHold(size_t aSize) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return false;
  }

  return sInstance->CanHold(aSize);
}

/* static */
void SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return;
  }

  sInstance->SurfaceAvailable(aProvider, lock);
}

/* static */
void SurfaceCache::LockImage(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->LockImage(aImageKey);
  }
}

/* static */
void SurfaceCache::UnlockImage(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->UnlockImage(aImageKey, lock);
  }
}

/* static */
void SurfaceCache::UnlockEntries(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->UnlockEntries(aImageKey, lock);
  }
}

/* static */
void SurfaceCache::RemoveImage(const ImageKey aImageKey) {
  RefPtr<ImageSurfaceCache> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      discard = sInstance->RemoveImage(aImageKey, lock);
    }
  }
}

/* static */
void SurfaceCache::PruneImage(const ImageKey aImageKey) {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->PruneImage(aImageKey, lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
}

/* static */
bool SurfaceCache::InvalidateImage(const ImageKey aImageKey) {
  nsTArray<RefPtr<CachedSurface>> discard;
  bool rv = false;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      rv = sInstance->InvalidateImage(aImageKey, lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
  return rv;
}

/* static */
void SurfaceCache::DiscardAll() {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->DiscardAll(lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
}

/* static */
void SurfaceCache::ResetAnimation(const ImageKey aImageKey,
                                  const SurfaceKey& aSurfaceKey) {
  RefPtr<CachedSurface> surface;
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return;
    }

    surface =
        sInstance->GetSurfaceForResetAnimation(aImageKey, aSurfaceKey, lock);
    sInstance->TakeDiscard(discard, lock);
  }

  // Calling Reset will acquire the AnimationSurfaceProvider::mFramesMutex
  // mutex. In other places we acquire the mFramesMutex then call into the
  // surface cache (acquiring the surface cache mutex), so that determines a
  // lock order which we must obey by calling Reset after releasing the surface
  // cache mutex.
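  //
  // Illustrative summary (not from the original source) of the ordering just
  // described:
  //   allowed:   mFramesMutex -> sInstanceMutex  (provider code that calls
  //              back into the surface cache)
  //   forbidden: sInstanceMutex -> mFramesMutex  (which calling Reset() while
  //              still holding the cache lock above would imply)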
  if (surface) {
    DrawableSurface drawableSurface =
        surface->GetDrawableSurfaceEvenIfPlaceholder();
    if (drawableSurface) {
      MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
                 "ResetAnimation() not returning an exact match?");

      drawableSurface.Reset();
    }
  }
}

/* static */
void SurfaceCache::CollectSizeOfSurfaces(
    const ImageKey aImageKey, nsTArray<SurfaceMemoryCounter>& aCounters,
    MallocSizeOf aMallocSizeOf) {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return;
    }

    sInstance->CollectSizeOfSurfaces(aImageKey, aCounters, aMallocSizeOf, lock);
    sInstance->TakeDiscard(discard, lock);
  }
}

/* static */
size_t SurfaceCache::MaximumCapacity() {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return 0;
  }

  return sInstance->MaximumCapacity();
}

/* static */
bool SurfaceCache::IsLegalSize(const IntSize& aSize) {
  // reject over-wide or over-tall images
  const int32_t k64KLimit = 0x0000FFFF;
  if (MOZ_UNLIKELY(aSize.width > k64KLimit || aSize.height > k64KLimit)) {
    NS_WARNING("image too big");
    return false;
  }

  // protect against invalid sizes
  if (MOZ_UNLIKELY(aSize.height <= 0 || aSize.width <= 0)) {
    return false;
  }

  // check to make sure we don't overflow a 32-bit size
  CheckedInt32 requiredBytes =
      CheckedInt32(aSize.width) * CheckedInt32(aSize.height) * 4;
  if (MOZ_UNLIKELY(!requiredBytes.isValid())) {
    NS_WARNING("width or height too large");
    return false;
  }
  return true;
}
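
// Illustrative arithmetic (not part of the original file): IsLegalSize() above
// rejects a 70000x100 image at the k64KLimit check, and a 40000x40000 image at
// the CheckedInt32 check, since 40000 * 40000 * 4 = 6,400,000,000 bytes does
// not fit in an int32_t even though both dimensions are below 64K.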

IntSize SurfaceCache::ClampVectorSize(const IntSize& aSize) {
  // If we exceed the maximum, we need to scale the size downwards to fit.
  // We shouldn't get here if the size is significantly larger, because
  // VectorImage::UseSurfaceCacheForSize should prevent us from requesting
  // a rasterized version of a surface greater than 4x the maximum.
  int32_t maxSizeKB =
      StaticPrefs::image_cache_max_rasterized_svg_threshold_kb();
  if (maxSizeKB <= 0) {
    return aSize;
  }

  int64_t proposedKB = int64_t(aSize.width) * aSize.height / 256;
  if (maxSizeKB >= proposedKB) {
    return aSize;
  }

  double scale = sqrt(double(maxSizeKB) / proposedKB);
  return IntSize(int32_t(scale * aSize.width), int32_t(scale * aSize.height));
}
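
// Illustrative arithmetic (not part of the original file): proposedKB above is
// width * height * 4 bytes per pixel / 1024, i.e. width * height / 256. With a
// hypothetical 100000 KB threshold, a 10000x10000 request gives
// proposedKB = 390625 and scale = sqrt(100000 / 390625) ~= 0.506, so the
// clamped size is roughly 5060x5060, which rasterizes to about 100000 KB.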

IntSize SurfaceCache::ClampSize(ImageKey aImageKey, const IntSize& aSize) {
  if (aImageKey->GetType() != imgIContainer::TYPE_VECTOR) {
    return aSize;
  }

  return ClampVectorSize(aSize);
}

/* static */
void SurfaceCache::ReleaseImageOnMainThread(
    already_AddRefed<image::Image> aImage, bool aAlwaysProxy) {
  if (NS_IsMainThread() && !aAlwaysProxy) {
    // Already on the main thread: take ownership and let the image be
    // released right here.
    RefPtr<image::Image> image = std::move(aImage);
    return;
  }

  // Don't try to dispatch the release after shutdown, as we'd just leak the
  // runnable.
  if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownFinal)) {
    return;
  }

  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    sInstance->ReleaseImageOnMainThread(std::move(aImage), lock);
  } else {
    NS_ReleaseOnMainThread("SurfaceCache::ReleaseImageOnMainThread",
                           std::move(aImage), /* aAlwaysProxy */ true);
  }
}

/* static */
void SurfaceCache::ClearReleasingImages() {
  MOZ_ASSERT(NS_IsMainThread());

  nsTArray<RefPtr<image::Image>> images;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->TakeReleasingImages(images, lock);
    }
  }
  // `images` goes out of scope here, releasing the images on the main thread
  // without holding the cache lock.
}

}  // namespace image
}  // namespace mozilla