/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/**
 * SurfaceCache is a service for caching temporary surfaces in imagelib.
 */

#include "SurfaceCache.h"

#include <algorithm>
#include <utility>

#include "ISurfaceProvider.h"
#include "Image.h"
#include "LookupResult.h"
#include "ShutdownTracker.h"
#include "gfx2DGlue.h"
#include "gfxPlatform.h"
#include "imgFrame.h"
#include "mozilla/AppShutdown.h"
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Likely.h"
#include "mozilla/RefPtr.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPrefs_image.h"
#include "mozilla/StaticPtr.h"

#include "nsExpirationTracker.h"
#include "nsHashKeys.h"
#include "nsIMemoryReporter.h"
#include "nsRefPtrHashtable.h"
#include "nsSize.h"
#include "nsTArray.h"
#include "Orientation.h"
#include "prsystem.h"

using std::max;
using std::min;

namespace mozilla {

using namespace gfx;

namespace image {

MOZ_DEFINE_MALLOC_SIZE_OF(SurfaceCacheMallocSizeOf)

class CachedSurface;
class SurfaceCacheImpl;

///////////////////////////////////////////////////////////////////////////////
// Static Data
///////////////////////////////////////////////////////////////////////////////

// The single surface cache instance.
static StaticRefPtr<SurfaceCacheImpl> sInstance;

// The mutex protecting the surface cache.
static StaticMutex sInstanceMutex MOZ_UNANNOTATED;

///////////////////////////////////////////////////////////////////////////////
// SurfaceCache Implementation
///////////////////////////////////////////////////////////////////////////////

/**
 * Cost models the cost of storing a surface in the cache. Right now, this is
 * simply an estimate of the size of the surface in bytes, but in the future it
 * may be worth taking into account the cost of rematerializing the surface as
 * well.
 */
typedef size_t Cost;
static Cost ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel) {
  MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4);
  return aSize.width * aSize.height * aBytesPerPixel;
}
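// For example, a 100x100 surface in a 4-byte-per-pixel format has a cost of
// 100 * 100 * 4 = 40,000 bytes.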
/**
 * Since we want to be able to make eviction decisions based on cost, we need to
 * be able to look up the CachedSurface which has a certain cost as well as the
 * cost associated with a certain CachedSurface. To make this possible, in data
 * structures we actually store a CostEntry, which contains a weak pointer to
 * its associated surface.
 *
 * To make usage of the weak pointer safe, SurfaceCacheImpl always calls
 * StartTracking after a surface is stored in the cache and StopTracking before
 * it is removed.
 */
class CostEntry {
 public:
  CostEntry(NotNull<CachedSurface*> aSurface, Cost aCost)
      : mSurface(aSurface), mCost(aCost) {}

  NotNull<CachedSurface*> Surface() const { return mSurface; }
  Cost GetCost() const { return mCost; }

  bool operator==(const CostEntry& aOther) const {
    return mSurface == aOther.mSurface && mCost == aOther.mCost;
  }

  bool operator<(const CostEntry& aOther) const {
    return mCost < aOther.mCost ||
           (mCost == aOther.mCost && mSurface < aOther.mSurface);
  }

 private:
  NotNull<CachedSurface*> mSurface;
  Cost mCost;
};

/**
 * A CachedSurface associates a surface with a key that uniquely identifies that
 * surface.
 */
class CachedSurface {
  ~CachedSurface() {}

 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(CachedSurface)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface)

  explicit CachedSurface(NotNull<ISurfaceProvider*> aProvider)
      : mProvider(aProvider), mIsLocked(false) {}

  DrawableSurface GetDrawableSurface() const {
    if (MOZ_UNLIKELY(IsPlaceholder())) {
      MOZ_ASSERT_UNREACHABLE("Called GetDrawableSurface() on a placeholder");
      return DrawableSurface();
    }

    return mProvider->Surface();
  }

  DrawableSurface GetDrawableSurfaceEvenIfPlaceholder() const {
    return mProvider->Surface();
  }

  void SetLocked(bool aLocked) {
    if (IsPlaceholder()) {
      return;  // Can't lock a placeholder.
    }

    // Update both our state and our provider's state. Some surface providers
    // are permanently locked; maintaining our own locking state enables us to
    // respect SetLocked() even when it's meaningless from the provider's
    // perspective.
    mIsLocked = aLocked;
    mProvider->SetLocked(aLocked);
  }

  bool IsLocked() const {
    return !IsPlaceholder() && mIsLocked && mProvider->IsLocked();
  }

  void SetCannotSubstitute() {
    mProvider->Availability().SetCannotSubstitute();
  }
  bool CannotSubstitute() const {
    return mProvider->Availability().CannotSubstitute();
  }

  bool IsPlaceholder() const {
    return mProvider->Availability().IsPlaceholder();
  }
  bool IsDecoded() const { return !IsPlaceholder() && mProvider->IsFinished(); }

  ImageKey GetImageKey() const { return mProvider->GetImageKey(); }
  const SurfaceKey& GetSurfaceKey() const { return mProvider->GetSurfaceKey(); }
  nsExpirationState* GetExpirationState() { return &mExpirationState; }

  CostEntry GetCostEntry() {
    return image::CostEntry(WrapNotNull(this), mProvider->LogicalSizeInBytes());
  }

  size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(this) + aMallocSizeOf(mProvider.get());
  }

  void InvalidateRecording() { mProvider->InvalidateRecording(); }

  // A helper type used by SurfaceCacheImpl::CollectSizeOfSurfaces.
  struct MOZ_STACK_CLASS SurfaceMemoryReport {
    SurfaceMemoryReport(nsTArray<SurfaceMemoryCounter>& aCounters,
                        MallocSizeOf aMallocSizeOf)
        : mCounters(aCounters), mMallocSizeOf(aMallocSizeOf) {}

    void Add(NotNull<CachedSurface*> aCachedSurface, bool aIsFactor2) {
      if (aCachedSurface->IsPlaceholder()) {
        return;
      }

      // Record the memory used by the ISurfaceProvider. This may not have a
      // straightforward relationship to the size of the surface that
      // DrawableRef() returns if the surface is generated dynamically. (i.e.,
      // for surfaces with PlaybackType::eAnimated.)
      aCachedSurface->mProvider->AddSizeOfExcludingThis(
          mMallocSizeOf, [&](ISurfaceProvider::AddSizeOfCbData& aMetadata) {
            SurfaceMemoryCounter counter(aCachedSurface->GetSurfaceKey(),
                                         aCachedSurface->IsLocked(),
                                         aCachedSurface->CannotSubstitute(),
                                         aIsFactor2, aMetadata.mFinished);

            counter.Values().SetDecodedHeap(aMetadata.mHeapBytes);
            counter.Values().SetDecodedNonHeap(aMetadata.mNonHeapBytes);
            counter.Values().SetDecodedUnknown(aMetadata.mUnknownBytes);
            counter.Values().SetExternalHandles(aMetadata.mExternalHandles);
            counter.Values().SetFrameIndex(aMetadata.mIndex);
            counter.Values().SetExternalId(aMetadata.mExternalId);
            counter.Values().SetSurfaceTypes(aMetadata.mTypes);

            mCounters.AppendElement(counter);
          });
    }

   private:
    nsTArray<SurfaceMemoryCounter>& mCounters;
    MallocSizeOf mMallocSizeOf;
  };

 private:
  nsExpirationState mExpirationState;
  NotNull<RefPtr<ISurfaceProvider>> mProvider;
  bool mIsLocked;
};

static int64_t AreaOfIntSize(const IntSize& aSize) {
  return static_cast<int64_t>(aSize.width) * static_cast<int64_t>(aSize.height);
}
/**
 * An ImageSurfaceCache is a per-image surface cache. For correctness we must be
 * able to remove all surfaces associated with an image when the image is
 * destroyed or invalidated. Since this will happen frequently, it makes sense
 * to make it cheap by storing the surfaces for each image separately.
 *
 * ImageSurfaceCache also keeps track of whether its associated image is locked
 * or unlocked.
 *
 * The cache may also enter "factor of 2" mode which occurs when the number of
 * surfaces in the cache exceeds the "image.cache.factor2.threshold-surfaces"
 * pref plus the number of native sizes of the image. When in "factor of 2"
 * mode, the cache will strongly favour sizes which are a factor of 2 of the
 * largest native size. It accomplishes this by suggesting a factor of 2 size
 * when lookups fail and substituting the nearest factor of 2 surface to the
 * ideal size as the "best" available (as opposed to substitution but not
 * found). This allows us to minimize memory consumption and CPU time spent
 * decoding when a website requires many variants of the same surface.
 */
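// Illustration: if the largest native size of an image is 512x512, factor of 2
// mode only suggests decoding at 512x512, 256x256, 128x128, and so on (see
// SuggestedSizeInternal below), so requests for, say, 300x300 and 260x260 can
// share one 512x512 surface instead of each triggering its own decode.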
class ImageSurfaceCache {
  ~ImageSurfaceCache() {}

 public:
  explicit ImageSurfaceCache(const ImageKey aImageKey)
      : mLocked(false),
        mFactor2Mode(false),
        mFactor2Pruned(false),
        mIsVectorImage(aImageKey->GetType() == imgIContainer::TYPE_VECTOR) {}

  MOZ_DECLARE_REFCOUNTED_TYPENAME(ImageSurfaceCache)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageSurfaceCache)

  typedef nsRefPtrHashtable<nsGenericHashKey<SurfaceKey>, CachedSurface>
      SurfaceTable;

  auto Values() const { return mSurfaces.Values(); }
  uint32_t Count() const { return mSurfaces.Count(); }
  bool IsEmpty() const { return mSurfaces.Count() == 0; }

  size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    size_t bytes = aMallocSizeOf(this) +
                   mSurfaces.ShallowSizeOfExcludingThis(aMallocSizeOf);
    for (const auto& value : Values()) {
      bytes += value->ShallowSizeOfIncludingThis(aMallocSizeOf);
    }
    return bytes;
  }

  [[nodiscard]] bool Insert(NotNull<CachedSurface*> aSurface) {
    MOZ_ASSERT(!mLocked || aSurface->IsPlaceholder() || aSurface->IsLocked(),
               "Inserting an unlocked surface for a locked image");
    const auto& surfaceKey = aSurface->GetSurfaceKey();
    if (surfaceKey.Region()) {
      // We don't allow substitutes for surfaces with regions, so we don't want
      // to allow factor of 2 mode pruning to release these surfaces.
      aSurface->SetCannotSubstitute();
    }
    return mSurfaces.InsertOrUpdate(surfaceKey, RefPtr<CachedSurface>{aSurface},
                                    fallible);
  }

  already_AddRefed<CachedSurface> Remove(NotNull<CachedSurface*> aSurface) {
    MOZ_ASSERT(mSurfaces.GetWeak(aSurface->GetSurfaceKey()),
               "Should not be removing a surface we don't have");

    RefPtr<CachedSurface> surface;
    mSurfaces.Remove(aSurface->GetSurfaceKey(), getter_AddRefs(surface));
    AfterMaybeRemove();
    return surface.forget();
  }

  already_AddRefed<CachedSurface> Lookup(const SurfaceKey& aSurfaceKey,
                                         bool aForAccess) {
    RefPtr<CachedSurface> surface;
    mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface));

    if (aForAccess) {
      if (surface) {
        // We don't want to allow factor of 2 mode pruning to release surfaces
        // for which the callers will accept no substitute.
        surface->SetCannotSubstitute();
      } else if (!mFactor2Mode) {
        // If no exact match is found, and this is for use rather than internal
        // accounting (i.e. insert and removal), we know this will trigger a
        // decode. Make sure we switch now to factor of 2 mode if necessary.
        MaybeSetFactor2Mode();
      }
    }

    return surface.forget();
  }

  /**
   * @returns A tuple containing the best matching CachedSurface if available,
   *          a MatchType describing how the CachedSurface was selected, and
   *          an IntSize which is the size the caller should choose to decode
   *          at should it attempt to do so.
   */
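  // For example (illustrative): in factor of 2 mode with a 512x512 native
  // size, a lookup for a 300x300 surface that only finds a decoded 512x512
  // entry returns that surface, MatchType::SUBSTITUTE_BECAUSE_BEST, and a
  // suggested decode size of 512x512.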
  std::tuple<already_AddRefed<CachedSurface>, MatchType, IntSize>
  LookupBestMatch(const SurfaceKey& aIdealKey) {
    // Try for an exact match first.
    RefPtr<CachedSurface> exactMatch;
    mSurfaces.Get(aIdealKey, getter_AddRefs(exactMatch));
    if (exactMatch) {
      if (exactMatch->IsDecoded()) {
        return std::make_tuple(exactMatch.forget(), MatchType::EXACT,
                               IntSize());
      }
    } else if (aIdealKey.Region()) {
      // We cannot substitute if we have a region. Allow it to create an exact
      // match.
      return std::make_tuple(exactMatch.forget(), MatchType::NOT_FOUND,
                             IntSize());
    } else if (!mFactor2Mode) {
      // If no exact match is found, and we are not in factor of 2 mode, then
      // we know that we will trigger a decode because at best we will provide
      // a substitute. Make sure we switch now to factor of 2 mode if necessary.
      MaybeSetFactor2Mode();
    }

    // Try for a best match second, if using compact.
    IntSize suggestedSize = SuggestedSize(aIdealKey.Size());
    if (suggestedSize != aIdealKey.Size()) {
      if (!exactMatch) {
        SurfaceKey compactKey = aIdealKey.CloneWithSize(suggestedSize);
        mSurfaces.Get(compactKey, getter_AddRefs(exactMatch));
        if (exactMatch && exactMatch->IsDecoded()) {
          MOZ_ASSERT(suggestedSize != aIdealKey.Size());
          return std::make_tuple(exactMatch.forget(),
                                 MatchType::SUBSTITUTE_BECAUSE_BEST,
                                 suggestedSize);
        }
      }
    }

    // There's no perfect match, so find the best match we can.
    RefPtr<CachedSurface> bestMatch;
    for (const auto& value : Values()) {
      NotNull<CachedSurface*> current = WrapNotNull(value);
      const SurfaceKey& currentKey = current->GetSurfaceKey();

      // We never match a placeholder or a surface with a region.
      if (current->IsPlaceholder() || currentKey.Region()) {
        continue;
      }
      // Matching the playback type and SVG context is required.
      if (currentKey.Playback() != aIdealKey.Playback() ||
          currentKey.SVGContext() != aIdealKey.SVGContext()) {
        continue;
      }
      // Matching the flags is required.
      if (currentKey.Flags() != aIdealKey.Flags()) {
        continue;
      }
      // Anything is better than nothing! (Within the constraints we just
      // checked, of course.)
      if (!bestMatch) {
        bestMatch = current;
        continue;
      }

      MOZ_ASSERT(bestMatch, "Should have a current best match");

      // Always prefer completely decoded surfaces.
      bool bestMatchIsDecoded = bestMatch->IsDecoded();
      if (bestMatchIsDecoded && !current->IsDecoded()) {
        continue;
      }
      if (!bestMatchIsDecoded && current->IsDecoded()) {
        bestMatch = current;
        continue;
      }

      SurfaceKey bestMatchKey = bestMatch->GetSurfaceKey();
      if (CompareArea(aIdealKey.Size(), bestMatchKey.Size(),
                      currentKey.Size())) {
        bestMatch = current;
      }
    }

    MatchType matchType;
    if (bestMatch) {
      if (!exactMatch) {
        // No exact match, neither ideal nor factor of 2.
        MOZ_ASSERT(suggestedSize != bestMatch->GetSurfaceKey().Size(),
                   "No exact match despite the fact the sizes match!");
        matchType = MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND;
      } else if (exactMatch != bestMatch) {
        // The exact match is still decoding, but we found a substitute.
        matchType = MatchType::SUBSTITUTE_BECAUSE_PENDING;
      } else if (aIdealKey.Size() != bestMatch->GetSurfaceKey().Size()) {
        // The best factor of 2 match is still decoding, but the best we've got.
        MOZ_ASSERT(suggestedSize != aIdealKey.Size());
        MOZ_ASSERT(mFactor2Mode || mIsVectorImage);
        matchType = MatchType::SUBSTITUTE_BECAUSE_BEST;
      } else {
        // The exact match is still decoding, but it's the best we've got.
        matchType = MatchType::EXACT;
      }
    } else {
      if (exactMatch) {
        // We found an "exact match"; it must have been a placeholder.
        MOZ_ASSERT(exactMatch->IsPlaceholder());
        matchType = MatchType::PENDING;
      } else {
        // We couldn't find an exact match *or* a substitute.
        matchType = MatchType::NOT_FOUND;
      }
    }

    return std::make_tuple(bestMatch.forget(), matchType, suggestedSize);
  }
  void MaybeSetFactor2Mode() {
    MOZ_ASSERT(!mFactor2Mode);

    // Typically an image cache will not have too many size-varying surfaces,
    // so if we exceed the given threshold, we should consider using a subset.
    int32_t thresholdSurfaces =
        StaticPrefs::image_cache_factor2_threshold_surfaces();
    if (thresholdSurfaces < 0 ||
        mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // Determine how many native surfaces this image has. If it is zero, and it
    // is a vector image, then we should impute a single native size. Otherwise,
    // it may be zero because we don't know yet, or the image has an error, or
    // it isn't supported.
    NotNull<CachedSurface*> current =
        WrapNotNull(mSurfaces.ConstIter().UserData());
    Image* image = static_cast<Image*>(current->GetImageKey());
    size_t nativeSizes = image->GetNativeSizesLength();
    if (mIsVectorImage) {
      MOZ_ASSERT(nativeSizes == 0);
      nativeSizes = 1;
    } else if (nativeSizes == 0) {
      return;
    }

    // Increase the threshold by the number of native sizes. This ensures that
    // we do not prevent decoding of the image at all its native sizes. It does
    // not guarantee we will provide a surface at that size however (i.e. many
    // other sized surfaces are requested, in addition to the native sizes).
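    // For instance, with a threshold pref of 4 and an image that reports 3
    // native sizes, factor of 2 mode only engages once more than 4 + 3 = 7
    // surfaces are cached for this image.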
    thresholdSurfaces += nativeSizes;
    if (mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // We have a valid size, we can change modes.
    mFactor2Mode = true;
  }
  template <typename Function>
  void Prune(Function&& aRemoveCallback) {
    if (!mFactor2Mode || mFactor2Pruned) {
      return;
    }

    // Attempt to discard any surfaces which are not factor of 2 sized when the
    // best factor of 2 match exists.
    bool hasNotFactorSize = false;
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
      const SurfaceKey& currentKey = current->GetSurfaceKey();
      const IntSize& currentSize = currentKey.Size();

      // First we check if someone requested this size and would not accept
      // an alternatively sized surface.
      if (current->CannotSubstitute()) {
        continue;
      }

      // Next we find the best factor of 2 size for this surface. If this
      // surface is a factor of 2 size, then we want to keep it.
      IntSize bestSize = SuggestedSize(currentSize);
      if (bestSize == currentSize) {
        continue;
      }

      // Check the cache for a surface with the same parameters except for the
      // size, which uses the closest factor of 2 size.
      SurfaceKey compactKey = currentKey.CloneWithSize(bestSize);
      RefPtr<CachedSurface> compactMatch;
      mSurfaces.Get(compactKey, getter_AddRefs(compactMatch));
      if (compactMatch && compactMatch->IsDecoded()) {
        aRemoveCallback(current);
        iter.Remove();
      } else {
        hasNotFactorSize = true;
      }
    }

    // If we have no surfaces left that are not factor of 2 sized, we can stop
    // pruning from now on, because we avoid inserting new surfaces that don't
    // match our sizing set (unless the caller won't accept a substitution).
    if (!hasNotFactorSize) {
      mFactor2Pruned = true;
    }

    // We should never leave factor of 2 mode due to pruning in and of itself,
    // but if we discarded surfaces due to the volatile buffers getting
    // released, it is possible.
    AfterMaybeRemove();
  }

  template <typename Function>
  bool Invalidate(Function&& aRemoveCallback) {
    // Remove all non-blob recordings from the cache. Invalidate any blob
    // recordings.
    bool foundRecording = false;
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());

      if (current->GetSurfaceKey().Flags() & SurfaceFlags::RECORD_BLOB) {
        foundRecording = true;
        current->InvalidateRecording();
        continue;
      }

      aRemoveCallback(current);
      iter.Remove();
    }

    AfterMaybeRemove();
    return foundRecording;
  }
  IntSize SuggestedSize(const IntSize& aSize) const {
    IntSize suggestedSize = SuggestedSizeInternal(aSize);
    if (mIsVectorImage) {
      suggestedSize = SurfaceCache::ClampVectorSize(suggestedSize);
    }
    return suggestedSize;
  }

  IntSize SuggestedSizeInternal(const IntSize& aSize) const {
    // When not in factor of 2 mode, we can always decode at the given size.
    if (!mFactor2Mode) {
      return aSize;
    }

    // We cannot enter factor of 2 mode unless we have a minimum number of
    // surfaces, and we should have left it if the cache was emptied.
    if (MOZ_UNLIKELY(IsEmpty())) {
      MOZ_ASSERT_UNREACHABLE("Should not be empty and in factor of 2 mode!");
      return aSize;
    }

    // This bit of awkwardness gets the largest native size of the image.
    NotNull<CachedSurface*> firstSurface =
        WrapNotNull(mSurfaces.ConstIter().UserData());
    Image* image = static_cast<Image*>(firstSurface->GetImageKey());
    IntSize factorSize;
    if (NS_FAILED(image->GetWidth(&factorSize.width)) ||
        NS_FAILED(image->GetHeight(&factorSize.height)) ||
        factorSize.IsEmpty()) {
      // Valid vector images may have a default size of 0x0. In that case, just
      // assume a default size of 100x100 and apply the intrinsic ratio if
      // available. If our guess was too small, don't use factor-of-2 scaling.
      MOZ_ASSERT(mIsVectorImage);
      factorSize = IntSize(100, 100);
      Maybe<AspectRatio> aspectRatio = image->GetIntrinsicRatio();
      if (aspectRatio && *aspectRatio) {
        factorSize.width =
            NSToIntRound(aspectRatio->ApplyToFloat(float(factorSize.height)));
        if (factorSize.IsEmpty()) {
          return aSize;
        }
      }
    }

    if (mIsVectorImage) {
      // Ensure the aspect ratio matches the native size before forcing the
      // caller to accept a factor of 2 size. The difference between the aspect
      // ratios is:
      //
      // delta = nativeWidth/nativeHeight - desiredWidth/desiredHeight
      //
      // delta*nativeHeight*desiredHeight = nativeWidth*desiredHeight
      //                                    - desiredWidth*nativeHeight
      //
      // Using the maximum accepted delta as a constant, we can avoid the
      // floating point division and just compare after some integer ops.
      int32_t delta =
          factorSize.width * aSize.height - aSize.width * factorSize.height;
      int32_t maxDelta = (factorSize.height * aSize.height) >> 4;
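      // maxDelta is 1/16 of the denominator product, i.e. the aspect ratios
      // may differ by at most about 6% before we give up and simply suggest
      // the requested size.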
      if (delta > maxDelta || delta < -maxDelta) {
        return aSize;
      }

      // If the requested size is bigger than the native size, we actually need
      // to grow the native size instead of shrinking it.
      if (factorSize.width < aSize.width) {
        do {
          IntSize candidate(factorSize.width * 2, factorSize.height * 2);
          if (!SurfaceCache::IsLegalSize(candidate)) {
            break;
          }

          factorSize = candidate;
        } while (factorSize.width < aSize.width);

        return factorSize;
      }

      // Otherwise we can find the best fit as normal.
    }

    // Start with the native size as the best first guess.
    IntSize bestSize = factorSize;
    factorSize.width /= 2;
    factorSize.height /= 2;

    while (!factorSize.IsEmpty()) {
      if (!CompareArea(aSize, bestSize, factorSize)) {
        // This size is not better than the last. Since we proceed from largest
        // to smallest, we know that the next size will not be better if the
        // previous size was rejected. Break early.
        break;
      }

      // The current factor of 2 size is better than the last selected size.
      bestSize = factorSize;
      factorSize.width /= 2;
      factorSize.height /= 2;
    }

    return bestSize;
  }
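  // Worked example (illustrative): for a raster image with a 512x512 native
  // size in factor of 2 mode, a request for 160x160 walks the candidates
  // 512x512 -> 256x256 and settles on 256x256, the smallest factor of 2 size
  // not smaller than the request, while a request for 600x600 keeps 512x512.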
  bool CompareArea(const IntSize& aIdealSize, const IntSize& aBestSize,
                   const IntSize& aSize) const {
    // Compare sizes. We use an area-based heuristic here instead of computing a
    // truly optimal answer, since it seems very unlikely to make a difference
    // for realistic sizes.
    int64_t idealArea = AreaOfIntSize(aIdealSize);
    int64_t currentArea = AreaOfIntSize(aSize);
    int64_t bestMatchArea = AreaOfIntSize(aBestSize);

    // If the best match is smaller than the ideal size, prefer bigger sizes.
    if (bestMatchArea < idealArea) {
      if (currentArea > bestMatchArea) {
        return true;
      }
      return false;
    }

    // Otherwise, prefer sizes closer to the ideal size, but still not smaller.
    if (idealArea <= currentArea && currentArea < bestMatchArea) {
      return true;
    }

    // This surface isn't an improvement over the current best match.
    return false;
  }
  template <typename Function>
  void CollectSizeOfSurfaces(nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             Function&& aRemoveCallback) {
    CachedSurface::SurfaceMemoryReport report(aCounters, aMallocSizeOf);
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());

      // We don't need the drawable surface for ourselves, but adding a surface
      // to the report will trigger this indirectly. If the surface was
      // discarded by the OS because it was in volatile memory, we should remove
      // it from the cache immediately rather than include it in the report.
      DrawableSurface drawableSurface;
      if (!surface->IsPlaceholder()) {
        drawableSurface = surface->GetDrawableSurface();
        if (!drawableSurface) {
          aRemoveCallback(surface);
          iter.Remove();
          continue;
        }
      }

      const IntSize& size = surface->GetSurfaceKey().Size();
      bool factor2Size = false;
      if (mFactor2Mode) {
        factor2Size = (size == SuggestedSize(size));
      }
      report.Add(surface, factor2Size);
    }

    AfterMaybeRemove();
  }

  void SetLocked(bool aLocked) { mLocked = aLocked; }
  bool IsLocked() const { return mLocked; }

 private:
  void AfterMaybeRemove() {
    if (IsEmpty() && mFactor2Mode) {
      // The last surface for this cache was removed. This can happen if the
      // surface was stored in a volatile buffer and got purged, or the surface
      // expired from the cache. If the cache itself lingers for some reason
      // (e.g. in the process of performing a lookup, the cache itself is
      // locked), then we need to reset the factor of 2 state because it
      // requires at least one surface present to get the native size
      // information from the image.
      mFactor2Mode = mFactor2Pruned = false;
    }
  }

  SurfaceTable mSurfaces;

  bool mLocked;

  // True in "factor of 2" mode.
  bool mFactor2Mode;

  // True if all non-factor of 2 surfaces have been removed from the cache. Note
  // that this excludes unsubstitutable sizes.
  bool mFactor2Pruned;

  // True if the surfaces are produced from a vector image. If so, it must match
  // the aspect ratio when using factor of 2 mode.
  bool mIsVectorImage;
};
/**
 * SurfaceCacheImpl is responsible for determining which surfaces will be cached
 * and managing the surface cache data structures. Rather than interact with
 * SurfaceCacheImpl directly, client code interacts with SurfaceCache, which
 * maintains high-level invariants and encapsulates the details of the surface
 * cache's implementation.
 */
class SurfaceCacheImpl final : public nsIMemoryReporter {
 public:
  NS_DECL_ISUPPORTS

  SurfaceCacheImpl(uint32_t aSurfaceCacheExpirationTimeMS,
                   uint32_t aSurfaceCacheDiscardFactor,
                   uint32_t aSurfaceCacheSize)
      : mExpirationTracker(aSurfaceCacheExpirationTimeMS),
        mMemoryPressureObserver(new MemoryPressureObserver),
        mDiscardFactor(aSurfaceCacheDiscardFactor),
        mMaxCost(aSurfaceCacheSize),
        mAvailableCost(aSurfaceCacheSize),
        mLockedCost(0),
        mOverflowCount(0),
        mAlreadyPresentCount(0),
        mTableFailureCount(0),
        mTrackingFailureCount(0) {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->AddObserver(mMemoryPressureObserver, "memory-pressure", false);
    }
  }

 private:
  virtual ~SurfaceCacheImpl() {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->RemoveObserver(mMemoryPressureObserver, "memory-pressure");
    }

    UnregisterWeakMemoryReporter(this);
  }

 public:
  void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
  InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider, bool aSetAvailable,
                       const StaticMutexAutoLock& aAutoLock) {
    // If this is a duplicate surface, refuse to replace the original.
    // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
    // twice. We'll make this more efficient in bug 1185137.
    LookupResult result =
        Lookup(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock,
               /* aMarkUsed = */ false);
    if (MOZ_UNLIKELY(result)) {
      mAlreadyPresentCount++;
      return InsertOutcome::FAILURE_ALREADY_PRESENT;
    }

    if (result.Type() == MatchType::PENDING) {
      RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(),
                  aAutoLock);
    }

    MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND ||
                   result.Type() == MatchType::PENDING,
               "A LookupResult with no surface should be NOT_FOUND or PENDING");

    // If this is bigger than we can hold after discarding everything we can,
    // refuse to cache it.
    Cost cost = aProvider->LogicalSizeInBytes();
    if (MOZ_UNLIKELY(!CanHoldAfterDiscarding(cost))) {
      mOverflowCount++;
      return InsertOutcome::FAILURE;
    }

    // Remove elements in order of cost until we can fit this in the cache. Note
    // that locked surfaces aren't in mCosts, so we never remove them here.
    while (cost > mAvailableCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(),
                 "Removed everything and it still won't fit");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }

    // Locate the appropriate per-image cache. If there's not an existing cache
    // for this image, create it.
    const ImageKey imageKey = aProvider->GetImageKey();
    RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
    if (!cache) {
      cache = new ImageSurfaceCache(imageKey);
      if (!mImageCaches.InsertOrUpdate(aProvider->GetImageKey(), RefPtr{cache},
                                       fallible)) {
        mTableFailureCount++;
        return InsertOutcome::FAILURE;
      }
    }

    // If we were asked to mark the cache entry available, do so.
    if (aSetAvailable) {
      aProvider->Availability().SetAvailable();
    }

    auto surface = MakeNotNull<RefPtr<CachedSurface>>(aProvider);

    // We require that locking succeed if the image is locked and we're not
    // inserting a placeholder; the caller may need to know this to handle
    // errors correctly.
    bool mustLock = cache->IsLocked() && !surface->IsPlaceholder();
    if (mustLock) {
      surface->SetLocked(true);
      if (!surface->IsLocked()) {
        return InsertOutcome::FAILURE;
      }
    }

    // Insert.
    MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost");
    if (!cache->Insert(surface)) {
      mTableFailureCount++;
      if (mustLock) {
        surface->SetLocked(false);
      }
      return InsertOutcome::FAILURE;
    }

    if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
      MOZ_ASSERT(!mustLock);
      Remove(surface, /* aStopTracking */ false, aAutoLock);
      return InsertOutcome::FAILURE;
    }

    return InsertOutcome::SUCCESS;
  }
  void Remove(NotNull<CachedSurface*> aSurface, bool aStopTracking,
              const StaticMutexAutoLock& aAutoLock) {
    ImageKey imageKey = aSurface->GetImageKey();

    RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
    MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache");

    // If the surface was not a placeholder, tell its image that we discarded
    // it.
    if (!aSurface->IsPlaceholder()) {
      static_cast<Image*>(imageKey)->OnSurfaceDiscarded(
          aSurface->GetSurfaceKey());
    }

    // If we failed during StartTracking, we can skip this step.
    if (aStopTracking) {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
    }

    // Individual surfaces must be freed outside the lock.
    mCachedSurfacesDiscard.AppendElement(cache->Remove(aSurface));

    MaybeRemoveEmptyCache(imageKey, cache);
  }

  bool StartTracking(NotNull<CachedSurface*> aSurface,
                     const StaticMutexAutoLock& aAutoLock) {
    CostEntry costEntry = aSurface->GetCostEntry();
    MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost,
               "Cost too large and the caller didn't catch it");

    if (aSurface->IsLocked()) {
      mLockedCost += costEntry.GetCost();
      MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?");
    } else {
      if (NS_WARN_IF(!mCosts.InsertElementSorted(costEntry, fallible))) {
        mTrackingFailureCount++;
        return false;
      }

      // This may fail during XPCOM shutdown, so we need to ensure the object is
      // tracked before calling RemoveObject in StopTracking.
      nsresult rv = mExpirationTracker.AddObjectLocked(aSurface, aAutoLock);
      if (NS_WARN_IF(NS_FAILED(rv))) {
        DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
        MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
        mTrackingFailureCount++;
        return false;
      }
    }

    mAvailableCost -= costEntry.GetCost();
    return true;
  }

  void StopTracking(NotNull<CachedSurface*> aSurface, bool aIsTracked,
                    const StaticMutexAutoLock& aAutoLock) {
    CostEntry costEntry = aSurface->GetCostEntry();

    if (aSurface->IsLocked()) {
      MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance");
      mLockedCost -= costEntry.GetCost();
      // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n).
      MOZ_ASSERT(!mCosts.Contains(costEntry),
                 "Shouldn't have a cost entry for a locked surface");
    } else {
      if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) {
        MOZ_ASSERT(aIsTracked, "Expiration-tracking a surface unexpectedly!");
        mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock);
      } else {
        // Our call to AddObject must have failed in StartTracking; most likely
        // we're in XPCOM shutdown right now.
        MOZ_ASSERT(!aIsTracked, "Not expiration-tracking an unlocked surface!");
      }

      DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
      MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
    }

    mAvailableCost += costEntry.GetCost();
    MOZ_ASSERT(mAvailableCost <= mMaxCost,
               "More available cost than we started with");
  }
  LookupResult Lookup(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
                      const StaticMutexAutoLock& aAutoLock, bool aMarkUsed) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(MatchType::NOT_FOUND);
    }

    RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey, aMarkUsed);
    if (!surface) {
      // Lookup in the per-image cache missed.
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (surface->IsPlaceholder()) {
      return LookupResult(MatchType::PENDING);
    }

    DrawableSurface drawableSurface = surface->GetDrawableSurface();
    if (!drawableSurface) {
      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (aMarkUsed &&
        !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
      Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
               "Lookup() not returning an exact match?");
    return LookupResult(std::move(drawableSurface), MatchType::EXACT);
  }
  LookupResult LookupBestMatch(const ImageKey aImageKey,
                               const SurfaceKey& aSurfaceKey,
                               const StaticMutexAutoLock& aAutoLock,
                               bool aMarkUsed) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(
          MatchType::NOT_FOUND,
          SurfaceCache::ClampSize(aImageKey, aSurfaceKey.Size()));
    }

    // Repeatedly look up the best match, trying again if the resulting surface
    // has been freed by the operating system, until we can either lock a
    // surface for drawing or there are no matching surfaces left.
    // XXX(seth): This is O(N^2), but N is expected to be very small. If we
    // encounter a performance problem here we can revisit this.

    RefPtr<CachedSurface> surface;
    DrawableSurface drawableSurface;
    MatchType matchType = MatchType::NOT_FOUND;
    IntSize suggestedSize;
    while (true) {
      std::tie(surface, matchType, suggestedSize) =
          cache->LookupBestMatch(aSurfaceKey);

      if (!surface) {
        return LookupResult(
            matchType, suggestedSize);  // Lookup in the per-image cache missed.
      }

      drawableSurface = surface->GetDrawableSurface();
      if (drawableSurface) {
        break;
      }

      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
    }

    MOZ_ASSERT_IF(matchType == MatchType::EXACT,
                  surface->GetSurfaceKey() == aSurfaceKey);
    MOZ_ASSERT_IF(
        matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND ||
            matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING,
        surface->GetSurfaceKey().Region() == aSurfaceKey.Region() &&
            surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() &&
            surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() &&
            surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags());

    if (matchType == MatchType::EXACT ||
        matchType == MatchType::SUBSTITUTE_BECAUSE_BEST) {
      if (aMarkUsed &&
          !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
        Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      }
    }

    return LookupResult(std::move(drawableSurface), matchType, suggestedSize);
  }
  bool CanHold(const Cost aCost) const { return aCost <= mMaxCost; }

  size_t MaximumCapacity() const { return size_t(mMaxCost); }

  void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
                        const StaticMutexAutoLock& aAutoLock) {
    if (!aProvider->Availability().IsPlaceholder()) {
      MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
      return;
    }

    // Reinsert the provider, requesting that Insert() mark it available. This
    // may or may not succeed, depending on whether some other decoder has
    // beaten us to the punch and inserted a non-placeholder version of this
    // surface first, but it's fine either way.
    // XXX(seth): This could be implemented more efficiently; we should be able
    // to just update our data structures without reinserting.
    Insert(aProvider, /* aSetAvailable = */ true, aAutoLock);
  }
  void LockImage(const ImageKey aImageKey) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      cache = new ImageSurfaceCache(aImageKey);
      mImageCaches.InsertOrUpdate(aImageKey, RefPtr{cache});
    }

    cache->SetLocked(true);

    // We don't relock this image's existing surfaces right away; instead, the
    // image should arrange for Lookup() to touch them if they are still useful.
  }

  void UnlockImage(const ImageKey aImageKey,
                   const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache || !cache->IsLocked()) {
      return;  // Already unlocked.
    }

    cache->SetLocked(false);
    DoUnlockSurfaces(WrapNotNull(cache), /* aStaticOnly = */ false, aAutoLock);
  }

  void UnlockEntries(const ImageKey aImageKey,
                     const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache || !cache->IsLocked()) {
      return;  // Already unlocked.
    }

    // (Note that we *don't* unlock the per-image cache here; that's the
    // difference between this and UnlockImage.)
    DoUnlockSurfaces(WrapNotNull(cache),
                     /* aStaticOnly = */
                     !StaticPrefs::image_mem_animated_discardable_AtStartup(),
                     aAutoLock);
  }

  already_AddRefed<ImageSurfaceCache> RemoveImage(
      const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return nullptr;  // No cached surfaces for this image, so nothing to do.
    }

    // Discard all of the cached surfaces for this image.
    // XXX(seth): This is O(n^2) since for each item in the cache we are
    // removing an element from the costs array. Since n is expected to be
    // small, performance should be good, but if usage patterns change we should
    // change the data structure used for mCosts.
    for (const auto& value : cache->Values()) {
      StopTracking(WrapNotNull(value),
                   /* aIsTracked */ true, aAutoLock);
    }

    // The per-image cache isn't needed anymore, so remove it as well.
    // This implicitly unlocks the image if it was locked.
    mImageCaches.Remove(aImageKey);

    // Since we did not actually remove any of the surfaces from the cache
    // itself, only stopped tracking them, we should free it outside the lock.
    return cache.forget();
  }
  void PruneImage(const ImageKey aImageKey,
                  const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return;  // No cached surfaces for this image, so nothing to do.
    }

    cache->Prune([this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
      // Individual surfaces must be freed outside the lock.
      mCachedSurfacesDiscard.AppendElement(aSurface);
    });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }

  bool InvalidateImage(const ImageKey aImageKey,
                       const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return false;  // No cached surfaces for this image, so nothing to do.
    }

    bool rv = cache->Invalidate(
        [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
          StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
          // Individual surfaces must be freed outside the lock.
          mCachedSurfacesDiscard.AppendElement(aSurface);
        });

    MaybeRemoveEmptyCache(aImageKey, cache);
    return rv;
  }
  void DiscardAll(const StaticMutexAutoLock& aAutoLock) {
    // Remove in order of cost because mCosts is an array and the other data
    // structures are all hash tables. Note that locked surfaces are not
    // removed, since they aren't present in mCosts.
    while (!mCosts.IsEmpty()) {
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }
  }

  void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock) {
    // Compute our discardable cost. Since locked surfaces aren't discardable,
    // we exclude them.
    const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost;
    MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up");

    // Our target is to raise our available cost by (1 / mDiscardFactor) of our
    // discardable cost - in other words, we want to end up with about
    // (discardableCost / mDiscardFactor) fewer bytes stored in the surface
    // cache after we're done.
    const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor);
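    // For example, with a discard factor of 4 and 80MB of discardable
    // surfaces, this aims to free roughly 20MB of surface data.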
    if (targetCost > mMaxCost - mLockedCost) {
      MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard");
      DiscardAll(aAutoLock);
      return;
    }

    // Discard surfaces until we've reduced our cost to our target cost.
    while (mAvailableCost < targetCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }
  }
  void TakeDiscard(nsTArray<RefPtr<CachedSurface>>& aDiscard,
                   const StaticMutexAutoLock& aAutoLock) {
    MOZ_ASSERT(aDiscard.IsEmpty());
    aDiscard = std::move(mCachedSurfacesDiscard);
  }

  already_AddRefed<CachedSurface> GetSurfaceForResetAnimation(
      const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
      const StaticMutexAutoLock& aAutoLock) {
    RefPtr<CachedSurface> surface;

    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return surface.forget();
    }

    surface = cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
    return surface.forget();
  }

  void LockSurface(NotNull<CachedSurface*> aSurface,
                   const StaticMutexAutoLock& aAutoLock) {
    if (aSurface->IsPlaceholder() || aSurface->IsLocked()) {
      return;
    }

    StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);

    // Lock the surface. This can fail.
    aSurface->SetLocked(true);
    DebugOnly<bool> tracked = StartTracking(aSurface, aAutoLock);
    MOZ_ASSERT(tracked);
  }

  size_t ShallowSizeOfIncludingThis(
      MallocSizeOf aMallocSizeOf, const StaticMutexAutoLock& aAutoLock) const {
    size_t bytes =
        aMallocSizeOf(this) + mCosts.ShallowSizeOfExcludingThis(aMallocSizeOf) +
        mImageCaches.ShallowSizeOfExcludingThis(aMallocSizeOf) +
        mCachedSurfacesDiscard.ShallowSizeOfExcludingThis(aMallocSizeOf) +
        mExpirationTracker.ShallowSizeOfExcludingThis(aMallocSizeOf);
    for (const auto& data : mImageCaches.Values()) {
      bytes += data->ShallowSizeOfIncludingThis(aMallocSizeOf);
    }
    return bytes;
  }
  NS_IMETHOD
  CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
                 bool aAnonymize) override {
    StaticMutexAutoLock lock(sInstanceMutex);

    uint32_t lockedImageCount = 0;
    uint32_t totalSurfaceCount = 0;
    uint32_t lockedSurfaceCount = 0;
    for (const auto& cache : mImageCaches.Values()) {
      totalSurfaceCount += cache->Count();
      if (cache->IsLocked()) {
        ++lockedImageCount;
      }
      for (const auto& value : cache->Values()) {
        if (value->IsLocked()) {
          ++lockedSurfaceCount;
        }
      }
    }

    // clang-format off
    // We have explicit memory reporting for the surface cache which is more
    // accurate than the cost metrics we report here, but these metrics are
    // still useful to report, since they control the cache's behavior.
    MOZ_COLLECT_REPORT(
      "explicit/images/cache/overhead", KIND_HEAP, UNITS_BYTES,
      ShallowSizeOfIncludingThis(SurfaceCacheMallocSizeOf, lock),
      "Memory used by the surface cache data structures, excluding surface data.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-total",
      KIND_OTHER, UNITS_BYTES, (mMaxCost - mAvailableCost),
      "Estimated total memory used by the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-locked",
      KIND_OTHER, UNITS_BYTES, mLockedCost,
      "Estimated memory used by locked surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-tracked-cost-count",
      KIND_OTHER, UNITS_COUNT, mCosts.Length(),
      "Total number of surfaces tracked for cost (and expiry) in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-tracked-expiry-count",
      KIND_OTHER, UNITS_COUNT, mExpirationTracker.Length(lock),
      "Total number of surfaces tracked for expiry (and cost) in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-image-count",
      KIND_OTHER, UNITS_COUNT, mImageCaches.Count(),
      "Total number of images in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-locked-image-count",
      KIND_OTHER, UNITS_COUNT, lockedImageCount,
      "Total number of locked images in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-image-surface-count",
      KIND_OTHER, UNITS_COUNT, totalSurfaceCount,
      "Total number of surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-locked-surfaces-count",
      KIND_OTHER, UNITS_COUNT, lockedSurfaceCount,
      "Total number of locked surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-overflow-count",
      KIND_OTHER, UNITS_COUNT, mOverflowCount,
      "Count of how many times the surface cache has hit its capacity and been "
      "unable to insert a new surface.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-tracking-failure-count",
      KIND_OTHER, UNITS_COUNT, mTrackingFailureCount,
      "Count of how many times the surface cache has failed to begin tracking a "
      "given surface.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-already-present-count",
      KIND_OTHER, UNITS_COUNT, mAlreadyPresentCount,
      "Count of how many times the surface cache has failed to insert a surface "
      "because it is already present.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-table-failure-count",
      KIND_OTHER, UNITS_COUNT, mTableFailureCount,
      "Count of how many times the surface cache has failed to insert a surface "
      "because a hash table could not accept an entry.");
    // clang-format on

    return NS_OK;
  }
  void CollectSizeOfSurfaces(const ImageKey aImageKey,
                             nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return;  // No surfaces for this image.
    }

    // Report all surfaces in the per-image cache.
    cache->CollectSizeOfSurfaces(
        aCounters, aMallocSizeOf,
        [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
          StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
          // Individual surfaces must be freed outside the lock.
          mCachedSurfacesDiscard.AppendElement(aSurface);
        });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }

  void ReleaseImageOnMainThread(already_AddRefed<image::Image>&& aImage,
                                const StaticMutexAutoLock& aAutoLock) {
    RefPtr<image::Image> image = aImage;
    if (!image) {
      return;
    }

    bool needsDispatch = mReleasingImagesOnMainThread.IsEmpty();
    mReleasingImagesOnMainThread.AppendElement(image);

    if (!needsDispatch ||
        AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownFinal)) {
      // Either there is already an ongoing task for ClearReleasingImages() or
      // it's too late in shutdown to dispatch.
      return;
    }

    NS_DispatchToMainThread(NS_NewRunnableFunction(
        "SurfaceCacheImpl::ReleaseImageOnMainThread",
        []() -> void { SurfaceCache::ClearReleasingImages(); }));
  }

  void TakeReleasingImages(nsTArray<RefPtr<image::Image>>& aImage,
                           const StaticMutexAutoLock& aAutoLock) {
    MOZ_ASSERT(NS_IsMainThread());
    aImage.SwapElements(mReleasingImagesOnMainThread);
  }
 private:
  already_AddRefed<ImageSurfaceCache> GetImageCache(const ImageKey aImageKey) {
    RefPtr<ImageSurfaceCache> imageCache;
    mImageCaches.Get(aImageKey, getter_AddRefs(imageCache));
    return imageCache.forget();
  }

  void MaybeRemoveEmptyCache(const ImageKey aImageKey,
                             ImageSurfaceCache* aCache) {
    // Remove the per-image cache if it's unneeded now. Keep it if the image is
    // locked, since the per-image cache is where we store that state. Note that
    // we don't push it into mImageCachesDiscard because all of its surfaces
    // have been removed, so it is safe to free while holding the lock.
    if (aCache->IsEmpty() && !aCache->IsLocked()) {
      mImageCaches.Remove(aImageKey);
    }
  }

  // This is similar to CanHold() except that it takes into account the costs of
  // locked surfaces. It's used internally in Insert(), but it's not exposed
  // publicly because we permit multithreaded access to the surface cache, which
  // means that the result would be meaningless: another thread could insert a
  // surface or lock an image at any time.
  bool CanHoldAfterDiscarding(const Cost aCost) const {
    return aCost <= mMaxCost - mLockedCost;
  }

  bool MarkUsed(NotNull<CachedSurface*> aSurface,
                NotNull<ImageSurfaceCache*> aCache,
                const StaticMutexAutoLock& aAutoLock) {
    if (aCache->IsLocked()) {
      LockSurface(aSurface, aAutoLock);
      return true;
    }

    nsresult rv = mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // If mark used fails, it is because it failed to reinsert the surface
      // after removing it from the tracker. Thus we need to update our
      // own accounting but otherwise expect it to be untracked.
      StopTracking(aSurface, /* aIsTracked */ false, aAutoLock);
      return false;
    }
    return true;
  }

  void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache, bool aStaticOnly,
                        const StaticMutexAutoLock& aAutoLock) {
    AutoTArray<NotNull<CachedSurface*>, 8> discard;

    // Unlock all the surfaces the per-image cache is holding.
    for (const auto& value : aCache->Values()) {
      NotNull<CachedSurface*> surface = WrapNotNull(value);
      if (surface->IsPlaceholder() || !surface->IsLocked()) {
        continue;
      }
      if (aStaticOnly &&
          surface->GetSurfaceKey().Playback() != PlaybackType::eStatic) {
        continue;
      }
      StopTracking(surface, /* aIsTracked */ true, aAutoLock);
      surface->SetLocked(false);
      if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
        discard.AppendElement(surface);
      }
    }

    // Discard any that we failed to track.
    for (auto iter = discard.begin(); iter != discard.end(); ++iter) {
      Remove(*iter, /* aStopTracking */ false, aAutoLock);
    }
  }

  void RemoveEntry(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
                   const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return;  // No cached surfaces for this image.
    }

    RefPtr<CachedSurface> surface =
        cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
    if (!surface) {
      return;  // Lookup in the per-image cache missed.
    }

    Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
  }
  class SurfaceTracker final
      : public ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
                                     StaticMutexAutoLock> {
   public:
    explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS)
        : ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
                                StaticMutexAutoLock>(
              aSurfaceCacheExpirationTimeMS, "SurfaceTracker") {}

   protected:
    void NotifyExpiredLocked(CachedSurface* aSurface,
                             const StaticMutexAutoLock& aAutoLock) override {
      sInstance->Remove(WrapNotNull(aSurface), /* aStopTracking */ true,
                        aAutoLock);
    }

    void NotifyHandlerEndLocked(const StaticMutexAutoLock& aAutoLock) override {
      sInstance->TakeDiscard(mDiscard, aAutoLock);
    }

    void NotifyHandlerEnd() override {
      nsTArray<RefPtr<CachedSurface>> discard(std::move(mDiscard));
    }

    StaticMutex& GetMutex() override { return sInstanceMutex; }

    nsTArray<RefPtr<CachedSurface>> mDiscard;
  };

  class MemoryPressureObserver final : public nsIObserver {
   public:
    NS_DECL_ISUPPORTS

    NS_IMETHOD Observe(nsISupports*, const char* aTopic,
                       const char16_t*) override {
      nsTArray<RefPtr<CachedSurface>> discard;
      {
        StaticMutexAutoLock lock(sInstanceMutex);
        if (sInstance && strcmp(aTopic, "memory-pressure") == 0) {
          sInstance->DiscardForMemoryPressure(lock);
          sInstance->TakeDiscard(discard, lock);
        }
      }
      return NS_OK;
    }

   private:
    virtual ~MemoryPressureObserver() {}
  };
  nsTArray<CostEntry> mCosts;
  nsRefPtrHashtable<nsPtrHashKey<Image>, ImageSurfaceCache> mImageCaches;
  nsTArray<RefPtr<CachedSurface>> mCachedSurfacesDiscard;
  SurfaceTracker mExpirationTracker;
  RefPtr<MemoryPressureObserver> mMemoryPressureObserver;
  nsTArray<RefPtr<image::Image>> mReleasingImagesOnMainThread;
  const uint32_t mDiscardFactor;
  const Cost mMaxCost;
  Cost mAvailableCost;
  Cost mLockedCost;
  size_t mOverflowCount;
  size_t mAlreadyPresentCount;
  size_t mTableFailureCount;
  size_t mTrackingFailureCount;
};

NS_IMPL_ISUPPORTS(SurfaceCacheImpl, nsIMemoryReporter)
NS_IMPL_ISUPPORTS(SurfaceCacheImpl::MemoryPressureObserver, nsIObserver)

///////////////////////////////////////////////////////////////////////////////
// Public API
///////////////////////////////////////////////////////////////////////////////
/* static */
void SurfaceCache::Initialize() {
  // Initialize preferences.
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(!sInstance, "Shouldn't initialize more than once");

  // See StaticPrefs for the default values of these preferences.

  // Length of time before an unused surface is removed from the cache, in
  // milliseconds.
  uint32_t surfaceCacheExpirationTimeMS =
      StaticPrefs::image_mem_surfacecache_min_expiration_ms_AtStartup();

  // What fraction of the memory used by the surface cache we should discard
  // when we get a memory pressure notification. This value is interpreted as
  // 1/N, so 1 means to discard everything, 2 means to discard about half of
  // the memory we're using, and so forth. We clamp it to avoid division by
  // zero.
  uint32_t surfaceCacheDiscardFactor =
      max(StaticPrefs::image_mem_surfacecache_discard_factor_AtStartup(), 1u);

  // Maximum size of the surface cache, in kilobytes.
  uint64_t surfaceCacheMaxSizeKB =
      StaticPrefs::image_mem_surfacecache_max_size_kb_AtStartup();

  if (sizeof(uintptr_t) <= 4) {
    // Limit surface cache to 1 GB if our address space is 32 bit.
    surfaceCacheMaxSizeKB = 1024 * 1024;
  }

  // A knob determining the actual size of the surface cache. Currently the
  // cache is (size of main memory) / (surface cache size factor) KB
  // or (surface cache max size) KB, whichever is smaller. The formula
  // may change in the future, though.
  // For example, a value of 4 would yield a 256MB cache on a 1GB machine.
  // The smallest machines we are likely to run this code on have 256MB
  // of memory, which would yield a 64MB cache on this setting.
  // We clamp this value to avoid division by zero.
  uint32_t surfaceCacheSizeFactor =
      max(StaticPrefs::image_mem_surfacecache_size_factor_AtStartup(), 1u);

  // Compute the size of the surface cache.
  uint64_t memorySize = PR_GetPhysicalMemorySize();
  if (memorySize == 0) {
#if !defined(__DragonFly__)
    MOZ_ASSERT_UNREACHABLE("PR_GetPhysicalMemorySize not implemented here");
#endif
    memorySize = 256 * 1024 * 1024;  // Fall back to 256MB.
  }
  uint64_t proposedSize = memorySize / surfaceCacheSizeFactor;
  uint64_t surfaceCacheSizeBytes =
      min(proposedSize, surfaceCacheMaxSizeKB * 1024);
  uint32_t finalSurfaceCacheSizeBytes =
      min(surfaceCacheSizeBytes, uint64_t(UINT32_MAX));
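
  // For example (illustrative numbers, not the pref defaults): on an 8 GiB
  // machine with a size factor of 4, the proposed size is 2 GiB; it is then
  // limited to the smaller of the max-size pref (forced to 1 GiB on 32-bit
  // builds) and UINT32_MAX bytes.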

  // Create the surface cache singleton with the requested settings. Note that
  // the size is a limit that the cache may not grow beyond, but we do not
  // actually allocate any storage for surfaces at this time.
  sInstance = new SurfaceCacheImpl(surfaceCacheExpirationTimeMS,
                                   surfaceCacheDiscardFactor,
                                   finalSurfaceCacheSizeBytes);
  sInstance->InitMemoryReporter();
}

/* static */
void SurfaceCache::Shutdown() {
  RefPtr<SurfaceCacheImpl> cache;
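  // Hand the singleton off to a local reference so that the final release
  // happens after the lock below has been dropped.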
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?");
    cache = sInstance.forget();
  }
}

/* static */
LookupResult SurfaceCache::Lookup(const ImageKey aImageKey,
                                  const SurfaceKey& aSurfaceKey,
                                  bool aMarkUsed) {
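  // Any surfaces the cache decides to discard are handed back via |discard|
  // while the lock is held and released after it has been dropped, so their
  // destructors never run under sInstanceMutex.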
  nsTArray<RefPtr<CachedSurface>> discard;
  LookupResult rv(MatchType::NOT_FOUND);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->Lookup(aImageKey, aSurfaceKey, lock, aMarkUsed);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
LookupResult SurfaceCache::LookupBestMatch(const ImageKey aImageKey,
                                           const SurfaceKey& aSurfaceKey,
                                           bool aMarkUsed) {
  nsTArray<RefPtr<CachedSurface>> discard;
  LookupResult rv(MatchType::NOT_FOUND);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock, aMarkUsed);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
InsertOutcome SurfaceCache::Insert(NotNull<ISurfaceProvider*> aProvider) {
  nsTArray<RefPtr<CachedSurface>> discard;
  InsertOutcome rv(InsertOutcome::FAILURE);

  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return rv;
    }

    rv = sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock);
    sInstance->TakeDiscard(discard, lock);
  }

  return rv;
}

/* static */
bool SurfaceCache::CanHold(const IntSize& aSize,
                           uint32_t aBytesPerPixel /* = 4 */) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return false;
  }

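  // For example, a 1000x1000 surface at the default 4 bytes per pixel has a
  // cost of 4,000,000 bytes.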
  Cost cost = ComputeCost(aSize, aBytesPerPixel);
  return sInstance->CanHold(cost);
}

/* static */
bool SurfaceCache::CanHold(size_t aSize) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return false;
  }

  return sInstance->CanHold(aSize);
}

/* static */
void SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return;
  }

  sInstance->SurfaceAvailable(aProvider, lock);
}

/* static */
void SurfaceCache::LockImage(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->LockImage(aImageKey);
  }
}

/* static */
void SurfaceCache::UnlockImage(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->UnlockImage(aImageKey, lock);
  }
}

/* static */
void SurfaceCache::UnlockEntries(const ImageKey aImageKey) {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    return sInstance->UnlockEntries(aImageKey, lock);
  }
}

/* static */
void SurfaceCache::RemoveImage(const ImageKey aImageKey) {
  RefPtr<ImageSurfaceCache> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      discard = sInstance->RemoveImage(aImageKey, lock);
    }
  }
}

/* static */
void SurfaceCache::PruneImage(const ImageKey aImageKey) {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->PruneImage(aImageKey, lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
}

/* static */
bool SurfaceCache::InvalidateImage(const ImageKey aImageKey) {
  nsTArray<RefPtr<CachedSurface>> discard;
  bool rv = false;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      rv = sInstance->InvalidateImage(aImageKey, lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
  return rv;
}

/* static */
void SurfaceCache::DiscardAll() {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->DiscardAll(lock);
      sInstance->TakeDiscard(discard, lock);
    }
  }
}

/* static */
void SurfaceCache::ResetAnimation(const ImageKey aImageKey,
                                  const SurfaceKey& aSurfaceKey) {
  RefPtr<CachedSurface> surface;
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return;
    }

    surface =
        sInstance->GetSurfaceForResetAnimation(aImageKey, aSurfaceKey, lock);
    sInstance->TakeDiscard(discard, lock);
  }

  // Calling Reset will acquire the AnimationSurfaceProvider::mFramesMutex
  // mutex. In other places we acquire the mFramesMutex then call into the
  // surface cache (acquiring the surface cache mutex), so that determines a
  // lock order which we must obey by calling Reset after releasing the surface
  // cache mutex.
  if (surface) {
    DrawableSurface drawableSurface =
        surface->GetDrawableSurfaceEvenIfPlaceholder();
    if (drawableSurface) {
      MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
                 "ResetAnimation() not returning an exact match?");

      drawableSurface.Reset();
    }
  }
}

/* static */
void SurfaceCache::CollectSizeOfSurfaces(
    const ImageKey aImageKey, nsTArray<SurfaceMemoryCounter>& aCounters,
    MallocSizeOf aMallocSizeOf) {
  nsTArray<RefPtr<CachedSurface>> discard;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (!sInstance) {
      return;
    }

    sInstance->CollectSizeOfSurfaces(aImageKey, aCounters, aMallocSizeOf, lock);
    sInstance->TakeDiscard(discard, lock);
  }
}

/* static */
size_t SurfaceCache::MaximumCapacity() {
  StaticMutexAutoLock lock(sInstanceMutex);
  if (!sInstance) {
    return 0;
  }

  return sInstance->MaximumCapacity();
}

/* static */
bool SurfaceCache::IsLegalSize(const IntSize& aSize) {
  // Reject over-wide or over-tall images.
  const int32_t k64KLimit = 0x0000FFFF;
  if (MOZ_UNLIKELY(aSize.width > k64KLimit || aSize.height > k64KLimit)) {
    NS_WARNING("image too big");
    return false;
  }

  // Protect against invalid sizes.
  if (MOZ_UNLIKELY(aSize.height <= 0 || aSize.width <= 0)) {
    return false;
  }

  // Check that the surface size in bytes doesn't overflow a 32-bit integer.
  CheckedInt32 requiredBytes =
      CheckedInt32(aSize.width) * CheckedInt32(aSize.height) * 4;
  if (MOZ_UNLIKELY(!requiredBytes.isValid())) {
    NS_WARNING("width or height too large");
    return false;
  }
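  // For example, a hypothetical 40000x30000 image passes the 64K per-axis
  // check, but 40000 * 30000 * 4 bytes overflows int32_t, so the check above
  // rejects it.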
  const int32_t maxSize =
      StaticPrefs::image_mem_max_legal_imgframe_size_kb_AtStartup();
  if (MOZ_UNLIKELY(maxSize > 0 && requiredBytes.value() / 1024 > maxSize)) {
    return false;
  }

  return true;
}

IntSize SurfaceCache::ClampVectorSize(const IntSize& aSize) {
  // If the requested size exceeds the maximum, we need to scale it down to
  // fit. We shouldn't get here if it is significantly larger, because
  // VectorImage::UseSurfaceCacheForSize should prevent us from requesting
  // a rasterized version of a surface greater than 4x the maximum.
  int32_t maxSizeKB =
      StaticPrefs::image_cache_max_rasterized_svg_threshold_kb();
  if (maxSizeKB <= 0) {
    return aSize;
  }

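  // At 4 bytes per pixel, a width x height rasterization occupies
  // width * height * 4 / 1024 KB, i.e. width * height / 256 KB.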
  int64_t proposedKB = int64_t(aSize.width) * aSize.height / 256;
  if (maxSizeKB >= proposedKB) {
    return aSize;
  }

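  // Scale both axes by the same factor so the clamped area lands at or just
  // below the threshold; e.g. with a hypothetical 5120 KB threshold, a
  // 4000x4000 request (62500 KB proposed) is scaled by sqrt(5120 / 62500),
  // roughly 0.29, down to about 1144x1144.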
  double scale = sqrt(double(maxSizeKB) / proposedKB);
  return IntSize(int32_t(scale * aSize.width), int32_t(scale * aSize.height));
}

IntSize SurfaceCache::ClampSize(ImageKey aImageKey, const IntSize& aSize) {
  if (aImageKey->GetType() != imgIContainer::TYPE_VECTOR) {
    return aSize;
  }

  return ClampVectorSize(aSize);
}

/* static */
void SurfaceCache::ReleaseImageOnMainThread(
    already_AddRefed<image::Image> aImage, bool aAlwaysProxy) {
  if (NS_IsMainThread() && !aAlwaysProxy) {
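    // We are already on the main thread, so just take ownership and let
    // |image| release as it goes out of scope.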
    RefPtr<image::Image> image = std::move(aImage);
    return;
  }

  // Don't try to dispatch the release after shutdown; we'll just leak the
  // runnable.
  if (AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownFinal)) {
    return;
  }

  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    sInstance->ReleaseImageOnMainThread(std::move(aImage), lock);
  } else {
    NS_ReleaseOnMainThread("SurfaceCache::ReleaseImageOnMainThread",
                           std::move(aImage), /* aAlwaysProxy */ true);
  }
}

/* static */
void SurfaceCache::ClearReleasingImages() {
  MOZ_ASSERT(NS_IsMainThread());

  nsTArray<RefPtr<image::Image>> images;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->TakeReleasingImages(images, lock);
    }
  }
}

}  // namespace image
}  // namespace mozilla