Bug 1698786: part 1) Add some logging to `mozInlineSpellChecker`. r=masayuki
[gecko.git] / image / SurfaceCache.cpp
blobc55aeb9c0234f74f38e0ca65298b0b5917972455
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 /**
7 * SurfaceCache is a service for caching temporary surfaces in imagelib.
8 */
10 #include "SurfaceCache.h"
12 #include <algorithm>
13 #include <utility>
15 #include "ISurfaceProvider.h"
16 #include "Image.h"
17 #include "LookupResult.h"
18 #include "ShutdownTracker.h"
19 #include "gfx2DGlue.h"
20 #include "gfxPlatform.h"
21 #include "imgFrame.h"
22 #include "mozilla/Assertions.h"
23 #include "mozilla/Attributes.h"
24 #include "mozilla/CheckedInt.h"
25 #include "mozilla/DebugOnly.h"
26 #include "mozilla/Likely.h"
27 #include "mozilla/RefPtr.h"
28 #include "mozilla/StaticMutex.h"
29 #include "mozilla/StaticPrefs_image.h"
30 #include "mozilla/StaticPtr.h"
31 #include "mozilla/Tuple.h"
32 #include "nsExpirationTracker.h"
33 #include "nsHashKeys.h"
34 #include "nsIMemoryReporter.h"
35 #include "nsRefPtrHashtable.h"
36 #include "nsSize.h"
37 #include "nsTArray.h"
38 #include "Orientation.h"
39 #include "prsystem.h"
41 using std::max;
42 using std::min;
44 namespace mozilla {
46 using namespace gfx;
48 namespace image {
// Malloc-size-of function used by the cache's memory reporter.
MOZ_DEFINE_MALLOC_SIZE_OF(SurfaceCacheMallocSizeOf)

class CachedSurface;
class SurfaceCacheImpl;

///////////////////////////////////////////////////////////////////////////////
// Static Data
///////////////////////////////////////////////////////////////////////////////

// The single surface cache instance.
static StaticRefPtr<SurfaceCacheImpl> sInstance;

// The mutex protecting the surface cache.
static StaticMutex sInstanceMutex;

///////////////////////////////////////////////////////////////////////////////
// SurfaceCache Implementation
///////////////////////////////////////////////////////////////////////////////

/**
 * Cost models the cost of storing a surface in the cache. Right now, this is
 * simply an estimate of the size of the surface in bytes, but in the future it
 * may be worth taking into account the cost of rematerializing the surface as
 * well.
 */
typedef size_t Cost;
77 static Cost ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel) {
78 MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4);
79 return aSize.width * aSize.height * aBytesPerPixel;
82 /**
83 * Since we want to be able to make eviction decisions based on cost, we need to
84 * be able to look up the CachedSurface which has a certain cost as well as the
85 * cost associated with a certain CachedSurface. To make this possible, in data
86 * structures we actually store a CostEntry, which contains a weak pointer to
87 * its associated surface.
89 * To make usage of the weak pointer safe, SurfaceCacheImpl always calls
90 * StartTracking after a surface is stored in the cache and StopTracking before
91 * it is removed.
93 class CostEntry {
94 public:
95 CostEntry(NotNull<CachedSurface*> aSurface, Cost aCost)
96 : mSurface(aSurface), mCost(aCost) {}
98 NotNull<CachedSurface*> Surface() const { return mSurface; }
99 Cost GetCost() const { return mCost; }
101 bool operator==(const CostEntry& aOther) const {
102 return mSurface == aOther.mSurface && mCost == aOther.mCost;
105 bool operator<(const CostEntry& aOther) const {
106 return mCost < aOther.mCost ||
107 (mCost == aOther.mCost && mSurface < aOther.mSurface);
110 private:
111 NotNull<CachedSurface*> mSurface;
112 Cost mCost;
/**
 * A CachedSurface associates a surface with a key that uniquely identifies that
 * surface.
 */
class CachedSurface {
  ~CachedSurface() {}

 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(CachedSurface)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface)

  explicit CachedSurface(NotNull<ISurfaceProvider*> aProvider)
      : mProvider(aProvider), mIsLocked(false) {}

  // Returns a drawable handle from the underlying provider. Must not be
  // called on a placeholder entry (one whose surface is not yet available).
  DrawableSurface GetDrawableSurface() const {
    if (MOZ_UNLIKELY(IsPlaceholder())) {
      MOZ_ASSERT_UNREACHABLE("Called GetDrawableSurface() on a placeholder");
      return DrawableSurface();
    }

    return mProvider->Surface();
  }

  void SetLocked(bool aLocked) {
    if (IsPlaceholder()) {
      return;  // Can't lock a placeholder.
    }

    // Update both our state and our provider's state. Some surface providers
    // are permanently locked; maintaining our own locking state enables us to
    // respect SetLocked() even when it's meaningless from the provider's
    // perspective.
    mIsLocked = aLocked;
    mProvider->SetLocked(aLocked);
  }

  // Locked only when both our flag and the provider agree; placeholders are
  // never locked.
  bool IsLocked() const {
    return !IsPlaceholder() && mIsLocked && mProvider->IsLocked();
  }

  // Marks this surface as one for which callers will accept no substitute
  // size (used to protect it from factor-of-2 pruning).
  void SetCannotSubstitute() {
    mProvider->Availability().SetCannotSubstitute();
  }
  bool CannotSubstitute() const {
    return mProvider->Availability().CannotSubstitute();
  }

  bool IsPlaceholder() const {
    return mProvider->Availability().IsPlaceholder();
  }
  bool IsDecoded() const { return !IsPlaceholder() && mProvider->IsFinished(); }

  ImageKey GetImageKey() const { return mProvider->GetImageKey(); }
  const SurfaceKey& GetSurfaceKey() const { return mProvider->GetSurfaceKey(); }
  nsExpirationState* GetExpirationState() { return &mExpirationState; }

  // Builds the CostEntry (weak pointer + byte cost) used to track this
  // surface for eviction.
  CostEntry GetCostEntry() {
    return image::CostEntry(WrapNotNull(this), mProvider->LogicalSizeInBytes());
  }

  size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(this) + aMallocSizeOf(mProvider.get());
  }

  // A helper type used by SurfaceCacheImpl::CollectSizeOfSurfaces.
  struct MOZ_STACK_CLASS SurfaceMemoryReport {
    SurfaceMemoryReport(nsTArray<SurfaceMemoryCounter>& aCounters,
                        MallocSizeOf aMallocSizeOf)
        : mCounters(aCounters), mMallocSizeOf(aMallocSizeOf) {}

    // Appends a SurfaceMemoryCounter for |aCachedSurface| to mCounters.
    // Placeholders are skipped (they have no surface to measure).
    void Add(NotNull<CachedSurface*> aCachedSurface, bool aIsFactor2) {
      if (aCachedSurface->IsPlaceholder()) {
        return;
      }

      // Record the memory used by the ISurfaceProvider. This may not have a
      // straightforward relationship to the size of the surface that
      // DrawableRef() returns if the surface is generated dynamically. (i.e.,
      // for surfaces with PlaybackType::eAnimated.)
      aCachedSurface->mProvider->AddSizeOfExcludingThis(
          mMallocSizeOf, [&](ISurfaceProvider::AddSizeOfCbData& aMetadata) {
            SurfaceMemoryCounter counter(aCachedSurface->GetSurfaceKey(),
                                         aCachedSurface->IsLocked(),
                                         aCachedSurface->CannotSubstitute(),
                                         aIsFactor2, aMetadata.mFinished);

            counter.Values().SetDecodedHeap(aMetadata.mHeapBytes);
            counter.Values().SetDecodedNonHeap(aMetadata.mNonHeapBytes);
            counter.Values().SetDecodedUnknown(aMetadata.mUnknownBytes);
            counter.Values().SetExternalHandles(aMetadata.mExternalHandles);
            counter.Values().SetFrameIndex(aMetadata.mIndex);
            counter.Values().SetExternalId(aMetadata.mExternalId);
            counter.Values().SetSurfaceTypes(aMetadata.mTypes);

            mCounters.AppendElement(counter);
          });
    }

   private:
    nsTArray<SurfaceMemoryCounter>& mCounters;
    MallocSizeOf mMallocSizeOf;
  };

 private:
  nsExpirationState mExpirationState;
  NotNull<RefPtr<ISurfaceProvider>> mProvider;
  bool mIsLocked;
};
224 static int64_t AreaOfIntSize(const IntSize& aSize) {
225 return static_cast<int64_t>(aSize.width) * static_cast<int64_t>(aSize.height);
/**
 * An ImageSurfaceCache is a per-image surface cache. For correctness we must be
 * able to remove all surfaces associated with an image when the image is
 * destroyed or invalidated. Since this will happen frequently, it makes sense
 * to make it cheap by storing the surfaces for each image separately.
 *
 * ImageSurfaceCache also keeps track of whether its associated image is locked
 * or unlocked.
 *
 * The cache may also enter "factor of 2" mode which occurs when the number of
 * surfaces in the cache exceeds the "image.cache.factor2.threshold-surfaces"
 * pref plus the number of native sizes of the image. When in "factor of 2"
 * mode, the cache will strongly favour sizes which are a factor of 2 of the
 * largest native size. It accomplishes this by suggesting a factor of 2 size
 * when lookups fail and substituting the nearest factor of 2 surface to the
 * ideal size as the "best" available (as opposed to substitution but not
 * found). This allows us to minimize memory consumption and CPU time spent
 * decoding when a website requires many variants of the same surface.
 */
class ImageSurfaceCache {
  ~ImageSurfaceCache() {}

 public:
  explicit ImageSurfaceCache(const ImageKey aImageKey)
      : mLocked(false),
        mFactor2Mode(false),
        mFactor2Pruned(false),
        mIsVectorImage(aImageKey->GetType() == imgIContainer::TYPE_VECTOR) {}

  MOZ_DECLARE_REFCOUNTED_TYPENAME(ImageSurfaceCache)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageSurfaceCache)

  typedef nsRefPtrHashtable<nsGenericHashKey<SurfaceKey>, CachedSurface>
      SurfaceTable;

  bool IsEmpty() const { return mSurfaces.Count() == 0; }

  size_t ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    size_t bytes = aMallocSizeOf(this) +
                   mSurfaces.ShallowSizeOfExcludingThis(aMallocSizeOf);
    for (auto iter = ConstIter(); !iter.Done(); iter.Next()) {
      bytes += iter.UserData()->ShallowSizeOfIncludingThis(aMallocSizeOf);
    }
    return bytes;
  }

  // Inserts |aSurface| into the table, keyed by its SurfaceKey. Fallible;
  // returns false on allocation failure.
  [[nodiscard]] bool Insert(NotNull<CachedSurface*> aSurface) {
    MOZ_ASSERT(!mLocked || aSurface->IsPlaceholder() || aSurface->IsLocked(),
               "Inserting an unlocked surface for a locked image");
    return mSurfaces.InsertOrUpdate(aSurface->GetSurfaceKey(),
                                    RefPtr<CachedSurface>{aSurface}, fallible);
  }

  // Removes |aSurface| from the table and returns the owning reference so the
  // caller can release it outside any lock.
  already_AddRefed<CachedSurface> Remove(NotNull<CachedSurface*> aSurface) {
    MOZ_ASSERT(mSurfaces.GetWeak(aSurface->GetSurfaceKey()),
               "Should not be removing a surface we don't have");

    RefPtr<CachedSurface> surface;
    mSurfaces.Remove(aSurface->GetSurfaceKey(), getter_AddRefs(surface));
    AfterMaybeRemove();
    return surface.forget();
  }

  // Exact-key lookup. |aForAccess| distinguishes a caller that will actually
  // use the surface from internal bookkeeping lookups; only the former marks
  // the entry unsubstitutable or may switch the cache to factor-of-2 mode.
  already_AddRefed<CachedSurface> Lookup(const SurfaceKey& aSurfaceKey,
                                         bool aForAccess) {
    RefPtr<CachedSurface> surface;
    mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface));

    if (aForAccess) {
      if (surface) {
        // We don't want to allow factor of 2 mode pruning to release surfaces
        // for which the callers will accept no substitute.
        surface->SetCannotSubstitute();
      } else if (!mFactor2Mode) {
        // If no exact match is found, and this is for use rather than internal
        // accounting (i.e. insert and removal), we know this will trigger a
        // decode. Make sure we switch now to factor of 2 mode if necessary.
        MaybeSetFactor2Mode();
      }
    }

    return surface.forget();
  }

  /**
   * @returns A tuple containing the best matching CachedSurface if available,
   *          a MatchType describing how the CachedSurface was selected, and
   *          an IntSize which is the size the caller should choose to decode
   *          at should it attempt to do so.
   */
  Tuple<already_AddRefed<CachedSurface>, MatchType, IntSize> LookupBestMatch(
      const SurfaceKey& aIdealKey) {
    // Try for an exact match first.
    RefPtr<CachedSurface> exactMatch;
    mSurfaces.Get(aIdealKey, getter_AddRefs(exactMatch));
    if (exactMatch) {
      if (exactMatch->IsDecoded()) {
        return MakeTuple(exactMatch.forget(), MatchType::EXACT, IntSize());
      }
    } else if (!mFactor2Mode) {
      // If no exact match is found, and we are not in factor of 2 mode, then
      // we know that we will trigger a decode because at best we will provide
      // a substitute. Make sure we switch now to factor of 2 mode if
      // necessary.
      MaybeSetFactor2Mode();
    }

    // Try for a best match second, if using compact.
    IntSize suggestedSize = SuggestedSize(aIdealKey.Size());
    if (suggestedSize != aIdealKey.Size()) {
      if (!exactMatch) {
        SurfaceKey compactKey = aIdealKey.CloneWithSize(suggestedSize);
        mSurfaces.Get(compactKey, getter_AddRefs(exactMatch));
        if (exactMatch && exactMatch->IsDecoded()) {
          MOZ_ASSERT(suggestedSize != aIdealKey.Size());
          return MakeTuple(exactMatch.forget(),
                           MatchType::SUBSTITUTE_BECAUSE_BEST, suggestedSize);
        }
      }
    }

    // There's no perfect match, so find the best match we can.
    RefPtr<CachedSurface> bestMatch;
    for (auto iter = ConstIter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
      const SurfaceKey& currentKey = current->GetSurfaceKey();

      // We never match a placeholder.
      if (current->IsPlaceholder()) {
        continue;
      }
      // Matching the playback type and SVG context is required.
      if (currentKey.Playback() != aIdealKey.Playback() ||
          currentKey.SVGContext() != aIdealKey.SVGContext()) {
        continue;
      }
      // Matching the flags is required.
      if (currentKey.Flags() != aIdealKey.Flags()) {
        continue;
      }
      // Anything is better than nothing! (Within the constraints we just
      // checked, of course.)
      if (!bestMatch) {
        bestMatch = current;
        continue;
      }

      MOZ_ASSERT(bestMatch, "Should have a current best match");

      // Always prefer completely decoded surfaces.
      bool bestMatchIsDecoded = bestMatch->IsDecoded();
      if (bestMatchIsDecoded && !current->IsDecoded()) {
        continue;
      }
      if (!bestMatchIsDecoded && current->IsDecoded()) {
        bestMatch = current;
        continue;
      }

      SurfaceKey bestMatchKey = bestMatch->GetSurfaceKey();
      if (CompareArea(aIdealKey.Size(), bestMatchKey.Size(),
                      currentKey.Size())) {
        bestMatch = current;
      }
    }

    MatchType matchType;
    if (bestMatch) {
      if (!exactMatch) {
        // No exact match, neither ideal nor factor of 2.
        MOZ_ASSERT(suggestedSize != bestMatch->GetSurfaceKey().Size(),
                   "No exact match despite the fact the sizes match!");
        matchType = MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND;
      } else if (exactMatch != bestMatch) {
        // The exact match is still decoding, but we found a substitute.
        matchType = MatchType::SUBSTITUTE_BECAUSE_PENDING;
      } else if (aIdealKey.Size() != bestMatch->GetSurfaceKey().Size()) {
        // The best factor of 2 match is still decoding, but the best we've
        // got.
        MOZ_ASSERT(suggestedSize != aIdealKey.Size());
        MOZ_ASSERT(mFactor2Mode || mIsVectorImage);
        matchType = MatchType::SUBSTITUTE_BECAUSE_BEST;
      } else {
        // The exact match is still decoding, but it's the best we've got.
        matchType = MatchType::EXACT;
      }
    } else {
      if (exactMatch) {
        // We found an "exact match"; it must have been a placeholder.
        MOZ_ASSERT(exactMatch->IsPlaceholder());
        matchType = MatchType::PENDING;
      } else {
        // We couldn't find an exact match *or* a substitute.
        matchType = MatchType::NOT_FOUND;
      }
    }

    return MakeTuple(bestMatch.forget(), matchType, suggestedSize);
  }

  // Switches the cache into factor-of-2 mode if the surface count exceeds the
  // pref threshold plus the image's native size count, and the image has a
  // usable (non-empty) native size. No-op otherwise.
  void MaybeSetFactor2Mode() {
    MOZ_ASSERT(!mFactor2Mode);

    // Typically an image cache will not have too many size-varying surfaces,
    // so if we exceed the given threshold, we should consider using a subset.
    int32_t thresholdSurfaces =
        StaticPrefs::image_cache_factor2_threshold_surfaces();
    if (thresholdSurfaces < 0 ||
        mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // Determine how many native surfaces this image has. If it is zero, and it
    // is a vector image, then we should impute a single native size.
    // Otherwise, it may be zero because we don't know yet, or the image has an
    // error, or it isn't supported.
    auto first = ConstIter();
    NotNull<CachedSurface*> current = WrapNotNull(first.UserData());
    Image* image = static_cast<Image*>(current->GetImageKey());
    size_t nativeSizes = image->GetNativeSizesLength();
    if (mIsVectorImage) {
      MOZ_ASSERT(nativeSizes == 0);
      nativeSizes = 1;
    } else if (nativeSizes == 0) {
      return;
    }

    // Increase the threshold by the number of native sizes. This ensures that
    // we do not prevent decoding of the image at all its native sizes. It does
    // not guarantee we will provide a surface at that size however (i.e. many
    // other sized surfaces are requested, in addition to the native sizes).
    thresholdSurfaces += nativeSizes;
    if (mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // Get our native size. While we know the image should be fully decoded,
    // if it is an SVG, it is valid to have a zero size. We can't do compacting
    // in that case because we need to know the width/height ratio to define a
    // candidate set.
    IntSize nativeSize;
    if (NS_FAILED(image->GetWidth(&nativeSize.width)) ||
        NS_FAILED(image->GetHeight(&nativeSize.height)) ||
        nativeSize.IsEmpty()) {
      return;
    }

    // We have a valid size, we can change modes.
    mFactor2Mode = true;
  }

  // Discards non-factor-of-2-sized surfaces for which a decoded factor-of-2
  // substitute exists. |aRemoveCallback| is invoked for each surface removed
  // so the caller can update its own bookkeeping.
  template <typename Function>
  void Prune(Function&& aRemoveCallback) {
    if (!mFactor2Mode || mFactor2Pruned) {
      return;
    }

    // Attempt to discard any surfaces which are not factor of 2 and the best
    // factor of 2 match exists.
    bool hasNotFactorSize = false;
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
      const SurfaceKey& currentKey = current->GetSurfaceKey();
      const IntSize& currentSize = currentKey.Size();

      // First we check if someone requested this size and would not accept
      // an alternatively sized surface.
      if (current->CannotSubstitute()) {
        continue;
      }

      // Next we find the best factor of 2 size for this surface. If this
      // surface is a factor of 2 size, then we want to keep it.
      IntSize bestSize = SuggestedSize(currentSize);
      if (bestSize == currentSize) {
        continue;
      }

      // Check the cache for a surface with the same parameters except for the
      // size which uses the closest factor of 2 size.
      SurfaceKey compactKey = currentKey.CloneWithSize(bestSize);
      RefPtr<CachedSurface> compactMatch;
      mSurfaces.Get(compactKey, getter_AddRefs(compactMatch));
      if (compactMatch && compactMatch->IsDecoded()) {
        aRemoveCallback(current);
        iter.Remove();
      } else {
        hasNotFactorSize = true;
      }
    }

    // We have no surfaces that are not factor of 2 sized, so we can stop
    // pruning henceforth, because we avoid the insertion of new surfaces that
    // don't match our sizing set (unless the caller won't accept a
    // substitution.)
    if (!hasNotFactorSize) {
      mFactor2Pruned = true;
    }

    // We should never leave factor of 2 mode due to pruning in of itself, but
    // if we discarded surfaces due to the volatile buffers getting released,
    // it is possible.
    AfterMaybeRemove();
  }

  // Maps a requested size to the size we suggest decoding at, additionally
  // clamping vector-image sizes to the cache's limits.
  IntSize SuggestedSize(const IntSize& aSize) const {
    IntSize suggestedSize = SuggestedSizeInternal(aSize);
    if (mIsVectorImage) {
      suggestedSize = SurfaceCache::ClampVectorSize(suggestedSize);
    }
    return suggestedSize;
  }

  IntSize SuggestedSizeInternal(const IntSize& aSize) const {
    // When not in factor of 2 mode, we can always decode at the given size.
    if (!mFactor2Mode) {
      return aSize;
    }

    // We cannot enter factor of 2 mode unless we have a minimum number of
    // surfaces, and we should have left it if the cache was emptied.
    if (MOZ_UNLIKELY(IsEmpty())) {
      MOZ_ASSERT_UNREACHABLE("Should not be empty and in factor of 2 mode!");
      return aSize;
    }

    // This bit of awkwardness gets the largest native size of the image.
    auto iter = ConstIter();
    NotNull<CachedSurface*> firstSurface = WrapNotNull(iter.UserData());
    Image* image = static_cast<Image*>(firstSurface->GetImageKey());
    IntSize factorSize;
    if (NS_FAILED(image->GetWidth(&factorSize.width)) ||
        NS_FAILED(image->GetHeight(&factorSize.height)) ||
        factorSize.IsEmpty()) {
      // We should not have entered factor of 2 mode without a valid size, and
      // several successfully decoded surfaces. Note that valid vector images
      // may have a default size of 0x0, and those are not yet supported.
      MOZ_ASSERT_UNREACHABLE("Expected valid native size!");
      return aSize;
    }
    if (image->GetOrientation().SwapsWidthAndHeight() &&
        image->HandledOrientation()) {
      std::swap(factorSize.width, factorSize.height);
    }

    if (mIsVectorImage) {
      // Ensure the aspect ratio matches the native size before forcing the
      // caller to accept a factor of 2 size. The difference between the aspect
      // ratios is:
      //
      //     delta = nativeWidth/nativeHeight - desiredWidth/desiredHeight
      //
      //     delta*nativeHeight*desiredHeight = nativeWidth*desiredHeight
      //                                        - desiredWidth*nativeHeight
      //
      // Using the maximum accepted delta as a constant, we can avoid the
      // floating point division and just compare after some integer ops.
      int32_t delta =
          factorSize.width * aSize.height - aSize.width * factorSize.height;
      int32_t maxDelta = (factorSize.height * aSize.height) >> 4;
      if (delta > maxDelta || delta < -maxDelta) {
        return aSize;
      }

      // If the requested size is bigger than the native size, we actually need
      // to grow the native size instead of shrinking it.
      if (factorSize.width < aSize.width) {
        do {
          IntSize candidate(factorSize.width * 2, factorSize.height * 2);
          if (!SurfaceCache::IsLegalSize(candidate)) {
            break;
          }

          factorSize = candidate;
        } while (factorSize.width < aSize.width);
      }

      return factorSize;
    }

    // Otherwise we can find the best fit as normal.

    // Start with the native size as the best first guess.
    IntSize bestSize = factorSize;
    factorSize.width /= 2;
    factorSize.height /= 2;

    while (!factorSize.IsEmpty()) {
      if (!CompareArea(aSize, bestSize, factorSize)) {
        // This size is not better than the last. Since we proceed from largest
        // to smallest, we know that the next size will not be better if the
        // previous size was rejected. Break early.
        break;
      }

      // The current factor of 2 size is better than the last selected size.
      bestSize = factorSize;
      factorSize.width /= 2;
      factorSize.height /= 2;
    }

    return bestSize;
  }

  // Returns true if |aSize| is a better match for |aIdealSize| than
  // |aBestSize| is, using an area-based heuristic.
  bool CompareArea(const IntSize& aIdealSize, const IntSize& aBestSize,
                   const IntSize& aSize) const {
    // Compare sizes. We use an area-based heuristic here instead of computing
    // a truly optimal answer, since it seems very unlikely to make a
    // difference for realistic sizes.
    int64_t idealArea = AreaOfIntSize(aIdealSize);
    int64_t currentArea = AreaOfIntSize(aSize);
    int64_t bestMatchArea = AreaOfIntSize(aBestSize);

    // If the best match is smaller than the ideal size, prefer bigger sizes.
    if (bestMatchArea < idealArea) {
      if (currentArea > bestMatchArea) {
        return true;
      }
      return false;
    }

    // Other, prefer sizes closer to the ideal size, but still not smaller.
    if (idealArea <= currentArea && currentArea < bestMatchArea) {
      return true;
    }

    // This surface isn't an improvement over the current best match.
    return false;
  }

  // Reports the memory used by every surface in this cache. Surfaces whose
  // volatile buffers were purged by the OS are removed (via
  // |aRemoveCallback|) instead of being reported.
  template <typename Function>
  void CollectSizeOfSurfaces(nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             Function&& aRemoveCallback) {
    CachedSurface::SurfaceMemoryReport report(aCounters, aMallocSizeOf);
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());

      // We don't need the drawable surface for ourselves, but adding a surface
      // to the report will trigger this indirectly. If the surface was
      // discarded by the OS because it was in volatile memory, we should
      // remove it from the cache immediately rather than include it in the
      // report.
      DrawableSurface drawableSurface;
      if (!surface->IsPlaceholder()) {
        drawableSurface = surface->GetDrawableSurface();
        if (!drawableSurface) {
          aRemoveCallback(surface);
          iter.Remove();
          continue;
        }
      }

      const IntSize& size = surface->GetSurfaceKey().Size();
      bool factor2Size = false;
      if (mFactor2Mode) {
        factor2Size = (size == SuggestedSize(size));
      }
      report.Add(surface, factor2Size);
    }

    AfterMaybeRemove();
  }

  SurfaceTable::ConstIterator ConstIter() const {
    return mSurfaces.ConstIter();
  }
  uint32_t Count() const { return mSurfaces.Count(); }

  void SetLocked(bool aLocked) { mLocked = aLocked; }
  bool IsLocked() const { return mLocked; }

 private:
  void AfterMaybeRemove() {
    if (IsEmpty() && mFactor2Mode) {
      // The last surface for this cache was removed. This can happen if the
      // surface was stored in a volatile buffer and got purged, or the surface
      // expired from the cache. If the cache itself lingers for some reason
      // (e.g. in the process of performing a lookup, the cache itself is
      // locked), then we need to reset the factor of 2 state because it
      // requires at least one surface present to get the native size
      // information from the image.
      mFactor2Mode = mFactor2Pruned = false;
    }
  }

  SurfaceTable mSurfaces;

  bool mLocked;

  // True in "factor of 2" mode.
  bool mFactor2Mode;

  // True if all non-factor of 2 surfaces have been removed from the cache.
  // Note that this excludes unsubstitutable sizes.
  bool mFactor2Pruned;

  // True if the surfaces are produced from a vector image. If so, it must
  // match the aspect ratio when using factor of 2 mode.
  bool mIsVectorImage;
};
728 * SurfaceCacheImpl is responsible for determining which surfaces will be cached
729 * and managing the surface cache data structures. Rather than interact with
730 * SurfaceCacheImpl directly, client code interacts with SurfaceCache, which
731 * maintains high-level invariants and encapsulates the details of the surface
732 * cache's implementation.
734 class SurfaceCacheImpl final : public nsIMemoryReporter {
735 public:
736 NS_DECL_ISUPPORTS
  // Constructs the cache with its expiration time, discard factor, and total
  // byte budget, and registers for memory-pressure notifications so the cache
  // can be trimmed under pressure.
  SurfaceCacheImpl(uint32_t aSurfaceCacheExpirationTimeMS,
                   uint32_t aSurfaceCacheDiscardFactor,
                   uint32_t aSurfaceCacheSize)
      : mExpirationTracker(aSurfaceCacheExpirationTimeMS),
        mMemoryPressureObserver(new MemoryPressureObserver),
        mDiscardFactor(aSurfaceCacheDiscardFactor),
        mMaxCost(aSurfaceCacheSize),
        mAvailableCost(aSurfaceCacheSize),
        mLockedCost(0),
        mOverflowCount(0),
        mAlreadyPresentCount(0),
        mTableFailureCount(0),
        mTrackingFailureCount(0) {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->AddObserver(mMemoryPressureObserver, "memory-pressure", false);
    }
  }
 private:
  // Unregisters the memory-pressure observer and the memory reporter
  // registered in InitMemoryReporter().
  virtual ~SurfaceCacheImpl() {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->RemoveObserver(mMemoryPressureObserver, "memory-pressure");
    }

    UnregisterWeakMemoryReporter(this);
  }
 public:
  // Registers this object as a weak memory reporter; balanced by the
  // UnregisterWeakMemoryReporter call in the destructor.
  void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
  // Inserts the surface (or placeholder) produced by |aProvider| into the
  // cache, evicting unlocked surfaces by cost as needed to make room.
  // Requires the cache mutex to be held (witnessed by |aAutoLock|).
  //
  // Returns FAILURE_ALREADY_PRESENT if an equivalent surface is cached,
  // FAILURE if the entry cannot fit or cannot be tracked/locked, and SUCCESS
  // otherwise.
  InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider, bool aSetAvailable,
                       const StaticMutexAutoLock& aAutoLock) {
    // If this is a duplicate surface, refuse to replace the original.
    // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
    // twice. We'll make this more efficient in bug 1185137.
    LookupResult result =
        Lookup(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock,
               /* aMarkUsed = */ false);
    if (MOZ_UNLIKELY(result)) {
      mAlreadyPresentCount++;
      return InsertOutcome::FAILURE_ALREADY_PRESENT;
    }

    if (result.Type() == MatchType::PENDING) {
      RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(),
                  aAutoLock);
    }

    MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND ||
                   result.Type() == MatchType::PENDING,
               "A LookupResult with no surface should be NOT_FOUND or PENDING");

    // If this is bigger than we can hold after discarding everything we can,
    // refuse to cache it.
    Cost cost = aProvider->LogicalSizeInBytes();
    if (MOZ_UNLIKELY(!CanHoldAfterDiscarding(cost))) {
      mOverflowCount++;
      return InsertOutcome::FAILURE;
    }

    // Remove elements in order of cost until we can fit this in the cache.
    // Note that locked surfaces aren't in mCosts, so we never remove them
    // here.
    while (cost > mAvailableCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(),
                 "Removed everything and it still won't fit");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }

    // Locate the appropriate per-image cache. If there's not an existing cache
    // for this image, create it.
    const ImageKey imageKey = aProvider->GetImageKey();
    RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
    if (!cache) {
      cache = new ImageSurfaceCache(imageKey);
      if (!mImageCaches.InsertOrUpdate(aProvider->GetImageKey(), RefPtr{cache},
                                       fallible)) {
        mTableFailureCount++;
        return InsertOutcome::FAILURE;
      }
    }

    // If we were asked to mark the cache entry available, do so.
    if (aSetAvailable) {
      aProvider->Availability().SetAvailable();
    }

    auto surface = MakeNotNull<RefPtr<CachedSurface>>(aProvider);

    // We require that locking succeed if the image is locked and we're not
    // inserting a placeholder; the caller may need to know this to handle
    // errors correctly.
    bool mustLock = cache->IsLocked() && !surface->IsPlaceholder();
    if (mustLock) {
      surface->SetLocked(true);
      if (!surface->IsLocked()) {
        return InsertOutcome::FAILURE;
      }
    }

    // Insert.
    MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost");
    if (!cache->Insert(surface)) {
      mTableFailureCount++;
      if (mustLock) {
        surface->SetLocked(false);
      }
      return InsertOutcome::FAILURE;
    }

    if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
      MOZ_ASSERT(!mustLock);
      Remove(surface, /* aStopTracking */ false, aAutoLock);
      return InsertOutcome::FAILURE;
    }

    return InsertOutcome::SUCCESS;
  }
  // Removes |aSurface| from the cache, notifying its image of the discard and
  // deferring the actual release to mCachedSurfacesDiscard so the surface is
  // freed outside the cache mutex. |aStopTracking| is false only when the
  // surface was never successfully tracked (StartTracking failed).
  void Remove(NotNull<CachedSurface*> aSurface, bool aStopTracking,
              const StaticMutexAutoLock& aAutoLock) {
    ImageKey imageKey = aSurface->GetImageKey();

    RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
    MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache");

    // If the surface was not a placeholder, tell its image that we discarded
    // it.
    if (!aSurface->IsPlaceholder()) {
      static_cast<Image*>(imageKey)->OnSurfaceDiscarded(
          aSurface->GetSurfaceKey());
    }

    // If we failed during StartTracking, we can skip this step.
    if (aStopTracking) {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
    }

    // Individual surfaces must be freed outside the lock.
    mCachedSurfacesDiscard.AppendElement(cache->Remove(aSurface));

    MaybeRemoveEmptyCache(imageKey, cache);
  }
  // Begins cost/expiration tracking for a newly inserted surface. Locked
  // surfaces only add to mLockedCost; unlocked surfaces are entered into
  // mCosts and the expiration tracker. Returns false (leaving no tracking
  // state behind) if either fallible step fails.
  bool StartTracking(NotNull<CachedSurface*> aSurface,
                     const StaticMutexAutoLock& aAutoLock) {
    CostEntry costEntry = aSurface->GetCostEntry();
    MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost,
               "Cost too large and the caller didn't catch it");

    if (aSurface->IsLocked()) {
      mLockedCost += costEntry.GetCost();
      MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?");
    } else {
      if (NS_WARN_IF(!mCosts.InsertElementSorted(costEntry, fallible))) {
        mTrackingFailureCount++;
        return false;
      }

      // This may fail during XPCOM shutdown, so we need to ensure the object
      // is tracked before calling RemoveObject in StopTracking.
      nsresult rv = mExpirationTracker.AddObjectLocked(aSurface, aAutoLock);
      if (NS_WARN_IF(NS_FAILED(rv))) {
        // Roll back the mCosts insertion so tracking state stays consistent.
        DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
        MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
        mTrackingFailureCount++;
        return false;
      }
    }

    mAvailableCost -= costEntry.GetCost();
    return true;
  }
  // Reverses StartTracking: releases the surface's cost back to
  // mAvailableCost and removes it from mLockedCost or from mCosts plus the
  // expiration tracker. |aIsTracked| records whether StartTracking fully
  // succeeded for this surface (it can be false during XPCOM shutdown).
  void StopTracking(NotNull<CachedSurface*> aSurface, bool aIsTracked,
                    const StaticMutexAutoLock& aAutoLock) {
    CostEntry costEntry = aSurface->GetCostEntry();

    if (aSurface->IsLocked()) {
      MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance");
      mLockedCost -= costEntry.GetCost();
      // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n).
      MOZ_ASSERT(!mCosts.Contains(costEntry),
                 "Shouldn't have a cost entry for a locked surface");
    } else {
      if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) {
        MOZ_ASSERT(aIsTracked, "Expiration-tracking a surface unexpectedly!");
        mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock);
      } else {
        // Our call to AddObject must have failed in StartTracking; most
        // likely we're in XPCOM shutdown right now.
        MOZ_ASSERT(!aIsTracked, "Not expiration-tracking an unlocked surface!");
      }

      DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
      MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
    }

    mAvailableCost += costEntry.GetCost();
    MOZ_ASSERT(mAvailableCost <= mMaxCost,
               "More available cost than we started with");
  }
  /**
   * Look up the surface that exactly matches (aImageKey, aSurfaceKey).
   *
   * Returns NOT_FOUND if there is no entry (or its OS-level data was
   * discarded), PENDING if only a placeholder exists, and EXACT with a
   * DrawableSurface on success. If aMarkUsed is true the surface is bumped in
   * the expiration tracker (or locked, for locked images).
   */
  LookupResult Lookup(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
                      const StaticMutexAutoLock& aAutoLock, bool aMarkUsed) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(MatchType::NOT_FOUND);
    }

    RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey, aMarkUsed);
    if (!surface) {
      // Lookup in the per-image cache missed.
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (surface->IsPlaceholder()) {
      return LookupResult(MatchType::PENDING);
    }

    DrawableSurface drawableSurface = surface->GetDrawableSurface();
    if (!drawableSurface) {
      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (aMarkUsed &&
        !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
      // MarkUsed failed and already untracked the surface; drop the entry.
      Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
               "Lookup() not returning an exact match?");
    return LookupResult(std::move(drawableSurface), MatchType::EXACT);
  }
  /**
   * Look up the best available substitute for (aImageKey, aSurfaceKey),
   * e.g. a differently-sized surface of the same image.
   *
   * Returns the match type reported by the per-image cache plus a suggested
   * size the caller could decode at. Unlike Lookup(), a surface may be
   * returned even if MarkUsed fails (the entry is removed from the cache but
   * the already-obtained DrawableSurface is still handed out).
   */
  LookupResult LookupBestMatch(const ImageKey aImageKey,
                               const SurfaceKey& aSurfaceKey,
                               const StaticMutexAutoLock& aAutoLock,
                               bool aMarkUsed) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(
          MatchType::NOT_FOUND,
          SurfaceCache::ClampSize(aImageKey, aSurfaceKey.Size()));
    }

    // Repeatedly look up the best match, trying again if the resulting surface
    // has been freed by the operating system, until we can either lock a
    // surface for drawing or there are no matching surfaces left.
    // XXX(seth): This is O(N^2), but N is expected to be very small. If we
    // encounter a performance problem here we can revisit this.

    RefPtr<CachedSurface> surface;
    DrawableSurface drawableSurface;
    MatchType matchType = MatchType::NOT_FOUND;
    IntSize suggestedSize;
    while (true) {
      Tie(surface, matchType, suggestedSize) =
          cache->LookupBestMatch(aSurfaceKey);

      if (!surface) {
        return LookupResult(
            matchType, suggestedSize);  // Lookup in the per-image cache missed.
      }

      drawableSurface = surface->GetDrawableSurface();
      if (drawableSurface) {
        break;
      }

      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
    }

    MOZ_ASSERT_IF(matchType == MatchType::EXACT,
                  surface->GetSurfaceKey() == aSurfaceKey);
    MOZ_ASSERT_IF(
        matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND ||
            matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING,
        surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() &&
            surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() &&
            surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags());

    if (matchType == MatchType::EXACT ||
        matchType == MatchType::SUBSTITUTE_BECAUSE_BEST) {
      if (aMarkUsed &&
          !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
        // Entry is dropped, but we still return the surface we already hold.
        Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      }
    }

    return LookupResult(std::move(drawableSurface), matchType, suggestedSize);
  }
1041 bool CanHold(const Cost aCost) const { return aCost <= mMaxCost; }
1043 size_t MaximumCapacity() const { return size_t(mMaxCost); }
1045 void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
1046 const StaticMutexAutoLock& aAutoLock) {
1047 if (!aProvider->Availability().IsPlaceholder()) {
1048 MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
1049 return;
1052 // Reinsert the provider, requesting that Insert() mark it available. This
1053 // may or may not succeed, depending on whether some other decoder has
1054 // beaten us to the punch and inserted a non-placeholder version of this
1055 // surface first, but it's fine either way.
1056 // XXX(seth): This could be implemented more efficiently; we should be able
1057 // to just update our data structures without reinserting.
1058 Insert(aProvider, /* aSetAvailable = */ true, aAutoLock);
1061 void LockImage(const ImageKey aImageKey) {
1062 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1063 if (!cache) {
1064 cache = new ImageSurfaceCache(aImageKey);
1065 mImageCaches.InsertOrUpdate(aImageKey, RefPtr{cache});
1068 cache->SetLocked(true);
1070 // We don't relock this image's existing surfaces right away; instead, the
1071 // image should arrange for Lookup() to touch them if they are still useful.
1074 void UnlockImage(const ImageKey aImageKey,
1075 const StaticMutexAutoLock& aAutoLock) {
1076 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1077 if (!cache || !cache->IsLocked()) {
1078 return; // Already unlocked.
1081 cache->SetLocked(false);
1082 DoUnlockSurfaces(WrapNotNull(cache), /* aStaticOnly = */ false, aAutoLock);
1085 void UnlockEntries(const ImageKey aImageKey,
1086 const StaticMutexAutoLock& aAutoLock) {
1087 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1088 if (!cache || !cache->IsLocked()) {
1089 return; // Already unlocked.
1092 // (Note that we *don't* unlock the per-image cache here; that's the
1093 // difference between this and UnlockImage.)
1094 DoUnlockSurfaces(WrapNotNull(cache),
1095 /* aStaticOnly = */
1096 !StaticPrefs::image_mem_animated_discardable_AtStartup(),
1097 aAutoLock);
  /**
   * Remove aImageKey's entire per-image cache, untracking all of its
   * surfaces. Returns the detached cache so the caller can free the surfaces
   * outside the lock.
   */
  already_AddRefed<ImageSurfaceCache> RemoveImage(
      const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return nullptr;  // No cached surfaces for this image, so nothing to do.
    }

    // Discard all of the cached surfaces for this image.
    // XXX(seth): This is O(n^2) since for each item in the cache we are
    // removing an element from the costs array. Since n is expected to be
    // small, performance should be good, but if usage patterns change we should
    // change the data structure used for mCosts.
    for (auto iter = cache->ConstIter(); !iter.Done(); iter.Next()) {
      StopTracking(WrapNotNull(iter.UserData()),
                   /* aIsTracked */ true, aAutoLock);
    }

    // The per-image cache isn't needed anymore, so remove it as well.
    // This implicitly unlocks the image if it was locked.
    mImageCaches.Remove(aImageKey);

    // Since we did not actually remove any of the surfaces from the cache
    // itself, only stopped tracking them, we should free it outside the lock.
    return cache.forget();
  }
  /**
   * Ask aImageKey's per-image cache to prune surfaces it considers
   * no-longer-useful; pruned surfaces are untracked and queued for
   * destruction outside the lock.
   */
  void PruneImage(const ImageKey aImageKey,
                  const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return;  // No cached surfaces for this image, so nothing to do.
    }

    cache->Prune([this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
      // Individual surfaces must be freed outside the lock.
      mCachedSurfacesDiscard.AppendElement(aSurface);
    });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }
  /** Discard every unlocked surface in the cache. */
  void DiscardAll(const StaticMutexAutoLock& aAutoLock) {
    // Remove in order of cost because mCosts is an array and the other data
    // structures are all hash tables. Note that locked surfaces are not
    // removed, since they aren't present in mCosts.
    while (!mCosts.IsEmpty()) {
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }
  }

  /**
   * Discard roughly (1 / mDiscardFactor) of the discardable (unlocked) bytes
   * in response to a memory-pressure notification.
   */
  void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock) {
    // Compute our discardable cost. Since locked surfaces aren't discardable,
    // we exclude them.
    const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost;
    MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up");

    // Our target is to raise our available cost by (1 / mDiscardFactor) of our
    // discardable cost - in other words, we want to end up with about
    // (discardableCost / mDiscardFactor) fewer bytes stored in the surface
    // cache after we're done.
    const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor);

    if (targetCost > mMaxCost - mLockedCost) {
      MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard");
      DiscardAll(aAutoLock);
      return;
    }

    // Discard surfaces until we've reduced our cost to our target cost.
    while (mAvailableCost < targetCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }
  }
  /**
   * Hand the caller the list of surfaces queued for destruction, so they can
   * be freed after the cache lock is released.
   */
  void TakeDiscard(nsTArray<RefPtr<CachedSurface>>& aDiscard,
                   const StaticMutexAutoLock& aAutoLock) {
    MOZ_ASSERT(aDiscard.IsEmpty());
    aDiscard = std::move(mCachedSurfacesDiscard);
  }

  /**
   * Lock aSurface (no-op for placeholders or already-locked surfaces) and
   * re-track it so its cost moves into the locked bucket.
   */
  void LockSurface(NotNull<CachedSurface*> aSurface,
                   const StaticMutexAutoLock& aAutoLock) {
    if (aSurface->IsPlaceholder() || aSurface->IsLocked()) {
      return;
    }

    StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);

    // Lock the surface. This can fail.
    aSurface->SetLocked(true);
    DebugOnly<bool> tracked = StartTracking(aSurface, aAutoLock);
    MOZ_ASSERT(tracked);
  }
1198 size_t ShallowSizeOfIncludingThis(
1199 MallocSizeOf aMallocSizeOf, const StaticMutexAutoLock& aAutoLock) const {
1200 size_t bytes =
1201 aMallocSizeOf(this) + mCosts.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1202 mImageCaches.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1203 mCachedSurfacesDiscard.ShallowSizeOfExcludingThis(aMallocSizeOf) +
1204 mExpirationTracker.ShallowSizeOfExcludingThis(aMallocSizeOf);
1205 for (auto iter = mImageCaches.ConstIter(); !iter.Done(); iter.Next()) {
1206 bytes += iter.UserData()->ShallowSizeOfIncludingThis(aMallocSizeOf);
1208 return bytes;
  /**
   * nsIMemoryReporter implementation: report overhead, cost totals, and
   * diagnostic counters for about:memory.
   */
  NS_IMETHOD
  CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
                 bool aAnonymize) override {
    StaticMutexAutoLock lock(sInstanceMutex);

    // Walk every per-image cache to count images/surfaces and how many of
    // each are locked.
    uint32_t lockedImageCount = 0;
    uint32_t totalSurfaceCount = 0;
    uint32_t lockedSurfaceCount = 0;
    for (auto iter = mImageCaches.ConstIter(); !iter.Done(); iter.Next()) {
      totalSurfaceCount += iter.UserData()->Count();
      if (iter.UserData()->IsLocked()) {
        ++lockedImageCount;
      }
      for (auto surfIter = iter.UserData()->ConstIter(); !surfIter.Done();
           surfIter.Next()) {
        if (surfIter.UserData()->IsLocked()) {
          ++lockedSurfaceCount;
        }
      }
    }

    // clang-format off
    // We have explicit memory reporting for the surface cache which is more
    // accurate than the cost metrics we report here, but these metrics are
    // still useful to report, since they control the cache's behavior.
    MOZ_COLLECT_REPORT(
      "explicit/images/cache/overhead", KIND_HEAP, UNITS_BYTES,
      ShallowSizeOfIncludingThis(SurfaceCacheMallocSizeOf, lock),
"Memory used by the surface cache data structures, excluding surface data.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-total",
      KIND_OTHER, UNITS_BYTES, (mMaxCost - mAvailableCost),
"Estimated total memory used by the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-locked",
      KIND_OTHER, UNITS_BYTES, mLockedCost,
"Estimated memory used by locked surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-tracked-cost-count",
      KIND_OTHER, UNITS_COUNT, mCosts.Length(),
"Total number of surfaces tracked for cost (and expiry) in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-tracked-expiry-count",
      KIND_OTHER, UNITS_COUNT, mExpirationTracker.Length(lock),
"Total number of surfaces tracked for expiry (and cost) in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-image-count",
      KIND_OTHER, UNITS_COUNT, mImageCaches.Count(),
"Total number of images in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-locked-image-count",
      KIND_OTHER, UNITS_COUNT, lockedImageCount,
"Total number of locked images in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-image-surface-count",
      KIND_OTHER, UNITS_COUNT, totalSurfaceCount,
"Total number of surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-locked-surfaces-count",
      KIND_OTHER, UNITS_COUNT, lockedSurfaceCount,
"Total number of locked surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-overflow-count",
      KIND_OTHER, UNITS_COUNT, mOverflowCount,
"Count of how many times the surface cache has hit its capacity and been "
"unable to insert a new surface.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-tracking-failure-count",
      KIND_OTHER, UNITS_COUNT, mTrackingFailureCount,
"Count of how many times the surface cache has failed to begin tracking a "
"given surface.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-already-present-count",
      KIND_OTHER, UNITS_COUNT, mAlreadyPresentCount,
"Count of how many times the surface cache has failed to insert a surface "
"because it is already present.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-table-failure-count",
      KIND_OTHER, UNITS_COUNT, mTableFailureCount,
"Count of how many times the surface cache has failed to insert a surface "
"because a hash table could not accept an entry.");
    // clang-format on

    return NS_OK;
  }
  /**
   * Append memory counters for every surface of aImageKey. Surfaces whose
   * underlying data turns out to be gone are untracked and queued for
   * destruction outside the lock (the per-image cache decides this via the
   * callback).
   */
  void CollectSizeOfSurfaces(const ImageKey aImageKey,
                             nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return;  // No surfaces for this image.
    }

    // Report all surfaces in the per-image cache.
    cache->CollectSizeOfSurfaces(
        aCounters, aMallocSizeOf,
        [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
          StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
          // Individual surfaces must be freed outside the lock.
          mCachedSurfacesDiscard.AppendElement(aSurface);
        });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }
  /**
   * Queue aImage for release on the main thread, dispatching a runnable to
   * drain the queue only when it transitions from empty to non-empty.
   */
  void ReleaseImageOnMainThread(already_AddRefed<image::Image>&& aImage,
                                const StaticMutexAutoLock& aAutoLock) {
    RefPtr<image::Image> image = aImage;
    if (!image) {
      return;
    }

    // Dispatch only when this is the first queued image; later additions
    // piggyback on the already-pending runnable.
    bool needsDispatch = mReleasingImagesOnMainThread.IsEmpty();
    mReleasingImagesOnMainThread.AppendElement(image);

    if (!needsDispatch) {
      // There is already an ongoing task for ClearReleasingImages().
      return;
    }

    NS_DispatchToMainThread(NS_NewRunnableFunction(
        "SurfaceCacheImpl::ReleaseImageOnMainThread",
        []() -> void { SurfaceCache::ClearReleasingImages(); }));
  }

  /** Main-thread only: take the queued images so the caller can release them. */
  void TakeReleasingImages(nsTArray<RefPtr<image::Image>>& aImage,
                           const StaticMutexAutoLock& aAutoLock) {
    MOZ_ASSERT(NS_IsMainThread());
    aImage.SwapElements(mReleasingImagesOnMainThread);
  }
1356 private:
1357 already_AddRefed<ImageSurfaceCache> GetImageCache(const ImageKey aImageKey) {
1358 RefPtr<ImageSurfaceCache> imageCache;
1359 mImageCaches.Get(aImageKey, getter_AddRefs(imageCache));
1360 return imageCache.forget();
1363 void MaybeRemoveEmptyCache(const ImageKey aImageKey,
1364 ImageSurfaceCache* aCache) {
1365 // Remove the per-image cache if it's unneeded now. Keep it if the image is
1366 // locked, since the per-image cache is where we store that state. Note that
1367 // we don't push it into mImageCachesDiscard because all of its surfaces
1368 // have been removed, so it is safe to free while holding the lock.
1369 if (aCache->IsEmpty() && !aCache->IsLocked()) {
1370 mImageCaches.Remove(aImageKey);
1374 // This is similar to CanHold() except that it takes into account the costs of
1375 // locked surfaces. It's used internally in Insert(), but it's not exposed
1376 // publicly because we permit multithreaded access to the surface cache, which
1377 // means that the result would be meaningless: another thread could insert a
1378 // surface or lock an image at any time.
1379 bool CanHoldAfterDiscarding(const Cost aCost) const {
1380 return aCost <= mMaxCost - mLockedCost;
  /**
   * Record that aSurface was used: for locked images, lock the surface; for
   * unlocked images, refresh its position in the expiration tracker.
   *
   * Returns false if the expiration tracker failed to reinsert the surface,
   * in which case the surface has been untracked and the caller is expected
   * to remove the entry.
   */
  bool MarkUsed(NotNull<CachedSurface*> aSurface,
                NotNull<ImageSurfaceCache*> aCache,
                const StaticMutexAutoLock& aAutoLock) {
    if (aCache->IsLocked()) {
      LockSurface(aSurface, aAutoLock);
      return true;
    }

    nsresult rv = mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // If mark used fails, it is because it failed to reinsert the surface
      // after removing it from the tracker. Thus we need to update our
      // own accounting but otherwise expect it to be untracked.
      StopTracking(aSurface, /* aIsTracked */ false, aAutoLock);
      return false;
    }
    return true;
  }
  /**
   * Unlock the surfaces held by aCache and move their costs back into the
   * discardable bucket. With aStaticOnly, animated (non-static) surfaces are
   * left locked. Surfaces that cannot be re-tracked after unlocking are
   * removed.
   */
  void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache, bool aStaticOnly,
                        const StaticMutexAutoLock& aAutoLock) {
    AutoTArray<NotNull<CachedSurface*>, 8> discard;

    // Unlock all the surfaces the per-image cache is holding.
    for (auto iter = aCache->ConstIter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());
      if (surface->IsPlaceholder() || !surface->IsLocked()) {
        continue;
      }
      if (aStaticOnly &&
          surface->GetSurfaceKey().Playback() != PlaybackType::eStatic) {
        continue;
      }
      StopTracking(surface, /* aIsTracked */ true, aAutoLock);
      surface->SetLocked(false);
      if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
        discard.AppendElement(surface);
      }
    }

    // Discard any that we failed to track.
    for (auto iter = discard.begin(); iter != discard.end(); ++iter) {
      Remove(*iter, /* aStopTracking */ false, aAutoLock);
    }
  }
1429 void RemoveEntry(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
1430 const StaticMutexAutoLock& aAutoLock) {
1431 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1432 if (!cache) {
1433 return; // No cached surfaces for this image.
1436 RefPtr<CachedSurface> surface =
1437 cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
1438 if (!surface) {
1439 return; // Lookup in the per-image cache missed.
1442 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
  /**
   * Expiration tracker that discards surfaces which have gone unused for the
   * configured timeout. Shares the cache's static mutex for its locking.
   */
  class SurfaceTracker final
      : public ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
                                     StaticMutexAutoLock> {
   public:
    explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS)
        : ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
                                StaticMutexAutoLock>(
              aSurfaceCacheExpirationTimeMS, "SurfaceTracker") {}

   protected:
    // Called with the cache lock held when aSurface has expired.
    void NotifyExpiredLocked(CachedSurface* aSurface,
                             const StaticMutexAutoLock& aAutoLock) override {
      sInstance->Remove(WrapNotNull(aSurface), /* aStopTracking */ true,
                        aAutoLock);
    }

    // Collect surfaces queued for destruction while still holding the lock...
    void NotifyHandlerEndLocked(const StaticMutexAutoLock& aAutoLock) override {
      sInstance->TakeDiscard(mDiscard, aAutoLock);
    }

    // ...and actually free them here, after the lock has been released.
    void NotifyHandlerEnd() override {
      nsTArray<RefPtr<CachedSurface>> discard(std::move(mDiscard));
    }

    StaticMutex& GetMutex() override { return sInstanceMutex; }

    // Surfaces pending destruction, carried from the locked to the unlocked
    // notification.
    nsTArray<RefPtr<CachedSurface>> mDiscard;
  };
  /**
   * Observer that discards a fraction of the cache when a "memory-pressure"
   * notification arrives.
   */
  class MemoryPressureObserver final : public nsIObserver {
   public:
    NS_DECL_ISUPPORTS

    NS_IMETHOD Observe(nsISupports*, const char* aTopic,
                       const char16_t*) override {
      nsTArray<RefPtr<CachedSurface>> discard;
      {
        StaticMutexAutoLock lock(sInstanceMutex);
        if (sInstance && strcmp(aTopic, "memory-pressure") == 0) {
          sInstance->DiscardForMemoryPressure(lock);
          // Take the discard list so the surfaces are freed after the lock
          // is released, when `discard` goes out of scope.
          sInstance->TakeDiscard(discard, lock);
        }
      }
      return NS_OK;
    }

   private:
    virtual ~MemoryPressureObserver() {}
  };
  // Unlocked surfaces sorted by cost; eviction removes from the end.
  nsTArray<CostEntry> mCosts;
  // Per-image caches, keyed by the image's pointer identity.
  nsRefPtrHashtable<nsPtrHashKey<Image>, ImageSurfaceCache> mImageCaches;
  // Surfaces waiting to be destroyed outside the lock (see TakeDiscard()).
  nsTArray<RefPtr<CachedSurface>> mCachedSurfacesDiscard;
  // Discards surfaces that go unused for the configured timeout.
  SurfaceTracker mExpirationTracker;
  // Registered for "memory-pressure" notifications.
  RefPtr<MemoryPressureObserver> mMemoryPressureObserver;
  // Images queued for release on the main thread (see
  // ReleaseImageOnMainThread()).
  nsTArray<RefPtr<image::Image>> mReleasingImagesOnMainThread;
  // Discard 1/mDiscardFactor of discardable memory on memory pressure.
  const uint32_t mDiscardFactor;
  // Total capacity of the cache, in cost units (bytes).
  const Cost mMaxCost;
  // Remaining budget: mMaxCost minus the cost of all stored surfaces.
  Cost mAvailableCost;
  // Portion of the stored cost belonging to locked surfaces.
  Cost mLockedCost;
  // Diagnostic counters, reported via CollectReports().
  size_t mOverflowCount;
  size_t mAlreadyPresentCount;
  size_t mTableFailureCount;
  size_t mTrackingFailureCount;
1511 NS_IMPL_ISUPPORTS(SurfaceCacheImpl, nsIMemoryReporter)
1512 NS_IMPL_ISUPPORTS(SurfaceCacheImpl::MemoryPressureObserver, nsIObserver)
1514 ///////////////////////////////////////////////////////////////////////////////
1515 // Public API
1516 ///////////////////////////////////////////////////////////////////////////////
/* static */
/**
 * Create the surface cache singleton, sizing it from preferences and the
 * machine's physical memory. Main-thread only; must be called exactly once.
 */
void SurfaceCache::Initialize() {
  // Initialize preferences.
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(!sInstance, "Shouldn't initialize more than once");

  // See StaticPrefs for the default values of these preferences.

  // Length of time before an unused surface is removed from the cache, in
  // milliseconds.
  uint32_t surfaceCacheExpirationTimeMS =
      StaticPrefs::image_mem_surfacecache_min_expiration_ms_AtStartup();

  // What fraction of the memory used by the surface cache we should discard
  // when we get a memory pressure notification. This value is interpreted as
  // 1/N, so 1 means to discard everything, 2 means to discard about half of the
  // memory we're using, and so forth. We clamp it to avoid division by zero.
  uint32_t surfaceCacheDiscardFactor =
      max(StaticPrefs::image_mem_surfacecache_discard_factor_AtStartup(), 1u);

  // Maximum size of the surface cache, in kilobytes.
  uint64_t surfaceCacheMaxSizeKB =
      StaticPrefs::image_mem_surfacecache_max_size_kb_AtStartup();

  if (sizeof(uintptr_t) <= 4) {
    // Limit surface cache to 1 GB if our address space is 32 bit.
    surfaceCacheMaxSizeKB = 1024 * 1024;
  }

  // A knob determining the actual size of the surface cache. Currently the
  // cache is (size of main memory) / (surface cache size factor) KB
  // or (surface cache max size) KB, whichever is smaller. The formula
  // may change in the future, though.
  // For example, a value of 4 would yield a 256MB cache on a 1GB machine.
  // The smallest machines we are likely to run this code on have 256MB
  // of memory, which would yield a 64MB cache on this setting.
  // We clamp this value to avoid division by zero.
  uint32_t surfaceCacheSizeFactor =
      max(StaticPrefs::image_mem_surfacecache_size_factor_AtStartup(), 1u);

  // Compute the size of the surface cache.
  uint64_t memorySize = PR_GetPhysicalMemorySize();
  if (memorySize == 0) {
    MOZ_ASSERT_UNREACHABLE("PR_GetPhysicalMemorySize not implemented here");
    memorySize = 256 * 1024 * 1024;  // Fall back to 256MB.
  }
  uint64_t proposedSize = memorySize / surfaceCacheSizeFactor;
  uint64_t surfaceCacheSizeBytes =
      min(proposedSize, surfaceCacheMaxSizeKB * 1024);
  uint32_t finalSurfaceCacheSizeBytes =
      min(surfaceCacheSizeBytes, uint64_t(UINT32_MAX));

  // Create the surface cache singleton with the requested settings. Note that
  // the size is a limit that the cache may not grow beyond, but we do not
  // actually allocate any storage for surfaces at this time.
  sInstance = new SurfaceCacheImpl(surfaceCacheExpirationTimeMS,
                                   surfaceCacheDiscardFactor,
                                   finalSurfaceCacheSizeBytes);
  sInstance->InitMemoryReporter();
}
/* static */
/**
 * Tear down the singleton. The instance is detached under the lock but
 * destroyed after the lock is released, when `cache` goes out of scope.
 */
void SurfaceCache::Shutdown() {
  RefPtr<SurfaceCacheImpl> cache;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?");
    cache = sInstance.forget();
  }
}
1590 /* static */
1591 LookupResult SurfaceCache::Lookup(const ImageKey aImageKey,
1592 const SurfaceKey& aSurfaceKey,
1593 bool aMarkUsed) {
1594 nsTArray<RefPtr<CachedSurface>> discard;
1595 LookupResult rv(MatchType::NOT_FOUND);
1598 StaticMutexAutoLock lock(sInstanceMutex);
1599 if (!sInstance) {
1600 return rv;
1603 rv = sInstance->Lookup(aImageKey, aSurfaceKey, lock, aMarkUsed);
1604 sInstance->TakeDiscard(discard, lock);
1607 return rv;
1610 /* static */
1611 LookupResult SurfaceCache::LookupBestMatch(const ImageKey aImageKey,
1612 const SurfaceKey& aSurfaceKey,
1613 bool aMarkUsed) {
1614 nsTArray<RefPtr<CachedSurface>> discard;
1615 LookupResult rv(MatchType::NOT_FOUND);
1618 StaticMutexAutoLock lock(sInstanceMutex);
1619 if (!sInstance) {
1620 return rv;
1623 rv = sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock, aMarkUsed);
1624 sInstance->TakeDiscard(discard, lock);
1627 return rv;
1630 /* static */
1631 InsertOutcome SurfaceCache::Insert(NotNull<ISurfaceProvider*> aProvider) {
1632 nsTArray<RefPtr<CachedSurface>> discard;
1633 InsertOutcome rv(InsertOutcome::FAILURE);
1636 StaticMutexAutoLock lock(sInstanceMutex);
1637 if (!sInstance) {
1638 return rv;
1641 rv = sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock);
1642 sInstance->TakeDiscard(discard, lock);
1645 return rv;
1648 /* static */
1649 bool SurfaceCache::CanHold(const IntSize& aSize,
1650 uint32_t aBytesPerPixel /* = 4 */) {
1651 StaticMutexAutoLock lock(sInstanceMutex);
1652 if (!sInstance) {
1653 return false;
1656 Cost cost = ComputeCost(aSize, aBytesPerPixel);
1657 return sInstance->CanHold(cost);
1660 /* static */
1661 bool SurfaceCache::CanHold(size_t aSize) {
1662 StaticMutexAutoLock lock(sInstanceMutex);
1663 if (!sInstance) {
1664 return false;
1667 return sInstance->CanHold(aSize);
1670 /* static */
1671 void SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider) {
1672 StaticMutexAutoLock lock(sInstanceMutex);
1673 if (!sInstance) {
1674 return;
1677 sInstance->SurfaceAvailable(aProvider, lock);
1680 /* static */
1681 void SurfaceCache::LockImage(const ImageKey aImageKey) {
1682 StaticMutexAutoLock lock(sInstanceMutex);
1683 if (sInstance) {
1684 return sInstance->LockImage(aImageKey);
1688 /* static */
1689 void SurfaceCache::UnlockImage(const ImageKey aImageKey) {
1690 StaticMutexAutoLock lock(sInstanceMutex);
1691 if (sInstance) {
1692 return sInstance->UnlockImage(aImageKey, lock);
1696 /* static */
1697 void SurfaceCache::UnlockEntries(const ImageKey aImageKey) {
1698 StaticMutexAutoLock lock(sInstanceMutex);
1699 if (sInstance) {
1700 return sInstance->UnlockEntries(aImageKey, lock);
1704 /* static */
1705 void SurfaceCache::RemoveImage(const ImageKey aImageKey) {
1706 RefPtr<ImageSurfaceCache> discard;
1708 StaticMutexAutoLock lock(sInstanceMutex);
1709 if (sInstance) {
1710 discard = sInstance->RemoveImage(aImageKey, lock);
1715 /* static */
1716 void SurfaceCache::PruneImage(const ImageKey aImageKey) {
1717 nsTArray<RefPtr<CachedSurface>> discard;
1719 StaticMutexAutoLock lock(sInstanceMutex);
1720 if (sInstance) {
1721 sInstance->PruneImage(aImageKey, lock);
1722 sInstance->TakeDiscard(discard, lock);
1727 /* static */
1728 void SurfaceCache::DiscardAll() {
1729 nsTArray<RefPtr<CachedSurface>> discard;
1731 StaticMutexAutoLock lock(sInstanceMutex);
1732 if (sInstance) {
1733 sInstance->DiscardAll(lock);
1734 sInstance->TakeDiscard(discard, lock);
1739 /* static */
1740 void SurfaceCache::CollectSizeOfSurfaces(
1741 const ImageKey aImageKey, nsTArray<SurfaceMemoryCounter>& aCounters,
1742 MallocSizeOf aMallocSizeOf) {
1743 nsTArray<RefPtr<CachedSurface>> discard;
1745 StaticMutexAutoLock lock(sInstanceMutex);
1746 if (!sInstance) {
1747 return;
1750 sInstance->CollectSizeOfSurfaces(aImageKey, aCounters, aMallocSizeOf, lock);
1751 sInstance->TakeDiscard(discard, lock);
1755 /* static */
1756 size_t SurfaceCache::MaximumCapacity() {
1757 StaticMutexAutoLock lock(sInstanceMutex);
1758 if (!sInstance) {
1759 return 0;
1762 return sInstance->MaximumCapacity();
1765 /* static */
1766 bool SurfaceCache::IsLegalSize(const IntSize& aSize) {
1767 // reject over-wide or over-tall images
1768 const int32_t k64KLimit = 0x0000FFFF;
1769 if (MOZ_UNLIKELY(aSize.width > k64KLimit || aSize.height > k64KLimit)) {
1770 NS_WARNING("image too big");
1771 return false;
1774 // protect against invalid sizes
1775 if (MOZ_UNLIKELY(aSize.height <= 0 || aSize.width <= 0)) {
1776 return false;
1779 // check to make sure we don't overflow a 32-bit
1780 CheckedInt32 requiredBytes =
1781 CheckedInt32(aSize.width) * CheckedInt32(aSize.height) * 4;
1782 if (MOZ_UNLIKELY(!requiredBytes.isValid())) {
1783 NS_WARNING("width or height too large");
1784 return false;
1786 return true;
/**
 * Scale aSize down (preserving aspect ratio) so a 32bpp rasterization stays
 * within the configured KB threshold for rasterized SVG surfaces.
 */
IntSize SurfaceCache::ClampVectorSize(const IntSize& aSize) {
  // If we exceed the maximum, we need to scale the size downwards to fit.
  // It shouldn't get here if it is significantly larger because
  // VectorImage::UseSurfaceCacheForSize should prevent us from requesting
  // a rasterized version of a surface greater than 4x the maximum.
  int32_t maxSizeKB =
      StaticPrefs::image_cache_max_rasterized_svg_threshold_kb();
  if (maxSizeKB <= 0) {
    // Non-positive threshold disables clamping.
    return aSize;
  }

  // width * height * 4 bytes/pixel / 1024 bytes/KB == width * height / 256.
  int64_t proposedKB = int64_t(aSize.width) * aSize.height / 256;
  if (maxSizeKB >= proposedKB) {
    return aSize;
  }

  // Scale both dimensions by sqrt(ratio) so the area shrinks by the ratio.
  double scale = sqrt(double(maxSizeKB) / proposedKB);
  return IntSize(int32_t(scale * aSize.width), int32_t(scale * aSize.height));
}

/**
 * Clamp aSize for caching: only vector images are clamped (see
 * ClampVectorSize); raster images are returned unchanged.
 */
IntSize SurfaceCache::ClampSize(ImageKey aImageKey, const IntSize& aSize) {
  if (aImageKey->GetType() != imgIContainer::TYPE_VECTOR) {
    return aSize;
  }

  return ClampVectorSize(aSize);
}
/* static */
/**
 * Release aImage's reference on the main thread. If already on the main
 * thread (and not forced to proxy), release synchronously; otherwise queue it
 * on the singleton, or fall back to NS_ReleaseOnMainThread if the cache is
 * gone.
 */
void SurfaceCache::ReleaseImageOnMainThread(
    already_AddRefed<image::Image> aImage, bool aAlwaysProxy) {
  if (NS_IsMainThread() && !aAlwaysProxy) {
    // Take ownership locally; the RefPtr releases the image as it goes out of
    // scope, right here on the main thread.
    RefPtr<image::Image> image = std::move(aImage);
    return;
  }

  StaticMutexAutoLock lock(sInstanceMutex);
  if (sInstance) {
    sInstance->ReleaseImageOnMainThread(std::move(aImage), lock);
  } else {
    NS_ReleaseOnMainThread("SurfaceCache::ReleaseImageOnMainThread",
                           std::move(aImage), /* aAlwaysProxy */ true);
  }
}
/* static */
/**
 * Main-thread runnable target: take the queued images from the singleton and
 * release them here (they are released when `images` goes out of scope, after
 * the lock is dropped).
 */
void SurfaceCache::ClearReleasingImages() {
  MOZ_ASSERT(NS_IsMainThread());

  nsTArray<RefPtr<image::Image>> images;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    if (sInstance) {
      sInstance->TakeReleasingImages(images, lock);
    }
  }
}
1847 } // namespace image
1848 } // namespace mozilla