Bug 1833854 - Part 7: Add the FOR_EACH_GC_TUNABLE macro to describe tunable GC parameters
js/src/gc/Scheduling.cpp (gecko.git)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "gc/Scheduling.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/TimeStamp.h"

#include <algorithm>
#include <cmath>

#include "gc/Memory.h"
#include "gc/Nursery.h"
#include "gc/RelocationOverlay.h"
#include "gc/ZoneAllocator.h"
#include "util/DifferentialTesting.h"
#include "vm/MutexIDs.h"

using namespace js;
using namespace js::gc;

using mozilla::CheckedInt;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
using mozilla::TimeDuration;
using mozilla::TimeStamp;

/*
 * We may start to collect a zone before its trigger threshold is reached if
 * GCRuntime::maybeGC() is called for that zone or we start collecting other
 * zones. These eager threshold factors are not configurable.
 */
static constexpr double HighFrequencyEagerAllocTriggerFactor = 0.85;
static constexpr double LowFrequencyEagerAllocTriggerFactor = 0.9;

/*
 * Don't allow heap growth factors to be set so low that eager collections
 * could reduce the trigger threshold.
 */
static constexpr double MinHeapGrowthFactor =
    1.0f / std::min(HighFrequencyEagerAllocTriggerFactor,
                    LowFrequencyEagerAllocTriggerFactor);

// Limit various parameters to reasonable levels to catch errors.
static constexpr double MaxHeapGrowthFactor = 100;
static constexpr size_t MaxNurseryBytesParam = 128 * 1024 * 1024;

namespace {

// Helper classes to marshal GC parameter values to/from uint32_t.

template <typename T>
struct ConvertGeneric {
  static uint32_t toUint32(T value) {
    static_assert(std::is_arithmetic_v<T>);
    if constexpr (std::is_signed_v<T>) {
      MOZ_ASSERT(value >= 0);
    }
    if constexpr (!std::is_same_v<T, bool> &&
                  std::numeric_limits<T>::max() >
                      std::numeric_limits<uint32_t>::max()) {
      MOZ_ASSERT(value <= UINT32_MAX);
    }
    return uint32_t(value);
  }
  static Maybe<T> fromUint32(uint32_t param) {
    // Currently we use explicit conversion and don't range check.
    return Some(T(param));
  }
};

using ConvertBool = ConvertGeneric<bool>;
using ConvertSize = ConvertGeneric<size_t>;
using ConvertDouble = ConvertGeneric<double>;

struct ConvertTimes100 {
  static uint32_t toUint32(double value) { return uint32_t(value * 100.0); }
  static Maybe<double> fromUint32(uint32_t param) {
    return Some(double(param) / 100.0);
  }
};

struct ConvertNurseryBytes : ConvertSize {
  static Maybe<size_t> fromUint32(uint32_t param) {
    return Some(Nursery::roundSize(param));
  }
};

struct ConvertKB {
  static uint32_t toUint32(size_t value) { return value / 1024; }
  static Maybe<size_t> fromUint32(uint32_t param) {
    // Parameters which represent heap sizes in bytes are restricted to values
    // which can be represented on 32 bit platforms.
    CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024;
    return size.isValid() ? Some(size_t(size.value())) : Nothing();
  }
};

struct ConvertMB {
  static uint32_t toUint32(size_t value) { return value / (1024 * 1024); }
  static Maybe<size_t> fromUint32(uint32_t param) {
    // Parameters which represent heap sizes in bytes are restricted to values
    // which can be represented on 32 bit platforms.
    CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024 * 1024;
    return size.isValid() ? Some(size_t(size.value())) : Nothing();
  }
};

struct ConvertMillis {
  static uint32_t toUint32(TimeDuration value) {
    return uint32_t(value.ToMilliseconds());
  }
  static Maybe<TimeDuration> fromUint32(uint32_t param) {
    return Some(TimeDuration::FromMilliseconds(param));
  }
};

struct ConvertSeconds {
  static uint32_t toUint32(TimeDuration value) {
    return uint32_t(value.ToSeconds());
  }
  static Maybe<TimeDuration> fromUint32(uint32_t param) {
    return Some(TimeDuration::FromSeconds(param));
  }
};

}  // anonymous namespace

// Helper functions to check GC parameter values

template <typename T>
static bool NoCheck(T value) {
  return true;
}

template <typename T>
static bool CheckNonZero(T value) {
  return value != 0;
}

static bool CheckNurserySize(size_t bytes) {
  return bytes >= SystemPageSize() && bytes <= MaxNurseryBytesParam;
}

static bool CheckHeapGrowth(double growth) {
  return growth >= MinHeapGrowthFactor && growth <= MaxHeapGrowthFactor;
}

static bool CheckIncrementalLimit(double factor) {
  return factor >= 1.0 && factor <= MaxHeapGrowthFactor;
}

static bool CheckNonZeroUnitRange(double value) {
  return value > 0.0 && value <= 100.0;
}
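
// The tunable parameters themselves are described by the FOR_EACH_GC_TUNABLE
// X-macro (see gc/Scheduling.h), which expands its argument once per parameter
// with a (key, type, name, convert, check, default) tuple. The methods below
// use it, together with the converter and checker helpers above, to generate
// the init, get, set and reset cases.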
GCSchedulingTunables::GCSchedulingTunables() {
#define INIT_TUNABLE_FIELD(key, type, name, convert, check, default) \
  name##_ = default;                                                 \
  MOZ_ASSERT(check(name##_));
  FOR_EACH_GC_TUNABLE(INIT_TUNABLE_FIELD)
#undef INIT_TUNABLE_FIELD

  checkInvariants();
}

uint32_t GCSchedulingTunables::getParameter(JSGCParamKey key) {
  switch (key) {
#define GET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key:                                                         \
    return convert::toUint32(name##_);
    FOR_EACH_GC_TUNABLE(GET_TUNABLE_FIELD)
#undef GET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown parameter key");
  }
}

bool GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value) {
  auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });

  switch (key) {
#define SET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key: {                                                       \
    Maybe<type> converted = convert::fromUint32(value);             \
    if (!converted || !check(converted.value())) {                  \
      return false;                                                 \
    }                                                               \
    name##_ = converted.value();                                    \
    break;                                                          \
  }
    FOR_EACH_GC_TUNABLE(SET_TUNABLE_FIELD)
#undef SET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown GC parameter.");
  }

  maintainInvariantsAfterUpdate(key);
  return true;
}

void GCSchedulingTunables::resetParameter(JSGCParamKey key) {
  auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });

  switch (key) {
#define RESET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key:                                                           \
    name##_ = default;                                                \
    MOZ_ASSERT(check(name##_));                                       \
    break;
    FOR_EACH_GC_TUNABLE(RESET_TUNABLE_FIELD)
#undef RESET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown GC parameter.");
  }

  maintainInvariantsAfterUpdate(key);
}
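
// Some parameters come in dependent pairs. Rather than rejecting an update
// that would violate a pairwise invariant, the partner value is pushed along
// to restore it.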
void GCSchedulingTunables::maintainInvariantsAfterUpdate(JSGCParamKey updated) {
  switch (updated) {
    case JSGC_MIN_NURSERY_BYTES:
      if (gcMaxNurseryBytes_ < gcMinNurseryBytes_) {
        gcMaxNurseryBytes_ = gcMinNurseryBytes_;
      }
      break;
    case JSGC_MAX_NURSERY_BYTES:
      if (gcMinNurseryBytes_ > gcMaxNurseryBytes_) {
        gcMinNurseryBytes_ = gcMaxNurseryBytes_;
      }
      break;
    case JSGC_SMALL_HEAP_SIZE_MAX:
      if (smallHeapSizeMaxBytes_ >= largeHeapSizeMinBytes_) {
        largeHeapSizeMinBytes_ = smallHeapSizeMaxBytes_ + 1;
      }
      break;
    case JSGC_LARGE_HEAP_SIZE_MIN:
      if (largeHeapSizeMinBytes_ <= smallHeapSizeMaxBytes_) {
        smallHeapSizeMaxBytes_ = largeHeapSizeMinBytes_ - 1;
      }
      break;
    case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
      if (highFrequencySmallHeapGrowth_ < highFrequencyLargeHeapGrowth_) {
        highFrequencyLargeHeapGrowth_ = highFrequencySmallHeapGrowth_;
      }
      break;
    case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
      if (highFrequencyLargeHeapGrowth_ > highFrequencySmallHeapGrowth_) {
        highFrequencySmallHeapGrowth_ = highFrequencyLargeHeapGrowth_;
      }
      break;
    default:
      break;
  }
}

void GCSchedulingTunables::checkInvariants() {
  MOZ_ASSERT(gcMinNurseryBytes_ == Nursery::roundSize(gcMinNurseryBytes_));
  MOZ_ASSERT(gcMaxNurseryBytes_ == Nursery::roundSize(gcMaxNurseryBytes_));
  MOZ_ASSERT(gcMinNurseryBytes_ <= gcMaxNurseryBytes_);
  MOZ_ASSERT(gcMinNurseryBytes_ >= SystemPageSize());
  MOZ_ASSERT(gcMaxNurseryBytes_ <= MaxNurseryBytesParam);

  MOZ_ASSERT(largeHeapSizeMinBytes_ > smallHeapSizeMaxBytes_);

  MOZ_ASSERT(lowFrequencyHeapGrowth_ >= MinHeapGrowthFactor);
  MOZ_ASSERT(lowFrequencyHeapGrowth_ <= MaxHeapGrowthFactor);

  MOZ_ASSERT(highFrequencySmallHeapGrowth_ >= MinHeapGrowthFactor);
  MOZ_ASSERT(highFrequencySmallHeapGrowth_ <= MaxHeapGrowthFactor);
  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= highFrequencySmallHeapGrowth_);
  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ >= MinHeapGrowthFactor);
  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= MaxHeapGrowthFactor);
}
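
// High frequency GC mode is entered when collections are triggered less than
// highFrequencyThreshold() apart. It selects the more generous high frequency
// heap growth factors in computeZoneHeapGrowthFactorForHeapSize below.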
void GCSchedulingState::updateHighFrequencyMode(
    const mozilla::TimeStamp& lastGCTime, const mozilla::TimeStamp& currentTime,
    const GCSchedulingTunables& tunables) {
  if (js::SupportDifferentialTesting()) {
    return;
  }

  inHighFrequencyGCMode_ =
      !lastGCTime.IsNull() &&
      lastGCTime + tunables.highFrequencyThreshold() > currentTime;
}

void GCSchedulingState::updateHighFrequencyModeForReason(JS::GCReason reason) {
  // These reasons indicate that the embedding isn't triggering GC slices often
  // enough and the allocation rate is high.
  if (reason == JS::GCReason::ALLOC_TRIGGER ||
      reason == JS::GCReason::TOO_MUCH_MALLOC) {
    inHighFrequencyGCMode_ = true;
  }
}

static constexpr size_t BytesPerMB = 1024 * 1024;
static constexpr double CollectionRateSmoothingFactor = 0.5;
static constexpr double AllocationRateSmoothingFactor = 0.5;
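
// Standard exponential moving average: with smoothing factor a in (0, 1], the
// result is a * newData + (1 - a) * prevAverage. A factor of 0.5 weights the
// newest sample equally with the entire previous history.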
static double ExponentialMovingAverage(double prevAverage, double newData,
                                       double smoothingFactor) {
  MOZ_ASSERT(smoothingFactor > 0.0 && smoothingFactor <= 1.0);
  return smoothingFactor * newData + (1.0 - smoothingFactor) * prevAverage;
}
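
// The collection rate is measured in MB of heap collected per second of GC
// time. Each zone is charged a share of the main thread GC time proportional
// to its share of the initial heap size, plus any time spent collecting that
// zone alone.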
void js::ZoneAllocator::updateCollectionRate(
    mozilla::TimeDuration mainThreadGCTime, size_t initialBytesForAllZones) {
  MOZ_ASSERT(initialBytesForAllZones != 0);
  MOZ_ASSERT(gcHeapSize.initialBytes() <= initialBytesForAllZones);

  double zoneFraction =
      double(gcHeapSize.initialBytes()) / double(initialBytesForAllZones);
  double zoneDuration = mainThreadGCTime.ToSeconds() * zoneFraction +
                        perZoneGCTime.ref().ToSeconds();
  double collectionRate =
      double(gcHeapSize.initialBytes()) / (zoneDuration * BytesPerMB);

  if (!smoothedCollectionRate.ref()) {
    smoothedCollectionRate = Some(collectionRate);
  } else {
    double prevRate = smoothedCollectionRate.ref().value();
    smoothedCollectionRate = Some(ExponentialMovingAverage(
        prevRate, collectionRate, CollectionRateSmoothingFactor));
  }
}

void js::ZoneAllocator::updateAllocationRate(TimeDuration mutatorTime) {
  // To get the total size allocated since the last collection we have to
  // take account of how much memory got freed in the meantime.
  size_t freedBytes = gcHeapSize.freedBytes();

  size_t sizeIncludingFreedBytes = gcHeapSize.bytes() + freedBytes;

  MOZ_ASSERT(prevGCHeapSize <= sizeIncludingFreedBytes);
  size_t allocatedBytes = sizeIncludingFreedBytes - prevGCHeapSize;

  double allocationRate =
      double(allocatedBytes) / (mutatorTime.ToSeconds() * BytesPerMB);

  if (!smoothedAllocationRate.ref()) {
    smoothedAllocationRate = Some(allocationRate);
  } else {
    double prevRate = smoothedAllocationRate.ref().value();
    smoothedAllocationRate = Some(ExponentialMovingAverage(
        prevRate, allocationRate, AllocationRateSmoothingFactor));
  }

  gcHeapSize.clearFreedBytes();
  prevGCHeapSize = gcHeapSize.bytes();
}

// GC thresholds may exceed the range of size_t on 32-bit platforms, so these
// are calculated using 64-bit integers and clamped.
static inline size_t ToClampedSize(uint64_t bytes) {
  return std::min(bytes, uint64_t(SIZE_MAX));
}

void HeapThreshold::setIncrementalLimitFromStartBytes(
    size_t retainedBytes, const GCSchedulingTunables& tunables) {
  // Calculate the incremental limit for a heap based on its size and start
  // threshold.
  //
  // This effectively classifies the heap size into small, medium or large,
  // and uses the small heap incremental limit parameter, the large heap
  // incremental limit parameter or an interpolation between them.
  //
  // The incremental limit is always set greater than the start threshold by
  // at least the maximum nursery size to reduce the chance that tenuring a
  // full nursery will send us straight into non-incremental collection.

  MOZ_ASSERT(tunables.smallHeapIncrementalLimit() >=
             tunables.largeHeapIncrementalLimit());

  double factor = LinearInterpolate(
      retainedBytes, tunables.smallHeapSizeMaxBytes(),
      tunables.smallHeapIncrementalLimit(), tunables.largeHeapSizeMinBytes(),
      tunables.largeHeapIncrementalLimit());

  uint64_t bytes =
      std::max(uint64_t(double(startBytes_) * factor),
               uint64_t(startBytes_) + tunables.gcMaxNurseryBytes());
  incrementalLimitBytes_ = ToClampedSize(bytes);
  MOZ_ASSERT(incrementalLimitBytes_ >= startBytes_);

  // Maintain the invariant that the slice threshold is always less than the
  // incremental limit when adjusting GC parameters.
  if (hasSliceThreshold() && sliceBytes() > incrementalLimitBytes()) {
    sliceBytes_ = incrementalLimitBytes();
  }
}

double HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
  double eagerTriggerFactor = highFrequencyGC
                                  ? HighFrequencyEagerAllocTriggerFactor
                                  : LowFrequencyEagerAllocTriggerFactor;
  return eagerTriggerFactor * startBytes();
}

void HeapThreshold::setSliceThreshold(ZoneAllocator* zone,
                                      const HeapSize& heapSize,
                                      const GCSchedulingTunables& tunables,
                                      bool waitingOnBGTask) {
  // Set the allocation threshold at which to trigger a GC slice in an
  // ongoing incremental collection. This is used to ensure progress in
  // allocation heavy code that may not return to the main event loop.
  //
  // The threshold is based on the JSGC_ZONE_ALLOC_DELAY_KB parameter, but
  // this is reduced to increase the slice frequency as we approach the
  // incremental limit, in the hope that we never reach it. If the collector
  // is waiting for a background task to complete, don't trigger any slices
  // until we reach the urgent threshold.
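  //
  // For example, if zoneAllocDelayBytes is 1 MB and the remaining headroom
  // has shrunk to half the urgent threshold, the next slice is scheduled
  // after 0.5 MB of allocation instead of 1 MB.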

  size_t bytesRemaining = incrementalBytesRemaining(heapSize);
  bool isUrgent = bytesRemaining < tunables.urgentThresholdBytes();

  size_t delayBeforeNextSlice = tunables.zoneAllocDelayBytes();
  if (isUrgent) {
    double fractionRemaining =
        double(bytesRemaining) / double(tunables.urgentThresholdBytes());
    delayBeforeNextSlice =
        size_t(double(delayBeforeNextSlice) * fractionRemaining);
    MOZ_ASSERT(delayBeforeNextSlice <= tunables.zoneAllocDelayBytes());
  } else if (waitingOnBGTask) {
    delayBeforeNextSlice = bytesRemaining - tunables.urgentThresholdBytes();
  }

  sliceBytes_ = ToClampedSize(
      std::min(uint64_t(heapSize.bytes()) + uint64_t(delayBeforeNextSlice),
               uint64_t(incrementalLimitBytes_)));
}

size_t HeapThreshold::incrementalBytesRemaining(
    const HeapSize& heapSize) const {
  if (heapSize.bytes() >= incrementalLimitBytes_) {
    return 0;
  }

  return incrementalLimitBytes_ - heapSize.bytes();
}

/* static */
double HeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
    size_t lastBytes, const GCSchedulingTunables& tunables,
    const GCSchedulingState& state) {
  // For small zones, our collection heuristics do not matter much: favor
  // something simple in this case.
  if (lastBytes < 1 * 1024 * 1024) {
    return tunables.lowFrequencyHeapGrowth();
  }

  // The heap growth factor depends on the heap size after a GC and the GC
  // frequency. If GCs are not triggering in rapid succession, use a lower
  // threshold so that we will collect garbage sooner.
  if (!state.inHighFrequencyGCMode()) {
    return tunables.lowFrequencyHeapGrowth();
  }

  // For high frequency GCs we let the heap grow depending on whether we
  // classify the heap as small, medium or large. There are parameters for
  // small and large heap sizes and linear interpolation is used between them
  // for medium sized heaps.

  MOZ_ASSERT(tunables.smallHeapSizeMaxBytes() <=
             tunables.largeHeapSizeMinBytes());
  MOZ_ASSERT(tunables.highFrequencyLargeHeapGrowth() <=
             tunables.highFrequencySmallHeapGrowth());
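
  // The growth factor falls linearly from highFrequencySmallHeapGrowth at or
  // below smallHeapSizeMaxBytes to highFrequencyLargeHeapGrowth at or above
  // largeHeapSizeMinBytes.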
  return LinearInterpolate(lastBytes, tunables.smallHeapSizeMaxBytes(),
                           tunables.highFrequencySmallHeapGrowth(),
                           tunables.largeHeapSizeMinBytes(),
                           tunables.highFrequencyLargeHeapGrowth());
}

/* static */
size_t GCHeapThreshold::computeZoneTriggerBytes(
    double growthFactor, size_t lastBytes,
    const GCSchedulingTunables& tunables) {
  size_t base = std::max(lastBytes, tunables.gcZoneAllocThresholdBase());
  double trigger = double(base) * growthFactor;
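
  // Cap the trigger so that the incremental limit later derived from it
  // (roughly trigger * largeHeapIncrementalLimit for large heaps) stays
  // within gcMaxBytes.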
  double triggerMax =
      double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
  return ToClampedSize(std::min(triggerMax, trigger));
}

// Parameters for balanced heap limits computation.

// The W0 parameter. How much memory can be traversed in the minimum
// collection time.
static constexpr double BalancedHeapBaseMB = 5.0;

// The minimum heap limit. Do not constrain the heap to any less than this
// size.
static constexpr double MinBalancedHeapLimitMB = 10.0;

// The minimum amount of additional space to allow beyond the retained size.
static constexpr double MinBalancedHeadroomMB = 3.0;

// The maximum factor by which to expand the heap beyond the retained size.
static constexpr double MaxHeapGrowth = 3.0;

// The default mutator allocation rate in MB/s to use before we have an
// estimate. Used to set the heap limit for zones that have not yet been
// collected.
static constexpr double DefaultAllocationRate = 0.0;

// The s0 parameter. The default collection rate in MB/s to use before we have
// an estimate. Used to set the heap limit for zones that have not yet been
// collected.
static constexpr double DefaultCollectionRate = 200.0;
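
// The limit computed below is
//
//   M = max(W + min(d * sqrt((W + W0) * g / s), MaxHeapGrowth * W),
//           MinBalancedHeapLimitMB, W + MinBalancedHeadroomMB)
//
// where W is the retained size in MB. As an illustrative (not default)
// example: with W = 50 MB, g = s and d = 1, the headroom term is
// sqrt(55) ~= 7.4 MB, giving a limit of roughly 57.4 MB.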
double GCHeapThreshold::computeBalancedHeapLimit(
    size_t lastBytes, double allocationRate, double collectionRate,
    const GCSchedulingTunables& tunables) {
  MOZ_ASSERT(tunables.balancedHeapLimitsEnabled());

  // Optimal heap limits as described in https://arxiv.org/abs/2204.10455

  double W = double(lastBytes) / BytesPerMB;  // Retained size / MB.
  double W0 = BalancedHeapBaseMB;
  double d = tunables.heapGrowthFactor();  // Rearranged constant 'c'.
  double g = allocationRate;
  double s = collectionRate;
  double f = d * sqrt((W + W0) * (g / s));
  double M = W + std::min(f, MaxHeapGrowth * W);
  M = std::max({MinBalancedHeapLimitMB, W + MinBalancedHeadroomMB, M});

  return M * double(BytesPerMB);
}

void GCHeapThreshold::updateStartThreshold(
    size_t lastBytes, mozilla::Maybe<double> allocationRate,
    mozilla::Maybe<double> collectionRate, const GCSchedulingTunables& tunables,
    const GCSchedulingState& state, bool isAtomsZone) {
  if (!tunables.balancedHeapLimitsEnabled()) {
    double growthFactor =
        computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);

    startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, tunables);
  } else {
    double threshold = computeBalancedHeapLimit(
        lastBytes, allocationRate.valueOr(DefaultAllocationRate),
        collectionRate.valueOr(DefaultCollectionRate), tunables);

    double triggerMax =
        double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
    startBytes_ = ToClampedSize(uint64_t(std::min(triggerMax, threshold)));
  }

  setIncrementalLimitFromStartBytes(lastBytes, tunables);
}

/* static */
size_t MallocHeapThreshold::computeZoneTriggerBytes(double growthFactor,
                                                    size_t lastBytes,
                                                    size_t baseBytes) {
  return ToClampedSize(double(std::max(lastBytes, baseBytes)) * growthFactor);
}

void MallocHeapThreshold::updateStartThreshold(
    size_t lastBytes, const GCSchedulingTunables& tunables,
    const GCSchedulingState& state) {
  double growthFactor =
      computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);

  startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes,
                                        tunables.mallocThresholdBase());

  setIncrementalLimitFromStartBytes(lastBytes, tunables);
}

#ifdef DEBUG

static const char* MemoryUseName(MemoryUse use) {
  switch (use) {
#  define DEFINE_CASE(Name) \
    case MemoryUse::Name:   \
      return #Name;
    JS_FOR_EACH_MEMORY_USE(DEFINE_CASE)
#  undef DEFINE_CASE
  }

  MOZ_CRASH("Unknown memory use");
}

MemoryTracker::MemoryTracker() : mutex(mutexid::MemoryTracker) {}

void MemoryTracker::checkEmptyOnDestroy() {
  bool ok = true;

  if (!gcMap.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to JS::RemoveAssociatedMemory:\n");
    for (auto r = gcMap.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx %s\n", r.front().key().ptr(),
              r.front().value(), MemoryUseName(r.front().key().use()));
    }
  }

  if (!nonGCMap.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to Zone::decNonGCMemory:\n");
    for (auto r = nonGCMap.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx\n", r.front().key().ptr(),
              r.front().value());
    }
  }

  MOZ_ASSERT(ok);
}

/* static */
inline bool MemoryTracker::isGCMemoryUse(MemoryUse use) {
  // Most memory uses are for memory associated with GC things but some are
  // for memory associated with non-GC thing pointers.
  return !isNonGCMemoryUse(use);
}

/* static */
inline bool MemoryTracker::isNonGCMemoryUse(MemoryUse use) {
  return use == MemoryUse::TrackedAllocPolicy;
}

/* static */
inline bool MemoryTracker::allowMultipleAssociations(MemoryUse use) {
  // For most uses only one association is possible for each GC thing. Allow a
  // one-to-many relationship only where necessary.
  return isNonGCMemoryUse(use) || use == MemoryUse::RegExpSharedBytecode ||
         use == MemoryUse::BreakpointSite || use == MemoryUse::Breakpoint ||
         use == MemoryUse::ForOfPICStub || use == MemoryUse::ICUObject;
}

void MemoryTracker::trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(cell->isTenured());
  MOZ_ASSERT(isGCMemoryUse(use));

  LockGuard<Mutex> lock(mutex);

  Key<Cell> key{cell, use};
  AutoEnterOOMUnsafeRegion oomUnsafe;
  auto ptr = gcMap.lookupForAdd(key);
  if (ptr) {
    if (!allowMultipleAssociations(use)) {
      MOZ_CRASH_UNSAFE_PRINTF("Association already present: %p 0x%zx %s", cell,
                              nbytes, MemoryUseName(use));
    }
    ptr->value() += nbytes;
    return;
  }

  if (!gcMap.add(ptr, key, nbytes)) {
    oomUnsafe.crash("MemoryTracker::trackGCMemory");
  }
}

void MemoryTracker::untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(cell->isTenured());

  LockGuard<Mutex> lock(mutex);

  Key<Cell> key{cell, use};
  auto ptr = gcMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("Association not found: %p 0x%zx %s", cell, nbytes,
                            MemoryUseName(use));
  }

  if (!allowMultipleAssociations(use) && ptr->value() != nbytes) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "Association for %p %s has different size: "
        "expected 0x%zx but got 0x%zx",
        cell, MemoryUseName(use), ptr->value(), nbytes);
  }

  if (nbytes > ptr->value()) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "Association for %p %s size is too large: "
        "expected at most 0x%zx but got 0x%zx",
        cell, MemoryUseName(use), ptr->value(), nbytes);
  }

  ptr->value() -= nbytes;

  if (ptr->value() == 0) {
    gcMap.remove(ptr);
  }
}
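
// Exchange the tracked byte counts of two cells, e.g. when their contents are
// swapped. Since only tenured cells are tracked, a count whose new owner is
// not tenured is simply dropped.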
void MemoryTracker::swapGCMemory(Cell* a, Cell* b, MemoryUse use) {
  Key<Cell> ka{a, use};
  Key<Cell> kb{b, use};

  LockGuard<Mutex> lock(mutex);

  size_t sa = getAndRemoveEntry(ka, lock);
  size_t sb = getAndRemoveEntry(kb, lock);

  AutoEnterOOMUnsafeRegion oomUnsafe;

  if ((sa && b->isTenured() && !gcMap.put(kb, sa)) ||
      (sb && a->isTenured() && !gcMap.put(ka, sb))) {
    oomUnsafe.crash("MemoryTracker::swapGCMemory");
  }
}

size_t MemoryTracker::getAndRemoveEntry(const Key<Cell>& key,
                                        LockGuard<Mutex>& lock) {
  auto ptr = gcMap.lookup(key);
  if (!ptr) {
    return 0;
  }

  size_t size = ptr->value();
  gcMap.remove(ptr);
  return size;
}

void MemoryTracker::registerNonGCMemory(void* mem, MemoryUse use) {
  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookupForAdd(key);
  if (ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s association %p already registered",
                            MemoryUseName(use), mem);
  }

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!nonGCMap.add(ptr, key, 0)) {
    oomUnsafe.crash("MemoryTracker::registerNonGCMemory");
  }
}

void MemoryTracker::unregisterNonGCMemory(void* mem, MemoryUse use) {
  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
                            mem);
  }

  if (ptr->value() != 0) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "%s association %p still has 0x%zx bytes associated",
        MemoryUseName(use), mem, ptr->value());
  }

  nonGCMap.remove(ptr);
}

void MemoryTracker::moveNonGCMemory(void* dst, void* src, MemoryUse use) {
  LockGuard<Mutex> lock(mutex);

  Key<void> srcKey{src, use};
  auto srcPtr = nonGCMap.lookup(srcKey);
  if (!srcPtr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
                            src);
  }

  size_t nbytes = srcPtr->value();
  nonGCMap.remove(srcPtr);

  Key<void> dstKey{dst, use};
  auto dstPtr = nonGCMap.lookupForAdd(dstKey);
  if (dstPtr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s %p already registered", MemoryUseName(use),
                            dst);
  }

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!nonGCMap.add(dstPtr, dstKey, nbytes)) {
    oomUnsafe.crash("MemoryTracker::moveNonGCMemory");
  }
}

void MemoryTracker::incNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(isNonGCMemoryUse(use));

  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
                            mem);
  }

  ptr->value() += nbytes;
}

void MemoryTracker::decNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(isNonGCMemoryUse(use));

  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
                            mem);
  }

  size_t& value = ptr->value();
  if (nbytes > value) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "%s allocation %p is too large: "
        "expected at most 0x%zx but got 0x%zx bytes",
        MemoryUseName(use), mem, value, nbytes);
  }

  value -= nbytes;
}

void MemoryTracker::fixupAfterMovingGC() {
  // Update the table after we move GC things. We don't use StableCellHasher
  // because that would create a difference between debug and release builds.
  for (GCMap::Enum e(gcMap); !e.empty(); e.popFront()) {
    const auto& key = e.front().key();
    Cell* cell = key.ptr();
    if (cell->isForwarded()) {
      cell = gc::RelocationOverlay::fromCell(cell)->forwardingAddress();
      e.rekeyFront(Key<Cell>{cell, key.use()});
    }
  }
}
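
// Key packs the pointer and the MemoryUse into a single 64-bit word on 64-bit
// platforms (checked by the static_assert below); the constructor assertions
// verify that both values round-trip through the packed representation.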
template <typename Ptr>
inline MemoryTracker::Key<Ptr>::Key(Ptr* ptr, MemoryUse use)
    : ptr_(uint64_t(ptr)), use_(uint64_t(use)) {
#  ifdef JS_64BIT
  static_assert(sizeof(Key) == 8,
                "MemoryTracker::Key should be packed into 8 bytes");
#  endif
  MOZ_ASSERT(this->ptr() == ptr);
  MOZ_ASSERT(this->use() == use);
}

template <typename Ptr>
inline Ptr* MemoryTracker::Key<Ptr>::ptr() const {
  return reinterpret_cast<Ptr*>(ptr_);
}

template <typename Ptr>
inline MemoryUse MemoryTracker::Key<Ptr>::use() const {
  return static_cast<MemoryUse>(use_);
}

template <typename Ptr>
inline HashNumber MemoryTracker::Hasher<Ptr>::hash(const Lookup& l) {
  return mozilla::HashGeneric(DefaultHasher<Ptr*>::hash(l.ptr()),
                              DefaultHasher<unsigned>::hash(unsigned(l.use())));
}

template <typename Ptr>
inline bool MemoryTracker::Hasher<Ptr>::match(const KeyT& k, const Lookup& l) {
  return k.ptr() == l.ptr() && k.use() == l.use();
}

template <typename Ptr>
inline void MemoryTracker::Hasher<Ptr>::rekey(KeyT& k, const KeyT& newKey) {
  k = newKey;
}

#endif  // DEBUG