/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "gc/Scheduling.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/TimeStamp.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>

#include "gc/Memory.h"
#include "gc/Nursery.h"
#include "gc/RelocationOverlay.h"
#include "gc/ZoneAllocator.h"
#include "util/DifferentialTesting.h"
#include "vm/MutexIDs.h"

using namespace js;
using namespace js::gc;

using mozilla::CheckedInt;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
using mozilla::TimeDuration;

/*
 * We may start to collect a zone before its trigger threshold is reached if
 * GCRuntime::maybeGC() is called for that zone or we start collecting other
 * zones. These eager threshold factors are not configurable.
 */
static constexpr double HighFrequencyEagerAllocTriggerFactor = 0.85;
static constexpr double LowFrequencyEagerAllocTriggerFactor = 0.9;

/*
 * Don't allow heap growth factors to be set so low that eager collections
 * could reduce the trigger threshold.
 */
static constexpr double MinHeapGrowthFactor =
    1.0f / std::min(HighFrequencyEagerAllocTriggerFactor,
                    LowFrequencyEagerAllocTriggerFactor);

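// Illustrative arithmetic (not in the original source): MinHeapGrowthFactor
// works out to 1 / 0.85, about 1.18. Growing a 100 MB threshold by at least
// that factor gives roughly 118 MB, whose high-frequency eager trigger
// (0.85 * 118 MB, about 100 MB) cannot fall below the old threshold.
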
// Limit various parameters to reasonable levels to catch errors.
static constexpr double MaxHeapGrowthFactor = 100;
static constexpr size_t MaxNurseryBytesParam = 128 * 1024 * 1024;

namespace {

// Helper classes to marshal GC parameter values to/from uint32_t.

template <typename T>
struct ConvertGeneric {
  static uint32_t toUint32(T value) {
    static_assert(std::is_arithmetic_v<T>);
    if constexpr (std::is_signed_v<T>) {
      MOZ_ASSERT(value >= 0);
    }
    if constexpr (!std::is_same_v<T, bool> &&
                  std::numeric_limits<T>::max() >
                      std::numeric_limits<uint32_t>::max()) {
      MOZ_ASSERT(value <= UINT32_MAX);
    }
    return uint32_t(value);
  }
  static Maybe<T> fromUint32(uint32_t param) {
    // Currently we use explicit conversion and don't range check.
    return Some(T(param));
  }
};

using ConvertBool = ConvertGeneric<bool>;
using ConvertSize = ConvertGeneric<size_t>;
using ConvertDouble = ConvertGeneric<double>;

struct ConvertTimes100 {
  static uint32_t toUint32(double value) { return uint32_t(value * 100.0); }
  static Maybe<double> fromUint32(uint32_t param) {
    return Some(double(param) / 100.0);
  }
};

struct ConvertNurseryBytes : ConvertSize {
  static Maybe<size_t> fromUint32(uint32_t param) {
    return Some(Nursery::roundSize(param));
  }
};

struct ConvertKB {
  static uint32_t toUint32(size_t value) { return value / 1024; }
  static Maybe<size_t> fromUint32(uint32_t param) {
    // Parameters which represent heap sizes in bytes are restricted to values
    // which can be represented on 32 bit platforms.
    CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024;
    return size.isValid() ? Some(size_t(size.value())) : Nothing();
  }
};

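// For example (illustrative): a ConvertKB param of 4 * 1024 * 1024 KB is
// 4 GiB, which overflows CheckedInt<uint32_t>, so fromUint32 returns
// Nothing() and the parameter update is rejected by setParameter below.
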
struct ConvertMB {
  static uint32_t toUint32(size_t value) { return value / (1024 * 1024); }
  static Maybe<size_t> fromUint32(uint32_t param) {
    // Parameters which represent heap sizes in bytes are restricted to values
    // which can be represented on 32 bit platforms.
    CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024 * 1024;
    return size.isValid() ? Some(size_t(size.value())) : Nothing();
  }
};

struct ConvertMillis {
  static uint32_t toUint32(TimeDuration value) {
    return uint32_t(value.ToMilliseconds());
  }
  static Maybe<TimeDuration> fromUint32(uint32_t param) {
    return Some(TimeDuration::FromMilliseconds(param));
  }
};

struct ConvertSeconds {
  static uint32_t toUint32(TimeDuration value) {
    return uint32_t(value.ToSeconds());
  }
  static Maybe<TimeDuration> fromUint32(uint32_t param) {
    return Some(TimeDuration::FromSeconds(param));
  }
};

}  // anonymous namespace

// Helper functions to check GC parameter values.

template <typename T>
static bool NoCheck(T value) {
  return true;
}

template <typename T>
static bool CheckNonZero(T value) {
  return value != 0;
}

static bool CheckNurserySize(size_t bytes) {
  return bytes >= SystemPageSize() && bytes <= MaxNurseryBytesParam;
}

static bool CheckHeapGrowth(double growth) {
  return growth >= MinHeapGrowthFactor && growth <= MaxHeapGrowthFactor;
}

static bool CheckIncrementalLimit(double factor) {
  return factor >= 1.0 && factor <= MaxHeapGrowthFactor;
}

static bool CheckNonZeroUnitRange(double value) {
  return value > 0.0 && value <= 100.0;
}

GCSchedulingTunables::GCSchedulingTunables() {
#define INIT_TUNABLE_FIELD(key, type, name, convert, check, default) \
  name##_ = default;                                                 \
  MOZ_ASSERT(check(name##_));
  FOR_EACH_GC_TUNABLE(INIT_TUNABLE_FIELD)
#undef INIT_TUNABLE_FIELD

  checkInvariants();
}

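// Illustrative expansion of the X-macro pattern used above (the
// FOR_EACH_GC_TUNABLE entry shown is assumed, not quoted from the header): an
// entry such as
//   _(JSGC_SMALL_HEAP_SIZE_MAX, size_t, smallHeapSizeMaxBytes, ConvertMB,
//     NoCheck, 100 * 1024 * 1024)
// would expand in the constructor to
//   smallHeapSizeMaxBytes_ = 100 * 1024 * 1024;
//   MOZ_ASSERT(NoCheck(smallHeapSizeMaxBytes_));
// and to a matching case in each of the switch statements below.
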
uint32_t GCSchedulingTunables::getParameter(JSGCParamKey key) {
  switch (key) {
#define GET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key:                                                         \
    return convert::toUint32(name##_);
    FOR_EACH_GC_TUNABLE(GET_TUNABLE_FIELD)
#undef GET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown parameter key");
  }
}

bool GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value) {
  auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });

  switch (key) {
#define SET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key: {                                                       \
    Maybe<type> converted = convert::fromUint32(value);             \
    if (!converted || !check(converted.value())) {                  \
      return false;                                                 \
    }                                                               \
    name##_ = converted.value();                                    \
    break;                                                          \
  }
    FOR_EACH_GC_TUNABLE(SET_TUNABLE_FIELD)
#undef SET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown GC parameter.");
  }

  maintainInvariantsAfterUpdate(key);
  return true;
}

void GCSchedulingTunables::resetParameter(JSGCParamKey key) {
  auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });

  switch (key) {
#define RESET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key:                                                           \
    name##_ = default;                                                \
    MOZ_ASSERT(check(name##_));                                       \
    break;
    FOR_EACH_GC_TUNABLE(RESET_TUNABLE_FIELD)
#undef RESET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown GC parameter.");
  }

  maintainInvariantsAfterUpdate(key);
}

void GCSchedulingTunables::maintainInvariantsAfterUpdate(JSGCParamKey updated) {
  // Check whether a change to parameter |updated| has broken an invariant in
  // relation to another parameter. If it has, adjust that other parameter to
  // restore the invariant.
  switch (updated) {
    case JSGC_MIN_NURSERY_BYTES:
      if (gcMaxNurseryBytes_ < gcMinNurseryBytes_) {
        gcMaxNurseryBytes_ = gcMinNurseryBytes_;
      }
      break;
    case JSGC_MAX_NURSERY_BYTES:
      if (gcMinNurseryBytes_ > gcMaxNurseryBytes_) {
        gcMinNurseryBytes_ = gcMaxNurseryBytes_;
      }
      break;
    case JSGC_SMALL_HEAP_SIZE_MAX:
      if (smallHeapSizeMaxBytes_ >= largeHeapSizeMinBytes_) {
        largeHeapSizeMinBytes_ = smallHeapSizeMaxBytes_ + 1;
      }
      break;
    case JSGC_LARGE_HEAP_SIZE_MIN:
      if (largeHeapSizeMinBytes_ <= smallHeapSizeMaxBytes_) {
        smallHeapSizeMaxBytes_ = largeHeapSizeMinBytes_ - 1;
      }
      break;
    case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
      if (highFrequencySmallHeapGrowth_ < highFrequencyLargeHeapGrowth_) {
        highFrequencyLargeHeapGrowth_ = highFrequencySmallHeapGrowth_;
      }
      break;
    case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
      if (highFrequencyLargeHeapGrowth_ > highFrequencySmallHeapGrowth_) {
        highFrequencySmallHeapGrowth_ = highFrequencyLargeHeapGrowth_;
      }
      break;
    case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT:
      if (smallHeapIncrementalLimit_ < largeHeapIncrementalLimit_) {
        largeHeapIncrementalLimit_ = smallHeapIncrementalLimit_;
      }
      break;
    case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT:
      if (largeHeapIncrementalLimit_ > smallHeapIncrementalLimit_) {
        smallHeapIncrementalLimit_ = largeHeapIncrementalLimit_;
      }
      break;
    default:
      break;
  }
}

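// For example, raising JSGC_SMALL_HEAP_SIZE_MAX in
// maintainInvariantsAfterUpdate() to a value at or above
// largeHeapSizeMinBytes_ bumps largeHeapSizeMinBytes_ to one byte more,
// preserving the largeHeapSizeMinBytes_ > smallHeapSizeMaxBytes_ invariant
// asserted in checkInvariants() below.
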
void GCSchedulingTunables::checkInvariants() {
  MOZ_ASSERT(gcMinNurseryBytes_ == Nursery::roundSize(gcMinNurseryBytes_));
  MOZ_ASSERT(gcMaxNurseryBytes_ == Nursery::roundSize(gcMaxNurseryBytes_));
  MOZ_ASSERT(gcMinNurseryBytes_ <= gcMaxNurseryBytes_);
  MOZ_ASSERT(gcMinNurseryBytes_ >= SystemPageSize());
  MOZ_ASSERT(gcMaxNurseryBytes_ <= MaxNurseryBytesParam);

  MOZ_ASSERT(largeHeapSizeMinBytes_ > smallHeapSizeMaxBytes_);

  MOZ_ASSERT(lowFrequencyHeapGrowth_ >= MinHeapGrowthFactor);
  MOZ_ASSERT(lowFrequencyHeapGrowth_ <= MaxHeapGrowthFactor);

  MOZ_ASSERT(highFrequencySmallHeapGrowth_ >= MinHeapGrowthFactor);
  MOZ_ASSERT(highFrequencySmallHeapGrowth_ <= MaxHeapGrowthFactor);
  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= highFrequencySmallHeapGrowth_);
  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ >= MinHeapGrowthFactor);
  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= MaxHeapGrowthFactor);

  MOZ_ASSERT(smallHeapIncrementalLimit_ >= largeHeapIncrementalLimit_);
}

void GCSchedulingState::updateHighFrequencyMode(
    const mozilla::TimeStamp& lastGCTime, const mozilla::TimeStamp& currentTime,
    const GCSchedulingTunables& tunables) {
  if (js::SupportDifferentialTesting()) {
    return;
  }

  inHighFrequencyGCMode_ =
      !lastGCTime.IsNull() &&
      lastGCTime + tunables.highFrequencyThreshold() > currentTime;
}

void GCSchedulingState::updateHighFrequencyModeForReason(JS::GCReason reason) {
  // These reasons indicate that the embedding isn't triggering GC slices often
  // enough and that the allocation rate is high.
  if (reason == JS::GCReason::ALLOC_TRIGGER ||
      reason == JS::GCReason::TOO_MUCH_MALLOC) {
    inHighFrequencyGCMode_ = true;
  }
}

static constexpr size_t BytesPerMB = 1024 * 1024;
static constexpr double CollectionRateSmoothingFactor = 0.5;
static constexpr double AllocationRateSmoothingFactor = 0.5;

static double ExponentialMovingAverage(double prevAverage, double newData,
                                       double smoothingFactor) {
  MOZ_ASSERT(smoothingFactor > 0.0 && smoothingFactor <= 1.0);
  return smoothingFactor * newData + (1.0 - smoothingFactor) * prevAverage;
}

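// For example, with smoothingFactor = 0.5, a previous average of 100.0 and a
// new sample of 200.0, ExponentialMovingAverage returns
// 0.5 * 200.0 + 0.5 * 100.0 = 150.0: each new sample pulls the average
// halfway towards it.
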
void js::ZoneAllocator::updateCollectionRate(
    mozilla::TimeDuration mainThreadGCTime, size_t initialBytesForAllZones) {
  MOZ_ASSERT(initialBytesForAllZones != 0);
  MOZ_ASSERT(gcHeapSize.initialBytes() <= initialBytesForAllZones);

  double zoneFraction =
      double(gcHeapSize.initialBytes()) / double(initialBytesForAllZones);
  double zoneDuration = mainThreadGCTime.ToSeconds() * zoneFraction +
                        perZoneGCTime.ref().ToSeconds();
  double collectionRate =
      double(gcHeapSize.initialBytes()) / (zoneDuration * BytesPerMB);

  if (!smoothedCollectionRate.ref()) {
    smoothedCollectionRate = Some(collectionRate);
  } else {
    double prevRate = smoothedCollectionRate.ref().value();
    smoothedCollectionRate = Some(ExponentialMovingAverage(
        prevRate, collectionRate, CollectionRateSmoothingFactor));
  }
}

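// Illustrative numbers for updateCollectionRate (not from the source): if
// this zone held 20 MB of a 100 MB heap when collection started, 50 ms of
// main-thread GC time is apportioned as 0.2 * 50 ms = 10 ms to this zone,
// giving a collection rate of 20 MB / 0.010 s = 2000 MB/s (ignoring
// perZoneGCTime).
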
void js::ZoneAllocator::updateAllocationRate(TimeDuration mutatorTime) {
  // To get the total size allocated since the last collection we have to
  // take account of how much memory got freed in the meantime.
  size_t freedBytes = gcHeapSize.freedBytes();

  size_t sizeIncludingFreedBytes = gcHeapSize.bytes() + freedBytes;

  MOZ_ASSERT(prevGCHeapSize <= sizeIncludingFreedBytes);
  size_t allocatedBytes = sizeIncludingFreedBytes - prevGCHeapSize;

  double allocationRate =
      double(allocatedBytes) / (mutatorTime.ToSeconds() * BytesPerMB);

  if (!smoothedAllocationRate.ref()) {
    smoothedAllocationRate = Some(allocationRate);
  } else {
    double prevRate = smoothedAllocationRate.ref().value();
    smoothedAllocationRate = Some(ExponentialMovingAverage(
        prevRate, allocationRate, AllocationRateSmoothingFactor));
  }

  gcHeapSize.clearFreedBytes();
  prevGCHeapSize = gcHeapSize.bytes();
}

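// Illustrative numbers for updateAllocationRate (not from the source):
// starting from a 30 MB heap, if the heap now reads 40 MB and 10 MB was freed
// since the last collection, allocatedBytes = (40 + 10) - 30 = 20 MB; over
// 2 s of mutator time that is an allocation rate of 10 MB/s.
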
// GC thresholds may exceed the range of size_t on 32-bit platforms, so these
// are calculated using 64-bit integers and clamped.
static inline size_t ToClampedSize(uint64_t bytes) {
  return std::min(bytes, uint64_t(SIZE_MAX));
}

void HeapThreshold::setIncrementalLimitFromStartBytes(
    size_t retainedBytes, const GCSchedulingTunables& tunables) {
  // Calculate the incremental limit for a heap based on its size and start
  // threshold.
  //
  // This effectively classifies the heap size into small, medium or large,
  // and uses the small heap incremental limit parameter, the large heap
  // incremental limit parameter or an interpolation between them.
  //
  // The incremental limit is always set greater than the start threshold by
  // at least the maximum nursery size to reduce the chance that tenuring a
  // full nursery will send us straight into non-incremental collection.

  MOZ_ASSERT(tunables.smallHeapIncrementalLimit() >=
             tunables.largeHeapIncrementalLimit());

  double factor = LinearInterpolate(double(retainedBytes),
                                    double(tunables.smallHeapSizeMaxBytes()),
                                    tunables.smallHeapIncrementalLimit(),
                                    double(tunables.largeHeapSizeMinBytes()),
                                    tunables.largeHeapIncrementalLimit());

  uint64_t bytes =
      std::max(uint64_t(double(startBytes_) * factor),
               uint64_t(startBytes_) + tunables.gcMaxNurseryBytes());
  incrementalLimitBytes_ = ToClampedSize(bytes);
  MOZ_ASSERT(incrementalLimitBytes_ >= startBytes_);

  // Maintain the invariant that the slice threshold is always less than the
  // incremental limit when adjusting GC parameters.
  if (hasSliceThreshold() && sliceBytes() > incrementalLimitBytes()) {
    sliceBytes_ = incrementalLimitBytes();
  }
}

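// Illustrative interpolation (assumed parameter values): with a small-heap
// max of 100 MB (limit factor 1.5), a large-heap min of 500 MB (limit factor
// 1.1) and retainedBytes = 300 MB, the heap sits halfway between the two
// sizes, so factor = (1.5 + 1.1) / 2 = 1.3 and the incremental limit becomes
// 1.3 * startBytes_ (or startBytes_ plus the maximum nursery size, if that
// is larger).
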
size_t HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
  double eagerTriggerFactor = highFrequencyGC
                                  ? HighFrequencyEagerAllocTriggerFactor
                                  : LowFrequencyEagerAllocTriggerFactor;
  return size_t(eagerTriggerFactor * double(startBytes()));
}

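// For example, with a 40 MB start threshold eagerAllocTrigger returns
// 0.85 * 40 MB = 34 MB in high-frequency mode and 0.9 * 40 MB = 36 MB
// otherwise.
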
void HeapThreshold::setSliceThreshold(ZoneAllocator* zone,
                                      const HeapSize& heapSize,
                                      const GCSchedulingTunables& tunables,
                                      bool waitingOnBGTask) {
  // Set the allocation threshold at which to trigger a GC slice in an
  // ongoing incremental collection. This is used to ensure progress in
  // allocation heavy code that may not return to the main event loop.
  //
  // The threshold is based on the JSGC_ZONE_ALLOC_DELAY_KB parameter, but
  // this is reduced to increase the slice frequency as we approach the
  // incremental limit, in the hope that we never reach it. If the collector
  // is waiting for a background task to complete, don't trigger any slices
  // until we reach the urgent threshold.

  size_t bytesRemaining = incrementalBytesRemaining(heapSize);
  bool isUrgent = bytesRemaining < tunables.urgentThresholdBytes();

  size_t delayBeforeNextSlice = tunables.zoneAllocDelayBytes();
  if (isUrgent) {
    double fractionRemaining =
        double(bytesRemaining) / double(tunables.urgentThresholdBytes());
    delayBeforeNextSlice =
        size_t(double(delayBeforeNextSlice) * fractionRemaining);
    MOZ_ASSERT(delayBeforeNextSlice <= tunables.zoneAllocDelayBytes());
  } else if (waitingOnBGTask) {
    delayBeforeNextSlice = bytesRemaining - tunables.urgentThresholdBytes();
  }

  sliceBytes_ = ToClampedSize(
      std::min(uint64_t(heapSize.bytes()) + uint64_t(delayBeforeNextSlice),
               uint64_t(incrementalLimitBytes_)));
}

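// Illustrative numbers for setSliceThreshold (not from the source): with a
// 16 MB urgent threshold and a 1 MB zone alloc delay, a zone 8 MB short of
// its incremental limit is urgent, so the delay shrinks to
// 1 MB * (8 / 16) = 512 KB and the next slice triggers that much above the
// current heap size.
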
size_t HeapThreshold::incrementalBytesRemaining(
    const HeapSize& heapSize) const {
  if (heapSize.bytes() >= incrementalLimitBytes_) {
    return 0;
  }

  return incrementalLimitBytes_ - heapSize.bytes();
}

/* static */
double HeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
    size_t lastBytes, const GCSchedulingTunables& tunables,
    const GCSchedulingState& state) {
  // For small zones, our collection heuristics do not matter much: favor
  // something simple in this case.
  if (lastBytes < 1 * 1024 * 1024) {
    return tunables.lowFrequencyHeapGrowth();
  }

  // The heap growth factor depends on the heap size after a GC and the GC
  // frequency. If GCs are not triggering in rapid succession, use a lower
  // threshold so that we will collect garbage sooner.
  if (!state.inHighFrequencyGCMode()) {
    return tunables.lowFrequencyHeapGrowth();
  }

  // For high frequency GCs we let the heap grow depending on whether we
  // classify the heap as small, medium or large. There are parameters for
  // small and large heap sizes and linear interpolation is used between them
  // for medium sized heaps.

  MOZ_ASSERT(tunables.smallHeapSizeMaxBytes() <=
             tunables.largeHeapSizeMinBytes());
  MOZ_ASSERT(tunables.highFrequencyLargeHeapGrowth() <=
             tunables.highFrequencySmallHeapGrowth());

  return LinearInterpolate(double(lastBytes),
                           double(tunables.smallHeapSizeMaxBytes()),
                           tunables.highFrequencySmallHeapGrowth(),
                           double(tunables.largeHeapSizeMinBytes()),
                           tunables.highFrequencyLargeHeapGrowth());
}

/* static */
size_t GCHeapThreshold::computeZoneTriggerBytes(
    double growthFactor, size_t lastBytes,
    const GCSchedulingTunables& tunables) {
  size_t base = std::max(lastBytes, tunables.gcZoneAllocThresholdBase());
  double trigger = double(base) * growthFactor;
  double triggerMax =
      double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
  return ToClampedSize(uint64_t(std::min(triggerMax, trigger)));
}

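// For example (illustrative values, assuming the base threshold is below the
// zone size): with growthFactor = 1.5 and lastBytes = 40 MB the trigger is
// 60 MB, while gcMaxBytes = 1 GB and a large-heap incremental limit of 1.1
// cap it at roughly 930 MB, so the 60 MB value stands.
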
// Parameters for balanced heap limits computation.

// The W0 parameter. How much memory can be traversed in the minimum
// collection time.
static constexpr double BalancedHeapBaseMB = 5.0;

// The minimum heap limit. Do not constrain the heap to any less than this
// size.
static constexpr double MinBalancedHeapLimitMB = 10.0;

// The minimum amount of additional space to allow beyond the retained size.
static constexpr double MinBalancedHeadroomMB = 3.0;

// The maximum factor by which to expand the heap beyond the retained size.
static constexpr double MaxHeapGrowth = 3.0;

// The default allocation rate in MB/s allocated by the mutator to use before
// we have an estimate. Used to set the heap limit for zones that have not yet
// been collected.
static constexpr double DefaultAllocationRate = 0.0;

// The s0 parameter. The default collection rate in MB/s to use before we have
// an estimate. Used to set the heap limit for zones that have not yet been
// collected.
static constexpr double DefaultCollectionRate = 200.0;

double GCHeapThreshold::computeBalancedHeapLimit(
    size_t lastBytes, double allocationRate, double collectionRate,
    const GCSchedulingTunables& tunables) {
  MOZ_ASSERT(tunables.balancedHeapLimitsEnabled());

  // Optimal heap limits as described in https://arxiv.org/abs/2204.10455

  double W = double(lastBytes) / BytesPerMB;  // Retained size in MB.
  double W0 = BalancedHeapBaseMB;
  double d = tunables.heapGrowthFactor();  // Rearranged constant 'c'.
  double g = allocationRate;
  double s = collectionRate;
  double f = d * sqrt((W + W0) * (g / s));
  double M = W + std::min(f, MaxHeapGrowth * W);
  M = std::max({MinBalancedHeapLimitMB, W + MinBalancedHeadroomMB, M});

  return M * double(BytesPerMB);
}

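// Worked example (illustrative values; 'd' comes from the heapGrowthFactor
// parameter): with W = 95 MB retained, g = 50 MB/s, s = 200 MB/s and d = 2,
// f = 2 * sqrt((95 + 5) * (50 / 200)) = 10 MB, so the limit is
// M = 95 + 10 = 105 MB, above the floor of max(10 MB, 95 + 3 MB).
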
void GCHeapThreshold::updateStartThreshold(
    size_t lastBytes, mozilla::Maybe<double> allocationRate,
    mozilla::Maybe<double> collectionRate, const GCSchedulingTunables& tunables,
    const GCSchedulingState& state, bool isAtomsZone) {
  if (!tunables.balancedHeapLimitsEnabled()) {
    double growthFactor =
        computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);

    startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, tunables);
  } else {
    double threshold = computeBalancedHeapLimit(
        lastBytes, allocationRate.valueOr(DefaultAllocationRate),
        collectionRate.valueOr(DefaultCollectionRate), tunables);

    double triggerMax =
        double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();

    startBytes_ = ToClampedSize(uint64_t(std::min(triggerMax, threshold)));
  }

  setIncrementalLimitFromStartBytes(lastBytes, tunables);
}

/* static */
size_t MallocHeapThreshold::computeZoneTriggerBytes(double growthFactor,
                                                    size_t lastBytes,
                                                    size_t baseBytes) {
  return ToClampedSize(
      uint64_t(double(std::max(lastBytes, baseBytes)) * growthFactor));
}

void MallocHeapThreshold::updateStartThreshold(
    size_t lastBytes, const GCSchedulingTunables& tunables,
    const GCSchedulingState& state) {
  double growthFactor =
      computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);

  startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes,
                                        tunables.mallocThresholdBase());

  setIncrementalLimitFromStartBytes(lastBytes, tunables);
}

#ifdef DEBUG

static const char* MemoryUseName(MemoryUse use) {
  switch (use) {
# define DEFINE_CASE(Name) \
  case MemoryUse::Name:    \
    return #Name;
    JS_FOR_EACH_MEMORY_USE(DEFINE_CASE)
# undef DEFINE_CASE
  }

  MOZ_CRASH("Unknown memory use");
}

MemoryTracker::MemoryTracker() : mutex(mutexid::MemoryTracker) {}

void MemoryTracker::checkEmptyOnDestroy() {
  bool ok = true;

  if (!gcMap.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to JS::RemoveAssociatedMemory:\n");
    for (auto r = gcMap.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx %s\n", r.front().key().ptr(),
              r.front().value(), MemoryUseName(r.front().key().use()));
    }
  }

  if (!nonGCMap.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to Zone::decNonGCMemory:\n");
    for (auto r = nonGCMap.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx\n", r.front().key().ptr(),
              r.front().value());
    }
  }

  MOZ_ASSERT(ok);
}

/* static */
inline bool MemoryTracker::isGCMemoryUse(MemoryUse use) {
  // Most memory uses are for memory associated with GC things but some are
  // for memory associated with non-GC thing pointers.
  return !isNonGCMemoryUse(use);
}

/* static */
inline bool MemoryTracker::isNonGCMemoryUse(MemoryUse use) {
  return use == MemoryUse::TrackedAllocPolicy;
}

/* static */
inline bool MemoryTracker::allowMultipleAssociations(MemoryUse use) {
  // For most uses only one association is possible for each GC thing. Allow a
  // one-to-many relationship only where necessary.
  return isNonGCMemoryUse(use) || use == MemoryUse::RegExpSharedBytecode ||
         use == MemoryUse::BreakpointSite || use == MemoryUse::Breakpoint ||
         use == MemoryUse::ForOfPICStub || use == MemoryUse::ICUObject;
}

void MemoryTracker::trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(cell->isTenured());
  MOZ_ASSERT(isGCMemoryUse(use));

  LockGuard<Mutex> lock(mutex);

  Key<Cell> key{cell, use};
  AutoEnterOOMUnsafeRegion oomUnsafe;
  auto ptr = gcMap.lookupForAdd(key);
  if (ptr) {
    if (!allowMultipleAssociations(use)) {
      MOZ_CRASH_UNSAFE_PRINTF("Association already present: %p 0x%zx %s", cell,
                              nbytes, MemoryUseName(use));
    }
    ptr->value() += nbytes;
    return;
  }

  if (!gcMap.add(ptr, key, nbytes)) {
    oomUnsafe.crash("MemoryTracker::trackGCMemory");
  }
}

void MemoryTracker::untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(cell->isTenured());

  LockGuard<Mutex> lock(mutex);

  Key<Cell> key{cell, use};
  auto ptr = gcMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("Association not found: %p 0x%zx %s", cell, nbytes,
                            MemoryUseName(use));
  }

  if (!allowMultipleAssociations(use) && ptr->value() != nbytes) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "Association for %p %s has different size: "
        "expected 0x%zx but got 0x%zx",
        cell, MemoryUseName(use), ptr->value(), nbytes);
  }

  if (nbytes > ptr->value()) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "Association for %p %s size is too large: "
        "expected at most 0x%zx but got 0x%zx",
        cell, MemoryUseName(use), ptr->value(), nbytes);
  }

  ptr->value() -= nbytes;

  if (ptr->value() == 0) {
    gcMap.remove(ptr);
  }
}

void MemoryTracker::swapGCMemory(Cell* a, Cell* b, MemoryUse use) {
  Key<Cell> ka{a, use};
  Key<Cell> kb{b, use};

  LockGuard<Mutex> lock(mutex);

  size_t sa = getAndRemoveEntry(ka, lock);
  size_t sb = getAndRemoveEntry(kb, lock);

  AutoEnterOOMUnsafeRegion oomUnsafe;

  if ((sa && b->isTenured() && !gcMap.put(kb, sa)) ||
      (sb && a->isTenured() && !gcMap.put(ka, sb))) {
    oomUnsafe.crash("MemoryTracker::swapGCMemory");
  }
}

size_t MemoryTracker::getAndRemoveEntry(const Key<Cell>& key,
                                        LockGuard<Mutex>& lock) {
  auto ptr = gcMap.lookup(key);
  if (!ptr) {
    return 0;
  }

  size_t size = ptr->value();
  gcMap.remove(ptr);
  return size;
}

void MemoryTracker::registerNonGCMemory(void* mem, MemoryUse use) {
  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookupForAdd(key);
  if (ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s association %p already registered",
                            MemoryUseName(use), mem);
  }

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!nonGCMap.add(ptr, key, 0)) {
    oomUnsafe.crash("MemoryTracker::registerNonGCMemory");
  }
}

void MemoryTracker::unregisterNonGCMemory(void* mem, MemoryUse use) {
  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
                            mem);
  }

  if (ptr->value() != 0) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "%s association %p still has 0x%zx bytes associated",
        MemoryUseName(use), mem, ptr->value());
  }

  nonGCMap.remove(ptr);
}

void MemoryTracker::moveNonGCMemory(void* dst, void* src, MemoryUse use) {
  LockGuard<Mutex> lock(mutex);

  Key<void> srcKey{src, use};
  auto srcPtr = nonGCMap.lookup(srcKey);
  if (!srcPtr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
                            src);
  }

  size_t nbytes = srcPtr->value();
  nonGCMap.remove(srcPtr);

  Key<void> dstKey{dst, use};
  auto dstPtr = nonGCMap.lookupForAdd(dstKey);
  if (dstPtr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s %p already registered", MemoryUseName(use),
                            dst);
  }

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!nonGCMap.add(dstPtr, dstKey, nbytes)) {
    oomUnsafe.crash("MemoryTracker::moveNonGCMemory");
  }
}

void MemoryTracker::incNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(isNonGCMemoryUse(use));

  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
                            mem);
  }

  ptr->value() += nbytes;
}

void MemoryTracker::decNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(isNonGCMemoryUse(use));

  LockGuard<Mutex> lock(mutex);

  Key<void> key{mem, use};
  auto ptr = nonGCMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
                            mem);
  }

  size_t& value = ptr->value();
  if (nbytes > value) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "%s allocation %p is too large: "
        "expected at most 0x%zx but got 0x%zx bytes",
        MemoryUseName(use), mem, value, nbytes);
  }

  value -= nbytes;
}

void MemoryTracker::fixupAfterMovingGC() {
  // Update the table after we move GC things. We don't use StableCellHasher
  // because that would create a difference between debug and release builds.
  for (GCMap::Enum e(gcMap); !e.empty(); e.popFront()) {
    const auto& key = e.front().key();
    Cell* cell = key.ptr();
    if (cell->isForwarded()) {
      cell = gc::RelocationOverlay::fromCell(cell)->forwardingAddress();
      e.rekeyFront(Key<Cell>{cell, key.use()});
    }
  }
}

template <typename Ptr>
inline MemoryTracker::Key<Ptr>::Key(Ptr* ptr, MemoryUse use)
    : ptr_(uint64_t(ptr)), use_(uint64_t(use)) {
# ifdef JS_64BIT
  static_assert(sizeof(Key) == 8,
                "MemoryTracker::Key should be packed into 8 bytes");
# endif
  MOZ_ASSERT(this->ptr() == ptr);
  MOZ_ASSERT(this->use() == use);
}

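// Note: the 8-byte packing asserted above relies on Key declaring ptr_ and
// use_ as narrow uint64_t bitfields in the header (assumed; the declaration
// is not shown here), which in turn assumes that user-space pointers fit in
// the low bits of a 64-bit word on supported platforms.
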
template <typename Ptr>
inline Ptr* MemoryTracker::Key<Ptr>::ptr() const {
  return reinterpret_cast<Ptr*>(ptr_);
}

template <typename Ptr>
inline MemoryUse MemoryTracker::Key<Ptr>::use() const {
  return static_cast<MemoryUse>(use_);
}

template <typename Ptr>
inline HashNumber MemoryTracker::Hasher<Ptr>::hash(const Lookup& l) {
  return mozilla::HashGeneric(DefaultHasher<Ptr*>::hash(l.ptr()),
                              DefaultHasher<unsigned>::hash(unsigned(l.use())));
}

template <typename Ptr>
inline bool MemoryTracker::Hasher<Ptr>::match(const KeyT& k, const Lookup& l) {
  return k.ptr() == l.ptr() && k.use() == l.use();
}

template <typename Ptr>
inline void MemoryTracker::Hasher<Ptr>::rekey(KeyT& k, const KeyT& newKey) {
  k = newKey;
}

#endif  // DEBUG