1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
10 #include "mozilla/Atomics.h"
11 #include "mozilla/DoublyLinkedList.h"
12 #include "mozilla/EnumSet.h"
13 #include "mozilla/Maybe.h"
14 #include "mozilla/TimeStamp.h"
16 #include "gc/ArenaList.h"
17 #include "gc/AtomMarking.h"
18 #include "gc/GCContext.h"
19 #include "gc/GCMarker.h"
20 #include "gc/GCParallelTask.h"
21 #include "gc/IteratorUtils.h"
22 #include "gc/Nursery.h"
23 #include "gc/Scheduling.h"
24 #include "gc/Statistics.h"
25 #include "gc/StoreBuffer.h"
26 #include "js/friend/PerformanceHint.h"
27 #include "js/GCAnnotations.h"
28 #include "js/UniquePtr.h"
29 #include "vm/AtomsTable.h"
34 class AutoLockGCBgAlloc
;
35 class AutoLockHelperThreadState
;
36 class FinalizationRegistryObject
;
37 class FinalizationRecordObject
;
38 class FinalizationQueueObject
;
40 class VerifyPreTracer
;
// Vector of tenured-cell pointers with no inline storage. Per the name it
// records black->gray edges found in the marking graph — NOTE(review):
// confirm exact semantics at the use sites, which are outside this chunk.
using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
// Vector of zone pointers, with inline storage for 4 elements so the common
// small-runtime case avoids a heap allocation.
using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>;
48 class AutoCallGCCallbacks
;
50 class AutoHeapSession
;
51 class AutoTraceSession
;
53 class MarkingValidator
;
55 class ParallelMarkTask
;
56 enum class ShouldCheckThresholds
;
57 class SweepGroupsIter
;
59 // Interface to a sweep action.
61 // The arguments passed to each action.
68 virtual ~SweepAction() = default;
69 virtual IncrementalProgress
run(Args
& state
) = 0;
70 virtual void assertFinished() const = 0;
71 virtual bool shouldSkip() { return false; }
79 ChunkPool() : head_(nullptr), count_(0) {}
80 ChunkPool(const ChunkPool
& other
) = delete;
81 ChunkPool(ChunkPool
&& other
) { *this = std::move(other
); }
85 MOZ_ASSERT(count_
== 0);
88 ChunkPool
& operator=(const ChunkPool
& other
) = delete;
89 ChunkPool
& operator=(ChunkPool
&& other
) {
91 other
.head_
= nullptr;
92 count_
= other
.count_
;
97 bool empty() const { return !head_
; }
98 size_t count() const { return count_
; }
100 TenuredChunk
* head() {
105 void push(TenuredChunk
* chunk
);
106 TenuredChunk
* remove(TenuredChunk
* chunk
);
111 TenuredChunk
* mergeSort(TenuredChunk
* list
, size_t count
);
112 bool isSorted() const;
116 bool contains(TenuredChunk
* chunk
) const;
118 void verifyChunks() const;
122 // Pool mutation does not invalidate an Iter unless the mutation
123 // is of the TenuredChunk currently being visited by the Iter.
126 explicit Iter(ChunkPool
& pool
) : current_(pool
.head_
) {}
127 bool done() const { return !current_
; }
129 TenuredChunk
* get() const { return current_
; }
130 operator TenuredChunk
*() const { return get(); }
131 TenuredChunk
* operator->() const { return get(); }
134 TenuredChunk
* current_
;
138 class BackgroundMarkTask
: public GCParallelTask
{
140 explicit BackgroundMarkTask(GCRuntime
* gc
);
141 void setBudget(const SliceBudget
& budget
) { this->budget
= budget
; }
142 void run(AutoLockHelperThreadState
& lock
) override
;
148 class BackgroundUnmarkTask
: public GCParallelTask
{
150 explicit BackgroundUnmarkTask(GCRuntime
* gc
);
152 void run(AutoLockHelperThreadState
& lock
) override
;
157 class BackgroundSweepTask
: public GCParallelTask
{
159 explicit BackgroundSweepTask(GCRuntime
* gc
);
160 void run(AutoLockHelperThreadState
& lock
) override
;
163 class BackgroundFreeTask
: public GCParallelTask
{
165 explicit BackgroundFreeTask(GCRuntime
* gc
);
166 void run(AutoLockHelperThreadState
& lock
) override
;
169 // Performs extra allocation off thread so that when memory is required on the
170 // main thread it will already be available and waiting.
171 class BackgroundAllocTask
: public GCParallelTask
{
172 // Guarded by the GC lock.
173 GCLockData
<ChunkPool
&> chunkPool_
;
178 BackgroundAllocTask(GCRuntime
* gc
, ChunkPool
& pool
);
179 bool enabled() const { return enabled_
; }
181 void run(AutoLockHelperThreadState
& lock
) override
;
184 // Search the provided chunks for free arenas and decommit them.
185 class BackgroundDecommitTask
: public GCParallelTask
{
187 explicit BackgroundDecommitTask(GCRuntime
* gc
);
188 void run(AutoLockHelperThreadState
& lock
) override
;
191 template <typename F
>
196 Callback() : op(nullptr), data(nullptr) {}
197 Callback(F op
, void* data
) : op(op
), data(data
) {}
// Vector of registered Callback<F> (function pointer + opaque user data)
// entries, with inline storage for 4 callbacks.
template <typename F>
using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
// Map from the address of a rooted Value to a descriptive C-string name
// (e.g. for the root's origin; keys are hashed by pointer identity).
using RootedValueMap =
    HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy>;
// A set of AllocKind values, represented as a uint64_t bitmask.
using AllocKinds = mozilla::EnumSet<AllocKind, uint64_t>;
208 // A singly linked list of zones.
210 static Zone
* const End
;
219 bool isEmpty() const;
222 void prepend(Zone
* zone
);
223 void append(Zone
* zone
);
224 void prependList(ZoneList
&& other
);
225 void appendList(ZoneList
&& other
);
230 explicit ZoneList(Zone
* singleZone
);
233 ZoneList(const ZoneList
& other
) = delete;
234 ZoneList
& operator=(const ZoneList
& other
) = delete;
237 struct WeakCacheToSweep
{
238 JS::detail::WeakCacheBase
* cache
;
242 class WeakCacheSweepIterator
{
243 using WeakCacheBase
= JS::detail::WeakCacheBase
;
246 WeakCacheBase
* sweepCache
;
249 explicit WeakCacheSweepIterator(JS::Zone
* sweepGroup
);
252 WeakCacheToSweep
get() const;
259 struct SweepingTracer final
: public GenericTracerImpl
<SweepingTracer
> {
260 explicit SweepingTracer(JSRuntime
* rt
);
263 template <typename T
>
264 void onEdge(T
** thingp
, const char* name
);
265 friend class GenericTracerImpl
<SweepingTracer
>;
270 explicit GCRuntime(JSRuntime
* rt
);
271 [[nodiscard
]] bool init(uint32_t maxbytes
);
272 bool wasInitialized() const { return initialized
; }
277 Zone
* zone
= zones()[0];
278 MOZ_ASSERT(JS::shadow::Zone::from(zone
)->isAtomsZone());
281 Zone
* maybeSharedAtomsZone() { return sharedAtomsZone_
; }
283 [[nodiscard
]] bool freezeSharedAtomsZone();
284 void restoreSharedAtomsZone();
286 JS::HeapState
heapState() const { return heapState_
; }
288 inline bool hasZealMode(ZealMode mode
);
289 inline void clearZealMode(ZealMode mode
);
290 inline bool needZealousGC();
291 inline bool hasIncrementalTwoSliceZealMode();
293 [[nodiscard
]] bool addRoot(Value
* vp
, const char* name
);
294 void removeRoot(Value
* vp
);
296 [[nodiscard
]] bool setParameter(JSContext
* cx
, JSGCParamKey key
,
298 void resetParameter(JSContext
* cx
, JSGCParamKey key
);
299 uint32_t getParameter(JSGCParamKey key
);
301 void setPerformanceHint(PerformanceHint hint
);
302 bool isInPageLoad() const { return inPageLoadCount
!= 0; }
304 [[nodiscard
]] bool triggerGC(JS::GCReason reason
);
305 // Check whether to trigger a zone GC after allocating GC cells.
306 void maybeTriggerGCAfterAlloc(Zone
* zone
);
307 // Check whether to trigger a zone GC after malloc memory.
308 void maybeTriggerGCAfterMalloc(Zone
* zone
);
309 bool maybeTriggerGCAfterMalloc(Zone
* zone
, const HeapSize
& heap
,
310 const HeapThreshold
& threshold
,
311 JS::GCReason reason
);
312 // The return value indicates if we were able to do the GC.
313 bool triggerZoneGC(Zone
* zone
, JS::GCReason reason
, size_t usedBytes
,
314 size_t thresholdBytes
);
318 // Return whether we want to run a major GC. If eagerOk is true, include eager
319 // triggers (eg EAGER_ALLOC_TRIGGER) in this determination, and schedule all
320 // zones that exceed the eager thresholds.
321 JS::GCReason
wantMajorGC(bool eagerOk
);
322 bool checkEagerAllocTrigger(const HeapSize
& size
,
323 const HeapThreshold
& threshold
);
325 // Do a minor GC if requested, followed by a major GC if requested. The return
326 // value indicates whether a major GC was performed.
327 bool gcIfRequested() { return gcIfRequestedImpl(false); }
329 // Internal function to do a GC if previously requested. But if not and
330 // eagerOk, do an eager GC for all Zones that have exceeded the eager
333 // Return whether a major GC was performed or started.
334 bool gcIfRequestedImpl(bool eagerOk
);
336 void gc(JS::GCOptions options
, JS::GCReason reason
);
337 void startGC(JS::GCOptions options
, JS::GCReason reason
,
338 const SliceBudget
& budget
);
339 void gcSlice(JS::GCReason reason
, const SliceBudget
& budget
);
340 void finishGC(JS::GCReason reason
);
342 void startDebugGC(JS::GCOptions options
, const SliceBudget
& budget
);
343 void debugGCSlice(const SliceBudget
& budget
);
346 void notifyRootsRemoved();
348 enum TraceOrMarkRuntime
{ TraceRuntime
, MarkRuntime
};
349 void traceRuntime(JSTracer
* trc
, AutoTraceSession
& session
);
350 void traceRuntimeForMinorGC(JSTracer
* trc
, AutoGCSession
& session
);
352 void purgeRuntimeForMinorGC();
354 void shrinkBuffers();
355 void onOutOfMallocMemory();
356 void onOutOfMallocMemory(const AutoLockGC
& lock
);
358 Nursery
& nursery() { return nursery_
.ref(); }
359 gc::StoreBuffer
& storeBuffer() { return storeBuffer_
.ref(); }
361 void minorGC(JS::GCReason reason
,
362 gcstats::PhaseKind phase
= gcstats::PhaseKind::MINOR_GC
)
364 void evictNursery(JS::GCReason reason
= JS::GCReason::EVICT_NURSERY
) {
365 minorGC(reason
, gcstats::PhaseKind::EVICT_NURSERY
);
368 void* addressOfNurseryPosition() {
369 return nursery_
.refNoCheck().addressOfPosition();
372 const void* addressOfLastBufferedWholeCell() {
373 return storeBuffer_
.refNoCheck().addressOfLastBufferedWholeCell();
377 const uint32_t* addressOfZealModeBits() { return &zealModeBits
.refNoCheck(); }
378 void getZealBits(uint32_t* zealBits
, uint32_t* frequency
,
379 uint32_t* nextScheduled
);
380 void setZeal(uint8_t zeal
, uint32_t frequency
);
381 void unsetZeal(uint8_t zeal
);
382 bool parseAndSetZeal(const char* str
);
383 void setNextScheduled(uint32_t count
);
384 void verifyPreBarriers();
385 void maybeVerifyPreBarriers(bool always
);
386 bool selectForMarking(JSObject
* object
);
387 void clearSelectedForMarking();
388 void setDeterministic(bool enable
);
389 void setMarkStackLimit(size_t limit
, AutoLockGC
& lock
);
392 uint64_t nextCellUniqueId() {
393 MOZ_ASSERT(nextCellUniqueId_
> 0);
394 uint64_t uid
= ++nextCellUniqueId_
;
// Record whether the embedding reported system memory pressure; the flag is
// read back via systemHasLowMemory().
void setLowMemoryState(bool newState) { lowMemoryState = newState; }
// Whether a low-memory state was last recorded by setLowMemoryState().
bool systemHasLowMemory() const { return lowMemoryState; }
402 // Internal public interface
403 ZoneVector
& zones() { return zones_
.ref(); }
404 gcstats::Statistics
& stats() { return stats_
.ref(); }
405 const gcstats::Statistics
& stats() const { return stats_
.ref(); }
406 State
state() const { return incrementalState
; }
407 bool isHeapCompacting() const { return state() == State::Compact
; }
408 bool isForegroundSweeping() const { return state() == State::Sweep
; }
409 bool isBackgroundSweeping() const { return sweepTask
.wasStarted(); }
410 bool isBackgroundMarking() const { return markTask
.wasStarted(); }
411 void waitBackgroundSweepEnd();
412 void waitBackgroundAllocEnd() { allocTask
.cancelAndWait(); }
413 void waitBackgroundFreeEnd();
414 void waitForBackgroundTasks();
415 bool isWaitingOnBackgroundTask() const;
417 void lockGC() { lock
.lock(); }
418 bool tryLockGC() { return lock
.tryLock(); }
419 void unlockGC() { lock
.unlock(); }
422 void assertCurrentThreadHasLockedGC() const {
423 lock
.assertOwnedByCurrentThread();
427 void setAlwaysPreserveCode() { alwaysPreserveCode
= true; }
429 bool isIncrementalGCAllowed() const { return incrementalAllowed
; }
430 void disallowIncrementalGC() { incrementalAllowed
= false; }
432 void setIncrementalGCEnabled(bool enabled
);
434 bool isIncrementalGCEnabled() const { return incrementalGCEnabled
; }
435 bool isPerZoneGCEnabled() const { return perZoneGCEnabled
; }
436 bool isCompactingGCEnabled() const;
437 bool isParallelMarkingEnabled() const { return parallelMarkingEnabled
; }
439 bool isIncrementalGCInProgress() const {
440 return state() != State::NotActive
&& !isVerifyPreBarriersEnabled();
443 bool hasForegroundWork() const;
445 bool isShrinkingGC() const { return gcOptions() == JS::GCOptions::Shrink
; }
447 bool isShutdownGC() const { return gcOptions() == JS::GCOptions::Shutdown
; }
450 bool isShuttingDown() const { return hadShutdownGC
; }
453 bool initSweepActions();
455 void setGrayRootsTracer(JSGrayRootsTracer traceOp
, void* data
);
456 [[nodiscard
]] bool addBlackRootsTracer(JSTraceDataOp traceOp
, void* data
);
457 void removeBlackRootsTracer(JSTraceDataOp traceOp
, void* data
);
458 void clearBlackAndGrayRootTracers();
460 void setGCCallback(JSGCCallback callback
, void* data
);
461 void callGCCallback(JSGCStatus status
, JS::GCReason reason
) const;
462 void setObjectsTenuredCallback(JSObjectsTenuredCallback callback
, void* data
);
463 void callObjectsTenuredCallback();
464 [[nodiscard
]] bool addFinalizeCallback(JSFinalizeCallback callback
,
466 void removeFinalizeCallback(JSFinalizeCallback callback
);
467 void setHostCleanupFinalizationRegistryCallback(
468 JSHostCleanupFinalizationRegistryCallback callback
, void* data
);
469 void callHostCleanupFinalizationRegistryCallback(
470 JSFunction
* doCleanup
, GlobalObject
* incumbentGlobal
);
471 [[nodiscard
]] bool addWeakPointerZonesCallback(
472 JSWeakPointerZonesCallback callback
, void* data
);
473 void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback
);
474 [[nodiscard
]] bool addWeakPointerCompartmentCallback(
475 JSWeakPointerCompartmentCallback callback
, void* data
);
476 void removeWeakPointerCompartmentCallback(
477 JSWeakPointerCompartmentCallback callback
);
478 JS::GCSliceCallback
setSliceCallback(JS::GCSliceCallback callback
);
479 bool addNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback
,
481 void removeNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback
,
483 JS::DoCycleCollectionCallback
setDoCycleCollectionCallback(
484 JS::DoCycleCollectionCallback callback
);
485 void callNurseryCollectionCallbacks(JS::GCNurseryProgress progress
,
486 JS::GCReason reason
);
488 bool addFinalizationRegistry(JSContext
* cx
,
489 Handle
<FinalizationRegistryObject
*> registry
);
490 bool registerWithFinalizationRegistry(JSContext
* cx
, HandleObject target
,
491 HandleObject record
);
492 void queueFinalizationRegistryForCleanup(FinalizationQueueObject
* queue
);
494 void nukeFinalizationRecordWrapper(JSObject
* wrapper
,
495 FinalizationRecordObject
* record
);
496 void nukeWeakRefWrapper(JSObject
* wrapper
, WeakRefObject
* weakRef
);
498 void setFullCompartmentChecks(bool enable
);
500 // Get the main marking tracer.
501 GCMarker
& marker() { return *markers
[0]; }
503 JS::Zone
* getCurrentSweepGroup() { return currentSweepGroup
; }
504 unsigned getCurrentSweepGroupIndex() {
505 MOZ_ASSERT_IF(unsigned(state()) < unsigned(State::Sweep
),
506 sweepGroupIndex
== 0);
507 return sweepGroupIndex
;
510 uint64_t gcNumber() const { return number
; }
511 void incGcNumber() { ++number
; }
513 uint64_t minorGCCount() const { return minorGCNumber
; }
514 void incMinorGcNumber() { ++minorGCNumber
; }
516 uint64_t majorGCCount() const { return majorGCNumber
; }
517 void incMajorGcNumber() { ++majorGCNumber
; }
519 uint64_t gcSliceCount() const { return sliceNumber
; }
520 void incGcSliceNumber() { ++sliceNumber
; }
522 int64_t defaultSliceBudgetMS() const { return defaultTimeBudgetMS_
; }
524 bool isIncrementalGc() const { return isIncremental
; }
525 bool isFullGc() const { return isFull
; }
526 bool isCompactingGc() const { return isCompacting
; }
527 bool didCompactZones() const { return isCompacting
&& zonesCompacted
; }
529 bool areGrayBitsValid() const { return grayBitsValid
; }
530 void setGrayBitsInvalid() { grayBitsValid
= false; }
532 mozilla::TimeStamp
lastGCStartTime() const { return lastGCStartTime_
; }
533 mozilla::TimeStamp
lastGCEndTime() const { return lastGCEndTime_
; }
535 bool majorGCRequested() const {
536 return majorGCTriggerReason
!= JS::GCReason::NO_REASON
;
539 double computeHeapGrowthFactor(size_t lastBytes
);
540 size_t computeTriggerBytes(double growthFactor
, size_t lastBytes
);
542 inline void updateOnFreeArenaAlloc(const TenuredChunkInfo
& info
);
543 void updateOnArenaFree() { ++numArenasFreeCommitted
; }
545 ChunkPool
& fullChunks(const AutoLockGC
& lock
) { return fullChunks_
.ref(); }
546 ChunkPool
& availableChunks(const AutoLockGC
& lock
) {
547 return availableChunks_
.ref();
549 ChunkPool
& emptyChunks(const AutoLockGC
& lock
) { return emptyChunks_
.ref(); }
550 const ChunkPool
& fullChunks(const AutoLockGC
& lock
) const {
551 return fullChunks_
.ref();
553 const ChunkPool
& availableChunks(const AutoLockGC
& lock
) const {
554 return availableChunks_
.ref();
556 const ChunkPool
& emptyChunks(const AutoLockGC
& lock
) const {
557 return emptyChunks_
.ref();
// Iterator chaining two ChunkPool iterators so the available and full chunk
// pools can be traversed as one sequence (see allNonEmptyChunks()).
using NonEmptyChunksIter = ChainedIterator<ChunkPool::Iter, 2>;
560 NonEmptyChunksIter
allNonEmptyChunks(const AutoLockGC
& lock
) {
561 return NonEmptyChunksIter(availableChunks(lock
), fullChunks(lock
));
563 uint32_t minEmptyChunkCount(const AutoLockGC
& lock
) const {
564 return minEmptyChunkCount_
;
566 uint32_t maxEmptyChunkCount(const AutoLockGC
& lock
) const {
567 return maxEmptyChunkCount_
;
570 void verifyAllChunks();
573 TenuredChunk
* getOrAllocChunk(AutoLockGCBgAlloc
& lock
);
574 void recycleChunk(TenuredChunk
* chunk
, const AutoLockGC
& lock
);
577 void startVerifyPreBarriers();
578 void endVerifyPreBarriers();
579 void finishVerifier();
580 bool isVerifyPreBarriersEnabled() const { return verifyPreData
.refNoCheck(); }
581 bool shouldYieldForZeal(ZealMode mode
);
583 bool isVerifyPreBarriersEnabled() const { return false; }
586 #ifdef JSGC_HASH_TABLE_CHECKS
587 void checkHashTablesAfterMovingGC();
591 // Crawl the heap to check whether an arbitrary pointer is within a cell of
593 bool isPointerWithinTenuredCell(void* ptr
, JS::TraceKind traceKind
);
595 bool hasZone(Zone
* target
);
598 // Queue memory to be freed on a background thread if possible.
599 void queueUnusedLifoBlocksForFree(LifoAlloc
* lifo
);
600 void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc
* lifo
);
601 void queueBuffersForFreeAfterMinorGC(Nursery::BufferSet
& buffers
);
603 // Public here for ReleaseArenaLists and FinalizeTypedArenas.
604 void releaseArena(Arena
* arena
, const AutoLockGC
& lock
);
606 // Allocator internals.
607 static void* refillFreeListInGC(Zone
* zone
, AllocKind thingKind
);
610 void delayMarkingChildren(gc::Cell
* cell
, MarkColor color
);
611 bool hasDelayedMarking() const;
612 void markAllDelayedChildren(ShouldReportMarkTime reportTime
);
615 * Concurrent sweep infrastructure.
617 void startTask(GCParallelTask
& task
, AutoLockHelperThreadState
& lock
);
618 void joinTask(GCParallelTask
& task
, AutoLockHelperThreadState
& lock
);
619 void updateHelperThreadCount();
620 bool updateMarkersVector();
621 size_t parallelWorkerCount() const;
622 size_t markingWorkerCount() const;
625 bool registerWeakRef(HandleObject target
, HandleObject weakRef
);
626 void traceKeptObjects(JSTracer
* trc
);
628 JS::GCReason
lastStartReason() const { return initialReason
; }
630 void updateAllocationRates();
632 // Allocator internals
633 static void* refillFreeList(JSContext
* cx
, AllocKind thingKind
);
634 void attemptLastDitchGC(JSContext
* cx
);
638 const GCVector
<HeapPtr
<JS::Value
>, 0, SystemAllocPolicy
>& getTestMarkQueue()
640 [[nodiscard
]] bool appendTestMarkQueue(const JS::Value
& value
);
641 void clearTestMarkQueue();
642 size_t testMarkQueuePos() const;
646 enum IncrementalResult
{ ResetIncremental
= 0, Ok
};
648 [[nodiscard
]] bool setParameter(JSGCParamKey key
, uint32_t value
,
650 void resetParameter(JSGCParamKey key
, AutoLockGC
& lock
);
651 uint32_t getParameter(JSGCParamKey key
, const AutoLockGC
& lock
);
652 bool setThreadParameter(JSGCParamKey key
, uint32_t value
, AutoLockGC
& lock
);
653 void resetThreadParameter(JSGCParamKey key
, AutoLockGC
& lock
);
654 void updateThreadDataStructures(AutoLockGC
& lock
);
656 JS::GCOptions
gcOptions() const { return maybeGcOptions
.ref().ref(); }
658 TriggerResult
checkHeapThreshold(Zone
* zone
, const HeapSize
& heapSize
,
659 const HeapThreshold
& heapThreshold
);
661 void updateSchedulingStateOnGCStart();
662 void updateSchedulingStateAfterCollection(mozilla::TimeStamp currentTime
);
663 void updateAllGCStartThresholds();
665 // For ArenaLists::allocateFromArena()
666 friend class ArenaLists
;
667 TenuredChunk
* pickChunk(AutoLockGCBgAlloc
& lock
);
668 Arena
* allocateArena(TenuredChunk
* chunk
, Zone
* zone
, AllocKind kind
,
669 ShouldCheckThresholds checkThresholds
,
670 const AutoLockGC
& lock
);
673 * Return the list of chunks that can be released outside the GC lock.
674 * Must be called either during the GC or with the GC lock taken.
676 friend class BackgroundDecommitTask
;
677 bool tooManyEmptyChunks(const AutoLockGC
& lock
);
678 ChunkPool
expireEmptyChunkPool(const AutoLockGC
& lock
);
679 void freeEmptyChunks(const AutoLockGC
& lock
);
680 void prepareToFreeChunk(TenuredChunkInfo
& info
);
681 void setMinEmptyChunkCount(uint32_t value
, const AutoLockGC
& lock
);
682 void setMaxEmptyChunkCount(uint32_t value
, const AutoLockGC
& lock
);
684 friend class BackgroundAllocTask
;
685 bool wantBackgroundAllocation(const AutoLockGC
& lock
) const;
686 void startBackgroundAllocTaskIfIdle();
688 void requestMajorGC(JS::GCReason reason
);
689 SliceBudget
defaultBudget(JS::GCReason reason
, int64_t millis
);
690 bool maybeIncreaseSliceBudget(SliceBudget
& budget
);
691 bool maybeIncreaseSliceBudgetForLongCollections(SliceBudget
& budget
);
692 bool maybeIncreaseSliceBudgetForUrgentCollections(SliceBudget
& budget
);
693 IncrementalResult
budgetIncrementalGC(bool nonincrementalByAPI
,
695 SliceBudget
& budget
);
696 void checkZoneIsScheduled(Zone
* zone
, JS::GCReason reason
,
697 const char* trigger
);
698 IncrementalResult
resetIncrementalGC(GCAbortReason reason
);
700 // Assert if the system state is such that we should never
701 // receive a request to do GC work.
702 void checkCanCallAPI();
704 // Check if the system state is such that GC has been suppressed
705 // or otherwise delayed.
706 [[nodiscard
]] bool checkIfGCAllowedInCurrentState(JS::GCReason reason
);
708 gcstats::ZoneGCStats
scanZonesBeforeGC();
710 void setGCOptions(JS::GCOptions options
);
712 void collect(bool nonincrementalByAPI
, const SliceBudget
& budget
,
713 JS::GCReason reason
) JS_HAZ_GC_CALL
;
716 * Run one GC "cycle" (either a slice of incremental GC or an entire
717 * non-incremental GC).
720 * * ResetIncremental if we "reset" an existing incremental GC, which would
721 * force us to run another cycle or
724 [[nodiscard
]] IncrementalResult
gcCycle(bool nonincrementalByAPI
,
725 const SliceBudget
& budgetArg
,
726 JS::GCReason reason
);
727 bool shouldRepeatForDeadZone(JS::GCReason reason
);
729 void incrementalSlice(SliceBudget
& budget
, JS::GCReason reason
,
730 bool budgetWasIncreased
);
732 bool mightSweepInThisSlice(bool nonIncremental
);
733 void collectNurseryFromMajorGC(JS::GCReason reason
);
734 void collectNursery(JS::GCOptions options
, JS::GCReason reason
,
735 gcstats::PhaseKind phase
);
737 friend class AutoCallGCCallbacks
;
738 void maybeCallGCCallback(JSGCStatus status
, JS::GCReason reason
);
740 void startCollection(JS::GCReason reason
);
743 [[nodiscard
]] bool beginPreparePhase(JS::GCReason reason
,
744 AutoGCSession
& session
);
745 bool prepareZonesForCollection(JS::GCReason reason
, bool* isFullOut
);
746 void unmarkWeakMaps();
747 void endPreparePhase(JS::GCReason reason
);
748 void beginMarkPhase(AutoGCSession
& session
);
749 bool shouldPreserveJITCode(JS::Realm
* realm
,
750 const mozilla::TimeStamp
& currentTime
,
751 JS::GCReason reason
, bool canAllocateMoreCode
,
752 bool isActiveCompartment
);
753 void discardJITCodeForGC();
754 void startBackgroundFreeAfterMinorGC();
755 void relazifyFunctionsForShrinkingGC();
756 void purgePropMapTablesForShrinkingGC();
757 void purgeSourceURLsForShrinkingGC();
758 void traceRuntimeForMajorGC(JSTracer
* trc
, AutoGCSession
& session
);
759 void traceRuntimeAtoms(JSTracer
* trc
);
760 void traceRuntimeCommon(JSTracer
* trc
, TraceOrMarkRuntime traceOrMark
);
761 void traceEmbeddingBlackRoots(JSTracer
* trc
);
762 void traceEmbeddingGrayRoots(JSTracer
* trc
);
763 IncrementalProgress
traceEmbeddingGrayRoots(JSTracer
* trc
,
764 SliceBudget
& budget
);
765 void checkNoRuntimeRoots(AutoGCSession
& session
);
766 void maybeDoCycleCollection();
767 void findDeadCompartments();
769 friend class BackgroundMarkTask
;
770 enum ParallelMarking
: bool {
771 SingleThreadedMarking
= false,
772 AllowParallelMarking
= true
774 IncrementalProgress
markUntilBudgetExhausted(
775 SliceBudget
& sliceBudget
,
776 ParallelMarking allowParallelMarking
= SingleThreadedMarking
,
777 ShouldReportMarkTime reportTime
= ReportMarkTime
);
778 bool canMarkInParallel() const;
779 bool initParallelMarkers();
780 void finishParallelMarkers();
782 bool hasMarkingWork(MarkColor color
) const;
784 void drainMarkStack();
787 void assertNoMarkingWork() const;
789 void assertNoMarkingWork() const {}
792 void markDelayedChildren(gc::Arena
* arena
, MarkColor color
);
793 void processDelayedMarkingList(gc::MarkColor color
);
794 void rebuildDelayedMarkingList();
795 void appendToDelayedMarkingList(gc::Arena
** listTail
, gc::Arena
* arena
);
796 void resetDelayedMarking();
797 template <typename F
>
798 void forEachDelayedMarkingArena(F
&& f
);
800 template <class ZoneIterT
>
801 IncrementalProgress
markWeakReferences(SliceBudget
& budget
);
802 IncrementalProgress
markWeakReferencesInCurrentGroup(SliceBudget
& budget
);
803 template <class ZoneIterT
>
804 IncrementalProgress
markGrayRoots(SliceBudget
& budget
,
805 gcstats::PhaseKind phase
);
806 void markBufferedGrayRoots(JS::Zone
* zone
);
807 IncrementalProgress
markAllWeakReferences();
808 void markAllGrayReferences(gcstats::PhaseKind phase
);
810 // The mark queue is a testing-only feature for controlling mark ordering and
812 enum MarkQueueProgress
{
813 QueueYielded
, // End this incremental GC slice, if possible
814 QueueComplete
, // Done with the queue
815 QueueSuspended
// Continue the GC without ending the slice
817 MarkQueueProgress
processTestMarkQueue();
819 // GC Sweeping. Implemented in Sweeping.cpp.
820 void beginSweepPhase(JS::GCReason reason
, AutoGCSession
& session
);
821 void dropStringWrappers();
822 void groupZonesForSweeping(JS::GCReason reason
);
823 [[nodiscard
]] bool findSweepGroupEdges();
824 [[nodiscard
]] bool addEdgesForMarkQueue();
825 void getNextSweepGroup();
826 void resetGrayList(Compartment
* comp
);
827 IncrementalProgress
beginMarkingSweepGroup(JS::GCContext
* gcx
,
828 SliceBudget
& budget
);
829 IncrementalProgress
markGrayRootsInCurrentGroup(JS::GCContext
* gcx
,
830 SliceBudget
& budget
);
831 IncrementalProgress
markGray(JS::GCContext
* gcx
, SliceBudget
& budget
);
832 IncrementalProgress
endMarkingSweepGroup(JS::GCContext
* gcx
,
833 SliceBudget
& budget
);
834 void markIncomingGrayCrossCompartmentPointers();
835 IncrementalProgress
beginSweepingSweepGroup(JS::GCContext
* gcx
,
836 SliceBudget
& budget
);
837 void initBackgroundSweep(Zone
* zone
, JS::GCContext
* gcx
,
838 const FinalizePhase
& phase
);
839 IncrementalProgress
markDuringSweeping(JS::GCContext
* gcx
,
840 SliceBudget
& budget
);
841 void updateAtomsBitmap();
842 void sweepCCWrappers();
843 void sweepRealmGlobals();
844 void sweepEmbeddingWeakPointers(JS::GCContext
* gcx
);
846 void sweepCompressionTasks();
847 void sweepWeakMaps();
848 void sweepUniqueIds();
849 void sweepObjectsWithWeakPointers();
850 void sweepDebuggerOnMainThread(JS::GCContext
* gcx
);
851 void sweepJitDataOnMainThread(JS::GCContext
* gcx
);
852 void sweepFinalizationObserversOnMainThread();
853 void traceWeakFinalizationObserverEdges(JSTracer
* trc
, Zone
* zone
);
854 void sweepWeakRefs();
855 IncrementalProgress
endSweepingSweepGroup(JS::GCContext
* gcx
,
856 SliceBudget
& budget
);
857 IncrementalProgress
performSweepActions(SliceBudget
& sliceBudget
);
858 void startSweepingAtomsTable();
859 IncrementalProgress
sweepAtomsTable(JS::GCContext
* gcx
, SliceBudget
& budget
);
860 IncrementalProgress
sweepWeakCaches(JS::GCContext
* gcx
, SliceBudget
& budget
);
861 IncrementalProgress
finalizeAllocKind(JS::GCContext
* gcx
,
862 SliceBudget
& budget
);
863 bool foregroundFinalize(JS::GCContext
* gcx
, Zone
* zone
, AllocKind thingKind
,
864 js::SliceBudget
& sliceBudget
,
865 SortedArenaList
& sweepList
);
866 IncrementalProgress
sweepPropMapTree(JS::GCContext
* gcx
, SliceBudget
& budget
);
867 void endSweepPhase(bool destroyingRuntime
);
868 void queueZonesAndStartBackgroundSweep(ZoneList
&& zones
);
869 void sweepFromBackgroundThread(AutoLockHelperThreadState
& lock
);
870 void startBackgroundFree();
871 void freeFromBackgroundThread(AutoLockHelperThreadState
& lock
);
872 void sweepBackgroundThings(ZoneList
& zones
);
873 void backgroundFinalize(JS::GCContext
* gcx
, Zone
* zone
, AllocKind kind
,
875 void assertBackgroundSweepingFinished();
877 bool allCCVisibleZonesWereCollected();
878 void sweepZones(JS::GCContext
* gcx
, bool destroyingRuntime
);
879 bool shouldDecommit() const;
880 void startDecommit();
881 void decommitEmptyChunks(const bool& cancel
, AutoLockGC
& lock
);
882 void decommitFreeArenas(const bool& cancel
, AutoLockGC
& lock
);
883 void decommitFreeArenasWithoutUnlocking(const AutoLockGC
& lock
);
885 // Compacting GC. Implemented in Compacting.cpp.
886 bool shouldCompact();
887 void beginCompactPhase();
888 IncrementalProgress
compactPhase(JS::GCReason reason
,
889 SliceBudget
& sliceBudget
,
890 AutoGCSession
& session
);
891 void endCompactPhase();
892 void sweepZoneAfterCompacting(MovingTracer
* trc
, Zone
* zone
);
893 bool canRelocateZone(Zone
* zone
) const;
894 [[nodiscard
]] bool relocateArenas(Zone
* zone
, JS::GCReason reason
,
895 Arena
*& relocatedListOut
,
896 SliceBudget
& sliceBudget
);
897 void updateCellPointers(Zone
* zone
, AllocKinds kinds
);
898 void updateAllCellPointers(MovingTracer
* trc
, Zone
* zone
);
899 void updateZonePointersToRelocatedCells(Zone
* zone
);
900 void updateRuntimePointersToRelocatedCells(AutoGCSession
& session
);
901 void clearRelocatedArenas(Arena
* arenaList
, JS::GCReason reason
);
902 void clearRelocatedArenasWithoutUnlocking(Arena
* arenaList
,
904 const AutoLockGC
& lock
);
905 void releaseRelocatedArenas(Arena
* arenaList
);
906 void releaseRelocatedArenasWithoutUnlocking(Arena
* arenaList
,
907 const AutoLockGC
& lock
);
909 void protectOrReleaseRelocatedArenas(Arena
* arenaList
, JS::GCReason reason
);
910 void protectAndHoldArenas(Arena
* arenaList
);
911 void unprotectHeldRelocatedArenas(const AutoLockGC
& lock
);
912 void releaseHeldRelocatedArenas();
913 void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC
& lock
);
917 * Whether to immediately trigger a slice after a background task
918 * finishes. This may not happen at a convenient time, so the consideration is
919 * whether the slice will run quickly or may take a long time.
921 enum ShouldTriggerSliceWhenFinished
: bool {
922 DontTriggerSliceWhenFinished
= false,
923 TriggerSliceWhenFinished
= true
926 IncrementalProgress
waitForBackgroundTask(
927 GCParallelTask
& task
, const SliceBudget
& budget
, bool shouldPauseMutator
,
928 ShouldTriggerSliceWhenFinished triggerSlice
);
930 void maybeRequestGCAfterBackgroundTask(const AutoLockHelperThreadState
& lock
);
931 void cancelRequestedGCAfterBackgroundTask();
932 void finishCollection(JS::GCReason reason
);
933 void maybeStopPretenuring();
934 void checkGCStateNotInUse();
935 IncrementalProgress
joinBackgroundMarkTask();
938 void computeNonIncrementalMarkingForValidation(AutoGCSession
& session
);
939 void validateIncrementalMarking();
940 void finishMarkingValidation();
944 void checkForCompartmentMismatches();
// Invoke the embedder-registered callbacks of each kind (the callback
// vectors themselves are declared further down in this class).
947 void callFinalizeCallbacks(JS::GCContext
* gcx
, JSFinalizeStatus status
) const;
948 void callWeakPointerZonesCallbacks(JSTracer
* trc
) const;
949 void callWeakPointerCompartmentCallbacks(JSTracer
* trc
,
950 JS::Compartment
* comp
) const;
951 void callDoCycleCollectionCallback(JSContext
* cx
);
956 // Embedders can use this zone however they wish.
957 MainThreadData
<JS::Zone
*> systemZone
;
959 MainThreadData
<JS::GCContext
> mainThreadContext
;
962 // For parent runtimes, a zone containing atoms that is shared by child
964 MainThreadData
<Zone
*> sharedAtomsZone_
;
966 // All zones in the runtime. The first element is always the atoms zone.
967 MainThreadOrGCTaskData
<ZoneVector
> zones_
;
969 // Any activity affecting the heap.
970 MainThreadOrGCTaskData
<JS::HeapState
> heapState_
;
971 friend class AutoHeapSession
;
972 friend class JS::AutoEnterCycleCollection
;
974 UnprotectedData
<gcstats::Statistics
> stats_
;
977 js::StringStats stringStats
;
979 Vector
<UniquePtr
<GCMarker
>, 1, SystemAllocPolicy
> markers
;
981 // Delayed marking support in case we OOM pushing work onto the mark stack.
982 MainThreadOrGCTaskData
<js::gc::Arena
*> delayedMarkingList
;
983 MainThreadOrGCTaskData
<bool> delayedMarkingWorkAdded
;
985 /* Count of arenas that are currently in the stack. */
986 MainThreadOrGCTaskData
<size_t> markLaterArenas
;
989 SweepingTracer sweepingTracer
;
991 /* Track total GC heap size for this runtime. */
994 /* GC scheduling state and parameters. */
995 GCSchedulingTunables tunables
;
996 GCSchedulingState schedulingState
;
997 MainThreadData
<bool> fullGCRequested
;
999 // Helper thread configuration.
1000 MainThreadData
<double> helperThreadRatio
;
1001 MainThreadData
<size_t> maxHelperThreads
;
1002 MainThreadOrGCTaskData
<size_t> helperThreadCount
;
1003 MainThreadData
<size_t> markingThreadCount
;
1005 // State used for managing atom mark bitmaps in each zone.
1006 AtomMarkingRuntime atomMarking
;
1009 * Pointer to a callback that, if set, will be used to create a
1010 * budget for internally-triggered GCs.
1012 MainThreadData
<JS::CreateSliceBudgetCallback
> createBudgetCallback
;
1015 // Arenas used for permanent things created at startup and shared by child
1017 MainThreadData
<ArenaList
> permanentAtoms
;
1018 MainThreadData
<ArenaList
> permanentFatInlineAtoms
;
1019 MainThreadData
<ArenaList
> permanentWellKnownSymbols
;
1021 // When chunks are empty, they reside in the emptyChunks pool and are
1022 // re-used as needed or eventually expired if not re-used. The emptyChunks
1023 // pool gets refilled from the background allocation task heuristically so
1024 // that empty chunks should always be available for immediate allocation
1025 // without syscalls.
1026 GCLockData
<ChunkPool
> emptyChunks_
;
1028 // Chunks which have had some, but not all, of their arenas allocated live
1029 // in the available chunk lists. When all available arenas in a chunk have
1030 // been allocated, the chunk is removed from the available list and moved
1031 // to the fullChunks pool. During a GC, if all arenas are free, the chunk
1032 // is moved back to the emptyChunks pool and scheduled for eventual
1034 GCLockData
<ChunkPool
> availableChunks_
;
1036 // When all arenas in a chunk are used, it is moved to the fullChunks pool
1037 // so as to reduce the cost of operations on the available lists.
1038 GCLockData
<ChunkPool
> fullChunks_
;
1041 * JSGC_MIN_EMPTY_CHUNK_COUNT
1042 * JSGC_MAX_EMPTY_CHUNK_COUNT
1044 * Controls the number of empty chunks reserved for future allocation.
1046 * They can be read off main thread by the background allocation task and the
1047 * background decommit task.
1049 GCLockData
<uint32_t> minEmptyChunkCount_
;
1050 GCLockData
<uint32_t> maxEmptyChunkCount_
;
1052 MainThreadData
<RootedValueMap
> rootsHash
;
1054 // An incrementing id used to assign unique ids to cells that require one.
1055 MainThreadData
<uint64_t> nextCellUniqueId_
;
1058 * Number of the committed arenas in all GC chunks including empty chunks.
1060 mozilla::Atomic
<uint32_t, mozilla::ReleaseAcquire
> numArenasFreeCommitted
;
1061 MainThreadData
<VerifyPreTracer
*> verifyPreData
;
1063 MainThreadData
<mozilla::TimeStamp
> lastGCStartTime_
;
1064 MainThreadData
<mozilla::TimeStamp
> lastGCEndTime_
;
1066 WriteOnceData
<bool> initialized
;
1067 MainThreadData
<bool> incrementalGCEnabled
;
1068 MainThreadData
<bool> perZoneGCEnabled
;
1070 mozilla::Atomic
<size_t, mozilla::ReleaseAcquire
> numActiveZoneIters
;
1072 /* During shutdown, the GC needs to clean up every possible object. */
1073 MainThreadData
<bool> cleanUpEverything
;
1076 * The gray bits can become invalid if UnmarkGray overflows the stack. A
1077 * full GC will reset this bit, since it fills in all the gray bits.
1079 UnprotectedData
<bool> grayBitsValid
;
1081 mozilla::Atomic
<JS::GCReason
, mozilla::ReleaseAcquire
> majorGCTriggerReason
;
1083 /* Incremented at the start of every minor GC. */
1084 MainThreadData
<uint64_t> minorGCNumber
;
1086 /* Incremented at the start of every major GC. */
1087 MainThreadData
<uint64_t> majorGCNumber
;
1089 /* Incremented on every GC slice or minor collection. */
1090 MainThreadData
<uint64_t> number
;
1092 /* Incremented on every GC slice. */
1093 MainThreadData
<uint64_t> sliceNumber
;
1095 /* Whether the currently running GC can finish in multiple slices. */
1096 MainThreadOrGCTaskData
<bool> isIncremental
;
1098 /* Whether all zones are being collected in first GC slice. */
1099 MainThreadData
<bool> isFull
;
1101 /* Whether the heap will be compacted at the end of GC. */
1102 MainThreadData
<bool> isCompacting
;
1104 /* Whether to use parallel marking. */
1105 MainThreadData
<ParallelMarking
> useParallelMarking
;
1107 /* The invocation kind of the current GC, set at the start of collection. */
1108 MainThreadOrGCTaskData
<mozilla::Maybe
<JS::GCOptions
>> maybeGcOptions
;
1110 /* The initial GC reason, taken from the first slice. */
1111 MainThreadData
<JS::GCReason
> initialReason
;
1114 * The current incremental GC phase. This is also used internally in
1115 * non-incremental GC.
1117 MainThreadOrGCTaskData
<State
> incrementalState
;
1119 /* The incremental state at the start of this slice. */
1120 MainThreadOrGCTaskData
<State
> initialState
;
1122 /* Whether to pay attention the zeal settings in this incremental slice. */
1124 MainThreadData
<bool> useZeal
;
1129 /* Indicates that the last incremental slice exhausted the mark stack. */
1130 MainThreadData
<bool> lastMarkSlice
;
1132 // Whether it's currently safe to yield to the mutator in an incremental GC.
1133 MainThreadData
<bool> safeToYield
;
1135 // Whether to do any marking caused by barriers on a background thread during
1136 // incremental sweeping, while also sweeping zones which have finished
1138 MainThreadData
<bool> markOnBackgroundThreadDuringSweeping
;
1140 // Whether any sweeping and decommitting will run on a separate GC helper
1142 MainThreadData
<bool> useBackgroundThreads
;
1144 // Whether we have already discarded JIT code for all collected zones in this
1146 MainThreadData
<bool> haveDiscardedJITCodeThisSlice
;
1149 /* Shutdown has started. Further collections must be shutdown collections. */
1150 MainThreadData
<bool> hadShutdownGC
;
1153 /* Singly linked list of zones to be swept in the background. */
1154 HelperThreadLockData
<ZoneList
> backgroundSweepZones
;
1157 * Whether to trigger a GC slice after a background task is complete, so that
1158 * the collector can continue or finish collecting. This is only used for the
1159 * tasks that run concurrently with the mutator, which are background
1160 * finalization and background decommit.
1162 HelperThreadLockData
<bool> requestSliceAfterBackgroundTask
;
1165 * Free LIFO blocks are transferred to these allocators before being freed on
1166 * a background thread.
1168 HelperThreadLockData
<LifoAlloc
> lifoBlocksToFree
;
1169 MainThreadData
<LifoAlloc
> lifoBlocksToFreeAfterMinorGC
;
1170 HelperThreadLockData
<Nursery::BufferSet
> buffersToFreeAfterMinorGC
;
1172 /* Index of current sweep group (for stats). */
1173 MainThreadData
<unsigned> sweepGroupIndex
;
1176 * Incremental sweep state.
1178 MainThreadData
<JS::Zone
*> sweepGroups
;
1179 MainThreadOrGCTaskData
<JS::Zone
*> currentSweepGroup
;
1180 MainThreadData
<UniquePtr
<SweepAction
>> sweepActions
;
1181 MainThreadOrGCTaskData
<JS::Zone
*> sweepZone
;
1182 MainThreadOrGCTaskData
<AllocKind
> sweepAllocKind
;
1183 MainThreadData
<mozilla::Maybe
<AtomsTable::SweepIterator
>> maybeAtomsToSweep
;
1184 MainThreadOrGCTaskData
<mozilla::Maybe
<WeakCacheSweepIterator
>>
1186 MainThreadData
<bool> abortSweepAfterCurrentGroup
;
1187 MainThreadOrGCTaskData
<IncrementalProgress
> sweepMarkResult
;
1191 * List of objects to mark at the beginning of a GC for testing purposes. May
1192 * also contain string directives to change mark color or wait until different
1195 * This is a WeakCache because not everything in this list is guaranteed to
1196 * end up marked (eg if you insert an object from an already-processed sweep
1197 * group in the middle of an incremental GC). Also, the mark queue is not
1198 * used during shutdown GCs. In either case, unmarked objects may need to be
1201 JS::WeakCache
<GCVector
<HeapPtr
<JS::Value
>, 0, SystemAllocPolicy
>>
1204 /* Position within the test mark queue. */
1205 size_t queuePos
= 0;
1207 /* The test marking queue might want to be marking a particular color. */
1208 mozilla::Maybe
<js::gc::MarkColor
> queueMarkColor
;
1210 // During gray marking, delay AssertCellIsNotGray checks by
1211 // recording the cell pointers here and checking after marking has
1213 MainThreadData
<Vector
<const Cell
*, 0, SystemAllocPolicy
>>
1214 cellsToAssertNotGray
;
1215 friend void js::gc::detail::AssertCellIsNotGray(const Cell
*);
1218 friend class SweepGroupsIter
;
1221 * Incremental compacting state.
1223 MainThreadData
<bool> startedCompacting
;
1224 MainThreadData
<ZoneList
> zonesToMaybeCompact
;
1225 MainThreadData
<size_t> zonesCompacted
;
1227 GCLockData
<Arena
*> relocatedArenasToRelease
;
1231 MainThreadData
<MarkingValidator
*> markingValidator
;
1235 * Default budget for incremental GC slice. See js/SliceBudget.h.
1237 * JSGC_SLICE_TIME_BUDGET_MS
1238 * pref: javascript.options.mem.gc_incremental_slice_ms,
1240 MainThreadData
<int64_t> defaultTimeBudgetMS_
;
1243 * We disable incremental GC if we encounter a Class with a trace hook
1244 * that does not implement write barriers.
1246 MainThreadData
<bool> incrementalAllowed
;
1249 * Whether compacting GC is enabled globally.
1251 * JSGC_COMPACTING_ENABLED
1252 * pref: javascript.options.mem.gc_compacting
1254 MainThreadData
<bool> compactingEnabled
;
1257 * Whether parallel marking is enabled globally.
1259 * JSGC_PARALLEL_MARKING_ENABLED
1260 * pref: javascript.options.mem.gc_parallel_marking
1262 MainThreadData
<bool> parallelMarkingEnabled
;
1264 MainThreadData
<bool> rootsRemoved
;
1267 * These options control the zealousness of the GC. At every allocation,
1268 * nextScheduled is decremented. When it reaches zero we do a full GC.
1270 * At this point, if zeal_ is one of the types that trigger periodic
1271 * collection, then nextScheduled is reset to the value of zealFrequency.
1272 * Otherwise, no additional GCs take place.
1274 * You can control these values in several ways:
1275 * - Set the JS_GC_ZEAL environment variable
1276 * - Call gczeal() or schedulegc() from inside shell-executed JS code
1277 * (see the help for details)
1279 * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
1280 * whenever we are notified that GC roots have been removed). This option is
1281 * mainly useful to embedders.
1283 * We use zeal_ == 4 to enable write barrier verification. See the comment
1284 * in gc/Verifier.cpp for more information about this.
1286 * zeal_ values from 8 to 10 periodically run different types of
1289 * zeal_ value 14 performs periodic shrinking collections.
1292 static_assert(size_t(ZealMode::Count
) <= 32,
1293 "Too many zeal modes to store in a uint32_t");
1294 MainThreadData
<uint32_t> zealModeBits
;
1295 MainThreadData
<int> zealFrequency
;
1296 MainThreadData
<int> nextScheduled
;
1297 MainThreadData
<bool> deterministicOnly
;
1298 MainThreadData
<int> zealSliceBudget
;
1299 MainThreadData
<size_t> maybeMarkStackLimit
;
1301 MainThreadData
<PersistentRooted
<GCVector
<JSObject
*, 0, SystemAllocPolicy
>>>
1305 MainThreadData
<bool> fullCompartmentChecks
;
1307 MainThreadData
<uint32_t> gcCallbackDepth
;
1309 MainThreadData
<Callback
<JSGCCallback
>> gcCallback
;
1310 MainThreadData
<Callback
<JS::DoCycleCollectionCallback
>>
1311 gcDoCycleCollectionCallback
;
1312 MainThreadData
<Callback
<JSObjectsTenuredCallback
>> tenuredCallback
;
1313 MainThreadData
<CallbackVector
<JSFinalizeCallback
>> finalizeCallbacks
;
1314 MainThreadOrGCTaskData
<Callback
<JSHostCleanupFinalizationRegistryCallback
>>
1315 hostCleanupFinalizationRegistryCallback
;
1316 MainThreadData
<CallbackVector
<JSWeakPointerZonesCallback
>>
1317 updateWeakPointerZonesCallbacks
;
1318 MainThreadData
<CallbackVector
<JSWeakPointerCompartmentCallback
>>
1319 updateWeakPointerCompartmentCallbacks
;
1320 MainThreadData
<CallbackVector
<JS::GCNurseryCollectionCallback
>>
1321 nurseryCollectionCallbacks
;
1324 * The trace operations to trace embedding-specific GC roots. One is for
1325 * tracing through black roots and the other is for tracing through gray
1326 * roots. The black/gray distinction is only relevant to the cycle
1329 MainThreadData
<CallbackVector
<JSTraceDataOp
>> blackRootTracers
;
1330 MainThreadOrGCTaskData
<Callback
<JSGrayRootsTracer
>> grayRootTracer
;
1332 /* Always preserve JIT code during GCs, for testing. */
1333 MainThreadData
<bool> alwaysPreserveCode
;
1335 /* Count of the number of zones that are currently in page load. */
1336 MainThreadData
<size_t> inPageLoadCount
;
1338 MainThreadData
<bool> lowMemoryState
;
1341 * General purpose GC lock, used for synchronising operations on
1342 * arenas and during parallel marking.
1344 friend class js::AutoLockGC
;
1345 friend class js::AutoLockGCBgAlloc
;
1346 js::Mutex lock MOZ_UNANNOTATED
;
1348 /* Lock used to synchronise access to delayed marking state. */
1349 js::Mutex delayedMarkingLock MOZ_UNANNOTATED
;
1351 friend class BackgroundSweepTask
;
1352 friend class BackgroundFreeTask
;
// Per-phase GC tasks (see GCParallelTask.h); each member's type names the
// GC work it performs: allocation, unmarking, marking, sweeping, freeing
// and decommitting.
1354 BackgroundAllocTask allocTask
;
1355 BackgroundUnmarkTask unmarkTask
;
1356 BackgroundMarkTask markTask
;
1357 BackgroundSweepTask sweepTask
;
1358 BackgroundFreeTask freeTask
;
1359 BackgroundDecommitTask decommitTask
;
1362 * During incremental sweeping, this field temporarily holds the arenas of
1363 * the current AllocKind being swept in order of increasing free space.
1365 MainThreadData
<SortedArenaList
> incrementalSweepList
;
1367 MainThreadData
<Nursery
> nursery_
;
1369 // The store buffer used to track tenured to nursery edges for generational
1370 // GC. This is accessed off main thread when sweeping WeakCaches.
1371 MainThreadOrGCTaskData
<gc::StoreBuffer
> storeBuffer_
;
1373 mozilla::TimeStamp lastLastDitchTime
;
1375 // The last time per-zone allocation rates were updated.
1376 MainThreadData
<mozilla::TimeStamp
> lastAllocRateUpdateTime
;
1378 // Total collector time since per-zone allocation rates were last updated.
1379 MainThreadData
<mozilla::TimeDuration
> collectorTimeSinceAllocRateUpdate
;
1381 friend class MarkingValidator
;
1382 friend class AutoEnterIteration
;
1385 /* Prevent compartments and zones from being collected during iteration. */
1386 class MOZ_RAII AutoEnterIteration
{
// RAII guard: increments the runtime's numActiveZoneIters counter on
// construction and decrements it on destruction, so zones/compartments
// are not collected while an iteration is in progress.
1390 explicit AutoEnterIteration(GCRuntime
* gc_
) : gc(gc_
) {
1391 ++gc
->numActiveZoneIters
;
// Destructor asserts the counter is non-zero before decrementing.
1394 ~AutoEnterIteration() {
1395 MOZ_ASSERT(gc
->numActiveZoneIters
);
1396 --gc
->numActiveZoneIters
;
// Test whether |mode|'s bit is set in zealModeBits; the static_assert
// guarantees every mode fits in the bitfield.
1402 inline bool GCRuntime::hasZealMode(ZealMode mode
) {
1403 static_assert(size_t(ZealMode::Limit
) < sizeof(zealModeBits
) * 8,
1404 "Zeal modes must fit in zealModeBits");
1405 return zealModeBits
& (1 << uint32_t(mode
));
// Clear |mode|'s bit in zealModeBits and assert it is no longer set.
1408 inline void GCRuntime::clearZealMode(ZealMode mode
) {
1409 zealModeBits
&= ~(1 << uint32_t(mode
));
1410 MOZ_ASSERT(!hasZealMode(mode
));
// Decrement the per-allocation countdown; when it reaches zero a zealous
// GC is due. For the periodic modes listed below, the countdown is
// re-armed from zealFrequency so collections keep recurring.
1413 inline bool GCRuntime::needZealousGC() {
1414 if (nextScheduled
> 0 && --nextScheduled
== 0) {
1415 if (hasZealMode(ZealMode::Alloc
) || hasZealMode(ZealMode::GenerationalGC
) ||
1416 hasZealMode(ZealMode::IncrementalMultipleSlices
) ||
1417 hasZealMode(ZealMode::Compact
) || hasIncrementalTwoSliceZealMode()) {
1418 nextScheduled
= zealFrequency
;
// True if any yield-inducing zeal mode is set; each of these modes makes
// an incremental GC yield at a particular phase, forcing it to take at
// least two slices.
1425 inline bool GCRuntime::hasIncrementalTwoSliceZealMode() {
1426 return hasZealMode(ZealMode::YieldBeforeRootMarking
) ||
1427 hasZealMode(ZealMode::YieldBeforeMarking
) ||
1428 hasZealMode(ZealMode::YieldBeforeSweeping
) ||
1429 hasZealMode(ZealMode::YieldBeforeSweepingAtoms
) ||
1430 hasZealMode(ZealMode::YieldBeforeSweepingCaches
) ||
1431 hasZealMode(ZealMode::YieldBeforeSweepingObjects
) ||
1432 hasZealMode(ZealMode::YieldBeforeSweepingNonObjects
) ||
1433 hasZealMode(ZealMode::YieldBeforeSweepingPropMapTrees
) ||
1434 hasZealMode(ZealMode::YieldWhileGrayMarking
);
// Stub definitions for builds without zeal support (presumably the
// !JS_GC_ZEAL branch of a preprocessor conditional whose directive is not
// visible here): zeal checks compile away to constant false / no-ops.
1438 inline bool GCRuntime::hasZealMode(ZealMode mode
) { return false; }
1439 inline void GCRuntime::clearZealMode(ZealMode mode
) {}
1440 inline bool GCRuntime::needZealousGC() { return false; }
1441 inline bool GCRuntime::hasIncrementalTwoSliceZealMode() { return false; }
1444 bool IsCurrentlyAnimating(const mozilla::TimeStamp
& lastAnimationTime
,
1445 const mozilla::TimeStamp
& currentTime
);
1447 } /* namespace gc */
1448 } /* namespace js */