Bug 1865597 - Add error checking when initializing parallel marking and disable on...
[gecko.git] / js / src / gc / GCRuntime.h
blob1a371f30f06ae4f6bddb1d4ea746e41eaa6ac7ca
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef gc_GCRuntime_h
8 #define gc_GCRuntime_h
10 #include "mozilla/Atomics.h"
11 #include "mozilla/DoublyLinkedList.h"
12 #include "mozilla/EnumSet.h"
13 #include "mozilla/Maybe.h"
14 #include "mozilla/TimeStamp.h"
16 #include "gc/ArenaList.h"
17 #include "gc/AtomMarking.h"
18 #include "gc/GCContext.h"
19 #include "gc/GCMarker.h"
20 #include "gc/GCParallelTask.h"
21 #include "gc/IteratorUtils.h"
22 #include "gc/Nursery.h"
23 #include "gc/Scheduling.h"
24 #include "gc/Statistics.h"
25 #include "gc/StoreBuffer.h"
26 #include "js/friend/PerformanceHint.h"
27 #include "js/GCAnnotations.h"
28 #include "js/UniquePtr.h"
29 #include "vm/AtomsTable.h"
31 namespace js {
33 class AutoLockGC;
34 class AutoLockGCBgAlloc;
35 class AutoLockHelperThreadState;
36 class FinalizationRegistryObject;
37 class FinalizationRecordObject;
38 class FinalizationQueueObject;
39 class GlobalObject;
40 class VerifyPreTracer;
41 class WeakRefObject;
43 namespace gc {
45 using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
46 using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>;
48 class AutoCallGCCallbacks;
49 class AutoGCSession;
50 class AutoHeapSession;
51 class AutoTraceSession;
52 struct FinalizePhase;
53 class MarkingValidator;
54 struct MovingTracer;
55 class ParallelMarkTask;
56 enum class ShouldCheckThresholds;
57 class SweepGroupsIter;
59 // Interface to a sweep action.
60 struct SweepAction {
61 // The arguments passed to each action.
62 struct Args {
63 GCRuntime* gc;
64 JS::GCContext* gcx;
65 SliceBudget& budget;
68 virtual ~SweepAction() = default;
69 virtual IncrementalProgress run(Args& state) = 0;
70 virtual void assertFinished() const = 0;
71 virtual bool shouldSkip() { return false; }
74 class ChunkPool {
75 TenuredChunk* head_;
76 size_t count_;
78 public:
79 ChunkPool() : head_(nullptr), count_(0) {}
80 ChunkPool(const ChunkPool& other) = delete;
81 ChunkPool(ChunkPool&& other) { *this = std::move(other); }
83 ~ChunkPool() {
84 MOZ_ASSERT(!head_);
85 MOZ_ASSERT(count_ == 0);
88 ChunkPool& operator=(const ChunkPool& other) = delete;
89 ChunkPool& operator=(ChunkPool&& other) {
90 head_ = other.head_;
91 other.head_ = nullptr;
92 count_ = other.count_;
93 other.count_ = 0;
94 return *this;
97 bool empty() const { return !head_; }
98 size_t count() const { return count_; }
100 TenuredChunk* head() {
101 MOZ_ASSERT(head_);
102 return head_;
104 TenuredChunk* pop();
105 void push(TenuredChunk* chunk);
106 TenuredChunk* remove(TenuredChunk* chunk);
108 void sort();
110 private:
111 TenuredChunk* mergeSort(TenuredChunk* list, size_t count);
112 bool isSorted() const;
114 #ifdef DEBUG
115 public:
116 bool contains(TenuredChunk* chunk) const;
117 bool verify() const;
118 void verifyChunks() const;
119 #endif
121 public:
122 // Pool mutation does not invalidate an Iter unless the mutation
123 // is of the TenuredChunk currently being visited by the Iter.
124 class Iter {
125 public:
126 explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
127 bool done() const { return !current_; }
128 void next();
129 TenuredChunk* get() const { return current_; }
130 operator TenuredChunk*() const { return get(); }
131 TenuredChunk* operator->() const { return get(); }
133 private:
134 TenuredChunk* current_;
138 class BackgroundMarkTask : public GCParallelTask {
139 public:
140 explicit BackgroundMarkTask(GCRuntime* gc);
141 void setBudget(const SliceBudget& budget) { this->budget = budget; }
142 void run(AutoLockHelperThreadState& lock) override;
144 private:
145 SliceBudget budget;
148 class BackgroundUnmarkTask : public GCParallelTask {
149 public:
150 explicit BackgroundUnmarkTask(GCRuntime* gc);
151 void initZones();
152 void run(AutoLockHelperThreadState& lock) override;
154 ZoneVector zones;
157 class BackgroundSweepTask : public GCParallelTask {
158 public:
159 explicit BackgroundSweepTask(GCRuntime* gc);
160 void run(AutoLockHelperThreadState& lock) override;
163 class BackgroundFreeTask : public GCParallelTask {
164 public:
165 explicit BackgroundFreeTask(GCRuntime* gc);
166 void run(AutoLockHelperThreadState& lock) override;
169 // Performs extra allocation off thread so that when memory is required on the
170 // main thread it will already be available and waiting.
171 class BackgroundAllocTask : public GCParallelTask {
172 // Guarded by the GC lock.
173 GCLockData<ChunkPool&> chunkPool_;
175 const bool enabled_;
177 public:
178 BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool);
179 bool enabled() const { return enabled_; }
181 void run(AutoLockHelperThreadState& lock) override;
184 // Search the provided chunks for free arenas and decommit them.
185 class BackgroundDecommitTask : public GCParallelTask {
186 public:
187 explicit BackgroundDecommitTask(GCRuntime* gc);
188 void run(AutoLockHelperThreadState& lock) override;
191 template <typename F>
192 struct Callback {
193 F op;
194 void* data;
196 Callback() : op(nullptr), data(nullptr) {}
197 Callback(F op, void* data) : op(op), data(data) {}
200 template <typename F>
201 using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
203 using RootedValueMap =
204 HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy>;
206 using AllocKinds = mozilla::EnumSet<AllocKind, uint64_t>;
208 // A singly linked list of zones.
209 class ZoneList {
210 static Zone* const End;
212 Zone* head;
213 Zone* tail;
215 public:
216 ZoneList();
217 ~ZoneList();
219 bool isEmpty() const;
220 Zone* front() const;
222 void prepend(Zone* zone);
223 void append(Zone* zone);
224 void prependList(ZoneList&& other);
225 void appendList(ZoneList&& other);
226 Zone* removeFront();
227 void clear();
229 private:
230 explicit ZoneList(Zone* singleZone);
231 void check() const;
233 ZoneList(const ZoneList& other) = delete;
234 ZoneList& operator=(const ZoneList& other) = delete;
237 struct WeakCacheToSweep {
238 JS::detail::WeakCacheBase* cache;
239 JS::Zone* zone;
242 class WeakCacheSweepIterator {
243 using WeakCacheBase = JS::detail::WeakCacheBase;
245 JS::Zone* sweepZone;
246 WeakCacheBase* sweepCache;
248 public:
249 explicit WeakCacheSweepIterator(JS::Zone* sweepGroup);
251 bool done() const;
252 WeakCacheToSweep get() const;
253 void next();
255 private:
256 void settle();
259 struct SweepingTracer final : public GenericTracerImpl<SweepingTracer> {
260 explicit SweepingTracer(JSRuntime* rt);
262 private:
263 template <typename T>
264 void onEdge(T** thingp, const char* name);
265 friend class GenericTracerImpl<SweepingTracer>;
268 class GCRuntime {
269 public:
270 explicit GCRuntime(JSRuntime* rt);
271 [[nodiscard]] bool init(uint32_t maxbytes);
272 bool wasInitialized() const { return initialized; }
273 void finishRoots();
274 void finish();
276 Zone* atomsZone() {
277 Zone* zone = zones()[0];
278 MOZ_ASSERT(JS::shadow::Zone::from(zone)->isAtomsZone());
279 return zone;
281 Zone* maybeSharedAtomsZone() { return sharedAtomsZone_; }
283 [[nodiscard]] bool freezeSharedAtomsZone();
284 void restoreSharedAtomsZone();
286 JS::HeapState heapState() const { return heapState_; }
288 inline bool hasZealMode(ZealMode mode);
289 inline void clearZealMode(ZealMode mode);
290 inline bool needZealousGC();
291 inline bool hasIncrementalTwoSliceZealMode();
293 [[nodiscard]] bool addRoot(Value* vp, const char* name);
294 void removeRoot(Value* vp);
296 [[nodiscard]] bool setParameter(JSContext* cx, JSGCParamKey key,
297 uint32_t value);
298 void resetParameter(JSContext* cx, JSGCParamKey key);
299 uint32_t getParameter(JSGCParamKey key);
301 void setPerformanceHint(PerformanceHint hint);
302 bool isInPageLoad() const { return inPageLoadCount != 0; }
304 [[nodiscard]] bool triggerGC(JS::GCReason reason);
305 // Check whether to trigger a zone GC after allocating GC cells.
306 void maybeTriggerGCAfterAlloc(Zone* zone);
307 // Check whether to trigger a zone GC after malloc memory.
308 void maybeTriggerGCAfterMalloc(Zone* zone);
309 bool maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
310 const HeapThreshold& threshold,
311 JS::GCReason reason);
312 // The return value indicates if we were able to do the GC.
313 bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
314 size_t thresholdBytes);
316 void maybeGC();
318 // Return whether we want to run a major GC. If eagerOk is true, include eager
319 // triggers (eg EAGER_ALLOC_TRIGGER) in this determination, and schedule all
320 // zones that exceed the eager thresholds.
321 JS::GCReason wantMajorGC(bool eagerOk);
322 bool checkEagerAllocTrigger(const HeapSize& size,
323 const HeapThreshold& threshold);
325 // Do a minor GC if requested, followed by a major GC if requested. The return
326 // value indicates whether a major GC was performed.
327 bool gcIfRequested() { return gcIfRequestedImpl(false); }
329 // Internal function to do a GC if previously requested. But if not and
330 // eagerOk, do an eager GC for all Zones that have exceeded the eager
331 // thresholds.
333 // Return whether a major GC was performed or started.
334 bool gcIfRequestedImpl(bool eagerOk);
336 void gc(JS::GCOptions options, JS::GCReason reason);
337 void startGC(JS::GCOptions options, JS::GCReason reason,
338 const SliceBudget& budget);
339 void gcSlice(JS::GCReason reason, const SliceBudget& budget);
340 void finishGC(JS::GCReason reason);
341 void abortGC();
342 void startDebugGC(JS::GCOptions options, const SliceBudget& budget);
343 void debugGCSlice(const SliceBudget& budget);
345 void runDebugGC();
346 void notifyRootsRemoved();
348 enum TraceOrMarkRuntime { TraceRuntime, MarkRuntime };
349 void traceRuntime(JSTracer* trc, AutoTraceSession& session);
350 void traceRuntimeForMinorGC(JSTracer* trc, AutoGCSession& session);
352 void purgeRuntimeForMinorGC();
354 void shrinkBuffers();
355 void onOutOfMallocMemory();
356 void onOutOfMallocMemory(const AutoLockGC& lock);
358 Nursery& nursery() { return nursery_.ref(); }
359 gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }
361 void minorGC(JS::GCReason reason,
362 gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC)
363 JS_HAZ_GC_CALL;
364 void evictNursery(JS::GCReason reason = JS::GCReason::EVICT_NURSERY) {
365 minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
368 void* addressOfNurseryPosition() {
369 return nursery_.refNoCheck().addressOfPosition();
372 const void* addressOfLastBufferedWholeCell() {
373 return storeBuffer_.refNoCheck().addressOfLastBufferedWholeCell();
376 #ifdef JS_GC_ZEAL
377 const uint32_t* addressOfZealModeBits() { return &zealModeBits.refNoCheck(); }
378 void getZealBits(uint32_t* zealBits, uint32_t* frequency,
379 uint32_t* nextScheduled);
380 void setZeal(uint8_t zeal, uint32_t frequency);
381 void unsetZeal(uint8_t zeal);
382 bool parseAndSetZeal(const char* str);
383 void setNextScheduled(uint32_t count);
384 void verifyPreBarriers();
385 void maybeVerifyPreBarriers(bool always);
386 bool selectForMarking(JSObject* object);
387 void clearSelectedForMarking();
388 void setDeterministic(bool enable);
389 void setMarkStackLimit(size_t limit, AutoLockGC& lock);
390 #endif
392 uint64_t nextCellUniqueId() {
393 MOZ_ASSERT(nextCellUniqueId_ > 0);
394 uint64_t uid = ++nextCellUniqueId_;
395 return uid;
398 void setLowMemoryState(bool newState) { lowMemoryState = newState; }
399 bool systemHasLowMemory() const { return lowMemoryState; }
401 public:
402 // Internal public interface
403 ZoneVector& zones() { return zones_.ref(); }
404 gcstats::Statistics& stats() { return stats_.ref(); }
405 const gcstats::Statistics& stats() const { return stats_.ref(); }
406 State state() const { return incrementalState; }
407 bool isHeapCompacting() const { return state() == State::Compact; }
408 bool isForegroundSweeping() const { return state() == State::Sweep; }
409 bool isBackgroundSweeping() const { return sweepTask.wasStarted(); }
410 bool isBackgroundMarking() const { return markTask.wasStarted(); }
411 void waitBackgroundSweepEnd();
412 void waitBackgroundAllocEnd() { allocTask.cancelAndWait(); }
413 void waitBackgroundFreeEnd();
414 void waitForBackgroundTasks();
415 bool isWaitingOnBackgroundTask() const;
417 void lockGC() { lock.lock(); }
418 bool tryLockGC() { return lock.tryLock(); }
419 void unlockGC() { lock.unlock(); }
421 #ifdef DEBUG
422 void assertCurrentThreadHasLockedGC() const {
423 lock.assertOwnedByCurrentThread();
425 #endif // DEBUG
427 void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
429 bool isIncrementalGCAllowed() const { return incrementalAllowed; }
430 void disallowIncrementalGC() { incrementalAllowed = false; }
432 void setIncrementalGCEnabled(bool enabled);
434 bool isIncrementalGCEnabled() const { return incrementalGCEnabled; }
435 bool isPerZoneGCEnabled() const { return perZoneGCEnabled; }
436 bool isCompactingGCEnabled() const;
437 bool isParallelMarkingEnabled() const { return parallelMarkingEnabled; }
439 bool isIncrementalGCInProgress() const {
440 return state() != State::NotActive && !isVerifyPreBarriersEnabled();
443 bool hasForegroundWork() const;
445 bool isShrinkingGC() const { return gcOptions() == JS::GCOptions::Shrink; }
447 bool isShutdownGC() const { return gcOptions() == JS::GCOptions::Shutdown; }
449 #ifdef DEBUG
450 bool isShuttingDown() const { return hadShutdownGC; }
451 #endif
453 bool initSweepActions();
455 void setGrayRootsTracer(JSGrayRootsTracer traceOp, void* data);
456 [[nodiscard]] bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
457 void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
458 void clearBlackAndGrayRootTracers();
460 void setGCCallback(JSGCCallback callback, void* data);
461 void callGCCallback(JSGCStatus status, JS::GCReason reason) const;
462 void setObjectsTenuredCallback(JSObjectsTenuredCallback callback, void* data);
463 void callObjectsTenuredCallback();
464 [[nodiscard]] bool addFinalizeCallback(JSFinalizeCallback callback,
465 void* data);
466 void removeFinalizeCallback(JSFinalizeCallback callback);
467 void setHostCleanupFinalizationRegistryCallback(
468 JSHostCleanupFinalizationRegistryCallback callback, void* data);
469 void callHostCleanupFinalizationRegistryCallback(
470 JSFunction* doCleanup, GlobalObject* incumbentGlobal);
471 [[nodiscard]] bool addWeakPointerZonesCallback(
472 JSWeakPointerZonesCallback callback, void* data);
473 void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback);
474 [[nodiscard]] bool addWeakPointerCompartmentCallback(
475 JSWeakPointerCompartmentCallback callback, void* data);
476 void removeWeakPointerCompartmentCallback(
477 JSWeakPointerCompartmentCallback callback);
478 JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
479 bool addNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback,
480 void* data);
481 void removeNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback,
482 void* data);
483 JS::DoCycleCollectionCallback setDoCycleCollectionCallback(
484 JS::DoCycleCollectionCallback callback);
485 void callNurseryCollectionCallbacks(JS::GCNurseryProgress progress,
486 JS::GCReason reason);
488 bool addFinalizationRegistry(JSContext* cx,
489 Handle<FinalizationRegistryObject*> registry);
490 bool registerWithFinalizationRegistry(JSContext* cx, HandleObject target,
491 HandleObject record);
492 void queueFinalizationRegistryForCleanup(FinalizationQueueObject* queue);
494 void nukeFinalizationRecordWrapper(JSObject* wrapper,
495 FinalizationRecordObject* record);
496 void nukeWeakRefWrapper(JSObject* wrapper, WeakRefObject* weakRef);
498 void setFullCompartmentChecks(bool enable);
500 // Get the main marking tracer.
501 GCMarker& marker() { return *markers[0]; }
503 JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; }
504 unsigned getCurrentSweepGroupIndex() {
505 MOZ_ASSERT_IF(unsigned(state()) < unsigned(State::Sweep),
506 sweepGroupIndex == 0);
507 return sweepGroupIndex;
510 uint64_t gcNumber() const { return number; }
511 void incGcNumber() { ++number; }
513 uint64_t minorGCCount() const { return minorGCNumber; }
514 void incMinorGcNumber() { ++minorGCNumber; }
516 uint64_t majorGCCount() const { return majorGCNumber; }
517 void incMajorGcNumber() { ++majorGCNumber; }
519 uint64_t gcSliceCount() const { return sliceNumber; }
520 void incGcSliceNumber() { ++sliceNumber; }
522 int64_t defaultSliceBudgetMS() const { return defaultTimeBudgetMS_; }
524 bool isIncrementalGc() const { return isIncremental; }
525 bool isFullGc() const { return isFull; }
526 bool isCompactingGc() const { return isCompacting; }
527 bool didCompactZones() const { return isCompacting && zonesCompacted; }
529 bool areGrayBitsValid() const { return grayBitsValid; }
530 void setGrayBitsInvalid() { grayBitsValid = false; }
532 mozilla::TimeStamp lastGCStartTime() const { return lastGCStartTime_; }
533 mozilla::TimeStamp lastGCEndTime() const { return lastGCEndTime_; }
535 bool majorGCRequested() const {
536 return majorGCTriggerReason != JS::GCReason::NO_REASON;
539 double computeHeapGrowthFactor(size_t lastBytes);
540 size_t computeTriggerBytes(double growthFactor, size_t lastBytes);
542 inline void updateOnFreeArenaAlloc(const TenuredChunkInfo& info);
543 void updateOnArenaFree() { ++numArenasFreeCommitted; }
545 ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
546 ChunkPool& availableChunks(const AutoLockGC& lock) {
547 return availableChunks_.ref();
549 ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
550 const ChunkPool& fullChunks(const AutoLockGC& lock) const {
551 return fullChunks_.ref();
553 const ChunkPool& availableChunks(const AutoLockGC& lock) const {
554 return availableChunks_.ref();
556 const ChunkPool& emptyChunks(const AutoLockGC& lock) const {
557 return emptyChunks_.ref();
559 using NonEmptyChunksIter = ChainedIterator<ChunkPool::Iter, 2>;
560 NonEmptyChunksIter allNonEmptyChunks(const AutoLockGC& lock) {
561 return NonEmptyChunksIter(availableChunks(lock), fullChunks(lock));
563 uint32_t minEmptyChunkCount(const AutoLockGC& lock) const {
564 return minEmptyChunkCount_;
566 uint32_t maxEmptyChunkCount(const AutoLockGC& lock) const {
567 return maxEmptyChunkCount_;
569 #ifdef DEBUG
570 void verifyAllChunks();
571 #endif
573 TenuredChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
574 void recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock);
576 #ifdef JS_GC_ZEAL
577 void startVerifyPreBarriers();
578 void endVerifyPreBarriers();
579 void finishVerifier();
580 bool isVerifyPreBarriersEnabled() const { return verifyPreData.refNoCheck(); }
581 bool shouldYieldForZeal(ZealMode mode);
582 #else
583 bool isVerifyPreBarriersEnabled() const { return false; }
584 #endif
586 #ifdef JSGC_HASH_TABLE_CHECKS
587 void checkHashTablesAfterMovingGC();
588 #endif
590 // Crawl the heap to check whether an arbitary pointer is within a cell of
591 // the given kind. (TraceKind::Null means to ignore the kind.)
592 bool isPointerWithinTenuredCell(
593 void* ptr, JS::TraceKind traceKind = JS::TraceKind::Null);
595 #ifdef DEBUG
596 bool hasZone(Zone* target);
597 #endif
599 // Queue memory memory to be freed on a background thread if possible.
600 void queueUnusedLifoBlocksForFree(LifoAlloc* lifo);
601 void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo);
602 void queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers);
604 // Public here for ReleaseArenaLists and FinalizeTypedArenas.
605 void releaseArena(Arena* arena, const AutoLockGC& lock);
607 // Allocator internals.
608 static void* refillFreeListInGC(Zone* zone, AllocKind thingKind);
610 // Delayed marking.
611 void delayMarkingChildren(gc::Cell* cell, MarkColor color);
612 bool hasDelayedMarking() const;
613 void markAllDelayedChildren(ShouldReportMarkTime reportTime);
616 * Concurrent sweep infrastructure.
618 void startTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
619 void joinTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
620 void updateHelperThreadCount();
621 size_t parallelWorkerCount() const;
623 // Parallel marking.
624 bool initOrDisableParallelMarking();
625 [[nodiscard]] bool updateMarkersVector();
626 size_t markingWorkerCount() const;
628 // WeakRefs
629 bool registerWeakRef(HandleObject target, HandleObject weakRef);
630 void traceKeptObjects(JSTracer* trc);
632 JS::GCReason lastStartReason() const { return initialReason; }
634 void updateAllocationRates();
636 // Allocator internals
637 static void* refillFreeList(JSContext* cx, AllocKind thingKind);
638 void attemptLastDitchGC(JSContext* cx);
640 // Test mark queue.
641 #ifdef DEBUG
642 const GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>& getTestMarkQueue()
643 const;
644 [[nodiscard]] bool appendTestMarkQueue(const JS::Value& value);
645 void clearTestMarkQueue();
646 size_t testMarkQueuePos() const;
647 #endif
649 private:
650 enum IncrementalResult { ResetIncremental = 0, Ok };
652 [[nodiscard]] bool setParameter(JSGCParamKey key, uint32_t value,
653 AutoLockGC& lock);
654 void resetParameter(JSGCParamKey key, AutoLockGC& lock);
655 uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
656 bool setThreadParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
657 void resetThreadParameter(JSGCParamKey key, AutoLockGC& lock);
658 void updateThreadDataStructures(AutoLockGC& lock);
660 JS::GCOptions gcOptions() const { return maybeGcOptions.ref().ref(); }
662 TriggerResult checkHeapThreshold(Zone* zone, const HeapSize& heapSize,
663 const HeapThreshold& heapThreshold);
665 void updateSchedulingStateOnGCStart();
666 void updateSchedulingStateAfterCollection(mozilla::TimeStamp currentTime);
667 void updateAllGCStartThresholds();
669 // For ArenaLists::allocateFromArena()
670 friend class ArenaLists;
671 TenuredChunk* pickChunk(AutoLockGCBgAlloc& lock);
672 Arena* allocateArena(TenuredChunk* chunk, Zone* zone, AllocKind kind,
673 ShouldCheckThresholds checkThresholds,
674 const AutoLockGC& lock);
677 * Return the list of chunks that can be released outside the GC lock.
678 * Must be called either during the GC or with the GC lock taken.
680 friend class BackgroundDecommitTask;
681 bool tooManyEmptyChunks(const AutoLockGC& lock);
682 ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
683 void freeEmptyChunks(const AutoLockGC& lock);
684 void prepareToFreeChunk(TenuredChunkInfo& info);
685 void setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock);
686 void setMaxEmptyChunkCount(uint32_t value, const AutoLockGC& lock);
688 friend class BackgroundAllocTask;
689 bool wantBackgroundAllocation(const AutoLockGC& lock) const;
690 void startBackgroundAllocTaskIfIdle();
692 void requestMajorGC(JS::GCReason reason);
693 SliceBudget defaultBudget(JS::GCReason reason, int64_t millis);
694 bool maybeIncreaseSliceBudget(SliceBudget& budget);
695 bool maybeIncreaseSliceBudgetForLongCollections(SliceBudget& budget);
696 bool maybeIncreaseSliceBudgetForUrgentCollections(SliceBudget& budget);
697 IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI,
698 JS::GCReason reason,
699 SliceBudget& budget);
700 void checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
701 const char* trigger);
702 IncrementalResult resetIncrementalGC(GCAbortReason reason);
704 // Assert if the system state is such that we should never
705 // receive a request to do GC work.
706 void checkCanCallAPI();
708 // Check if the system state is such that GC has been supressed
709 // or otherwise delayed.
710 [[nodiscard]] bool checkIfGCAllowedInCurrentState(JS::GCReason reason);
712 gcstats::ZoneGCStats scanZonesBeforeGC();
714 void setGCOptions(JS::GCOptions options);
716 void collect(bool nonincrementalByAPI, const SliceBudget& budget,
717 JS::GCReason reason) JS_HAZ_GC_CALL;
720 * Run one GC "cycle" (either a slice of incremental GC or an entire
721 * non-incremental GC).
723 * Returns:
724 * * ResetIncremental if we "reset" an existing incremental GC, which would
725 * force us to run another cycle or
726 * * Ok otherwise.
728 [[nodiscard]] IncrementalResult gcCycle(bool nonincrementalByAPI,
729 const SliceBudget& budgetArg,
730 JS::GCReason reason);
731 bool shouldRepeatForDeadZone(JS::GCReason reason);
733 void incrementalSlice(SliceBudget& budget, JS::GCReason reason,
734 bool budgetWasIncreased);
736 bool mightSweepInThisSlice(bool nonIncremental);
737 void collectNurseryFromMajorGC(JS::GCReason reason);
738 void collectNursery(JS::GCOptions options, JS::GCReason reason,
739 gcstats::PhaseKind phase);
741 friend class AutoCallGCCallbacks;
742 void maybeCallGCCallback(JSGCStatus status, JS::GCReason reason);
744 void startCollection(JS::GCReason reason);
746 void purgeRuntime();
747 [[nodiscard]] bool beginPreparePhase(JS::GCReason reason,
748 AutoGCSession& session);
749 bool prepareZonesForCollection(JS::GCReason reason, bool* isFullOut);
750 void unmarkWeakMaps();
751 void endPreparePhase(JS::GCReason reason);
752 void beginMarkPhase(AutoGCSession& session);
753 bool shouldPreserveJITCode(JS::Realm* realm,
754 const mozilla::TimeStamp& currentTime,
755 JS::GCReason reason, bool canAllocateMoreCode,
756 bool isActiveCompartment);
757 void discardJITCodeForGC();
758 void startBackgroundFreeAfterMinorGC();
759 void relazifyFunctionsForShrinkingGC();
760 void purgePropMapTablesForShrinkingGC();
761 void purgeSourceURLsForShrinkingGC();
762 void traceRuntimeForMajorGC(JSTracer* trc, AutoGCSession& session);
763 void traceRuntimeAtoms(JSTracer* trc);
764 void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark);
765 void traceEmbeddingBlackRoots(JSTracer* trc);
766 void traceEmbeddingGrayRoots(JSTracer* trc);
767 IncrementalProgress traceEmbeddingGrayRoots(JSTracer* trc,
768 SliceBudget& budget);
769 void checkNoRuntimeRoots(AutoGCSession& session);
770 void maybeDoCycleCollection();
771 void findDeadCompartments();
773 friend class BackgroundMarkTask;
774 enum ParallelMarking : bool {
775 SingleThreadedMarking = false,
776 AllowParallelMarking = true
778 IncrementalProgress markUntilBudgetExhausted(
779 SliceBudget& sliceBudget,
780 ParallelMarking allowParallelMarking = SingleThreadedMarking,
781 ShouldReportMarkTime reportTime = ReportMarkTime);
782 bool canMarkInParallel() const;
783 bool initParallelMarkers();
784 void finishParallelMarkers();
786 bool hasMarkingWork(MarkColor color) const;
788 void drainMarkStack();
790 #ifdef DEBUG
791 void assertNoMarkingWork() const;
792 #else
793 void assertNoMarkingWork() const {}
794 #endif
796 void markDelayedChildren(gc::Arena* arena, MarkColor color);
797 void processDelayedMarkingList(gc::MarkColor color);
798 void rebuildDelayedMarkingList();
799 void appendToDelayedMarkingList(gc::Arena** listTail, gc::Arena* arena);
800 void resetDelayedMarking();
801 template <typename F>
802 void forEachDelayedMarkingArena(F&& f);
804 template <class ZoneIterT>
805 IncrementalProgress markWeakReferences(SliceBudget& budget);
806 IncrementalProgress markWeakReferencesInCurrentGroup(SliceBudget& budget);
807 template <class ZoneIterT>
808 IncrementalProgress markGrayRoots(SliceBudget& budget,
809 gcstats::PhaseKind phase);
810 void markBufferedGrayRoots(JS::Zone* zone);
811 IncrementalProgress markAllWeakReferences();
812 void markAllGrayReferences(gcstats::PhaseKind phase);
814 // The mark queue is a testing-only feature for controlling mark ordering and
815 // yield timing.
816 enum MarkQueueProgress {
817 QueueYielded, // End this incremental GC slice, if possible
818 QueueComplete, // Done with the queue
819 QueueSuspended // Continue the GC without ending the slice
821 MarkQueueProgress processTestMarkQueue();
823 // GC Sweeping. Implemented in Sweeping.cpp.
824 void beginSweepPhase(JS::GCReason reason, AutoGCSession& session);
825 void dropStringWrappers();
826 void groupZonesForSweeping(JS::GCReason reason);
827 [[nodiscard]] bool findSweepGroupEdges();
828 [[nodiscard]] bool addEdgesForMarkQueue();
829 void getNextSweepGroup();
830 void resetGrayList(Compartment* comp);
831 IncrementalProgress beginMarkingSweepGroup(JS::GCContext* gcx,
832 SliceBudget& budget);
833 IncrementalProgress markGrayRootsInCurrentGroup(JS::GCContext* gcx,
834 SliceBudget& budget);
835 IncrementalProgress markGray(JS::GCContext* gcx, SliceBudget& budget);
836 IncrementalProgress endMarkingSweepGroup(JS::GCContext* gcx,
837 SliceBudget& budget);
838 void markIncomingGrayCrossCompartmentPointers();
839 IncrementalProgress beginSweepingSweepGroup(JS::GCContext* gcx,
840 SliceBudget& budget);
841 void initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
842 const FinalizePhase& phase);
843 IncrementalProgress markDuringSweeping(JS::GCContext* gcx,
844 SliceBudget& budget);
845 void updateAtomsBitmap();
846 void sweepCCWrappers();
847 void sweepRealmGlobals();
848 void sweepEmbeddingWeakPointers(JS::GCContext* gcx);
849 void sweepMisc();
850 void sweepCompressionTasks();
851 void sweepWeakMaps();
852 void sweepUniqueIds();
853 void sweepObjectsWithWeakPointers();
854 void sweepDebuggerOnMainThread(JS::GCContext* gcx);
855 void sweepJitDataOnMainThread(JS::GCContext* gcx);
856 void sweepFinalizationObserversOnMainThread();
857 void traceWeakFinalizationObserverEdges(JSTracer* trc, Zone* zone);
858 void sweepWeakRefs();
859 IncrementalProgress endSweepingSweepGroup(JS::GCContext* gcx,
860 SliceBudget& budget);
861 IncrementalProgress performSweepActions(SliceBudget& sliceBudget);
862 void startSweepingAtomsTable();
863 IncrementalProgress sweepAtomsTable(JS::GCContext* gcx, SliceBudget& budget);
864 IncrementalProgress sweepWeakCaches(JS::GCContext* gcx, SliceBudget& budget);
865 IncrementalProgress finalizeAllocKind(JS::GCContext* gcx,
866 SliceBudget& budget);
867 bool foregroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind thingKind,
868 js::SliceBudget& sliceBudget,
869 SortedArenaList& sweepList);
870 IncrementalProgress sweepPropMapTree(JS::GCContext* gcx, SliceBudget& budget);
871 void endSweepPhase(bool destroyingRuntime);
872 void queueZonesAndStartBackgroundSweep(ZoneList&& zones);
873 void sweepFromBackgroundThread(AutoLockHelperThreadState& lock);
874 void startBackgroundFree();
875 void freeFromBackgroundThread(AutoLockHelperThreadState& lock);
876 void sweepBackgroundThings(ZoneList& zones);
877 void backgroundFinalize(JS::GCContext* gcx, Zone* zone, AllocKind kind,
878 Arena** empty);
879 void assertBackgroundSweepingFinished();
881 bool allCCVisibleZonesWereCollected();
882 void sweepZones(JS::GCContext* gcx, bool destroyingRuntime);
883 bool shouldDecommit() const;
884 void startDecommit();
885 void decommitEmptyChunks(const bool& cancel, AutoLockGC& lock);
886 void decommitFreeArenas(const bool& cancel, AutoLockGC& lock);
887 void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);
  // Compacting GC. Implemented in Compacting.cpp.
  bool shouldCompact();
  void beginCompactPhase();
  IncrementalProgress compactPhase(JS::GCReason reason,
                                   SliceBudget& sliceBudget,
                                   AutoGCSession& session);
  void endCompactPhase();
  void sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone);
  bool canRelocateZone(Zone* zone) const;
  [[nodiscard]] bool relocateArenas(Zone* zone, JS::GCReason reason,
                                    Arena*& relocatedListOut,
                                    SliceBudget& sliceBudget);
  // Fix up pointers that refer to cells moved by compaction.
  void updateCellPointers(Zone* zone, AllocKinds kinds);
  void updateAllCellPointers(MovingTracer* trc, Zone* zone);
  void updateZonePointersToRelocatedCells(Zone* zone);
  void updateRuntimePointersToRelocatedCells(AutoGCSession& session);
  // Dispose of the source arenas once their contents have been relocated.
  void clearRelocatedArenas(Arena* arenaList, JS::GCReason reason);
  void clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
                                            JS::GCReason reason,
                                            const AutoLockGC& lock);
  void releaseRelocatedArenas(Arena* arenaList);
  void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
                                              const AutoLockGC& lock);
#ifdef DEBUG
  // Debug builds can memory-protect relocated arenas instead of releasing
  // them, to catch stray pointers into the old locations.
  void protectOrReleaseRelocatedArenas(Arena* arenaList, JS::GCReason reason);
  void protectAndHoldArenas(Arena* arenaList);
  void unprotectHeldRelocatedArenas(const AutoLockGC& lock);
  void releaseHeldRelocatedArenas();
  void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
#endif
  /*
   * Whether to immediately trigger a slice after a background task
   * finishes. This may not happen at a convenient time, so the consideration is
   * whether the slice will run quickly or may take a long time.
   */
925 enum ShouldTriggerSliceWhenFinished : bool {
926 DontTriggerSliceWhenFinished = false,
927 TriggerSliceWhenFinished = true
  // Wait for |task| to finish within |budget|; |triggerSlice| controls whether
  // another slice is requested when the task completes.
  IncrementalProgress waitForBackgroundTask(
      GCParallelTask& task, const SliceBudget& budget, bool shouldPauseMutator,
      ShouldTriggerSliceWhenFinished triggerSlice);

  void maybeRequestGCAfterBackgroundTask(const AutoLockHelperThreadState& lock);
  void cancelRequestedGCAfterBackgroundTask();
  void finishCollection(JS::GCReason reason);
  void maybeStopPretenuring();
  void checkGCStateNotInUse();
  IncrementalProgress joinBackgroundMarkTask();

#ifdef JS_GC_ZEAL
  // Incremental marking validation (zeal builds only).
  void computeNonIncrementalMarkingForValidation(AutoGCSession& session);
  void validateIncrementalMarking();
  void finishMarkingValidation();
#endif

#ifdef DEBUG
  void checkForCompartmentMismatches();
#endif

  // Invoke embedder-registered callbacks at the corresponding GC phases.
  void callFinalizeCallbacks(JS::GCContext* gcx, JSFinalizeStatus status) const;
  void callWeakPointerZonesCallbacks(JSTracer* trc) const;
  void callWeakPointerCompartmentCallbacks(JSTracer* trc,
                                           JS::Compartment* comp) const;
  void callDoCycleCollectionCallback(JSContext* cx);
957 public:
958 JSRuntime* const rt;
960 // Embedders can use this zone however they wish.
961 MainThreadData<JS::Zone*> systemZone;
963 MainThreadData<JS::GCContext> mainThreadContext;
965 private:
966 // For parent runtimes, a zone containing atoms that is shared by child
967 // runtimes.
968 MainThreadData<Zone*> sharedAtomsZone_;
970 // All zones in the runtime. The first element is always the atoms zone.
971 MainThreadOrGCTaskData<ZoneVector> zones_;
973 // Any activity affecting the heap.
974 MainThreadOrGCTaskData<JS::HeapState> heapState_;
975 friend class AutoHeapSession;
976 friend class JS::AutoEnterCycleCollection;
978 UnprotectedData<gcstats::Statistics> stats_;
980 public:
981 js::StringStats stringStats;
983 Vector<UniquePtr<GCMarker>, 1, SystemAllocPolicy> markers;
985 // Delayed marking support in case we OOM pushing work onto the mark stack.
986 MainThreadOrGCTaskData<js::gc::Arena*> delayedMarkingList;
987 MainThreadOrGCTaskData<bool> delayedMarkingWorkAdded;
988 #ifdef DEBUG
989 /* Count of arenas that are currently in the stack. */
990 MainThreadOrGCTaskData<size_t> markLaterArenas;
991 #endif
993 SweepingTracer sweepingTracer;
995 /* Track total GC heap size for this runtime. */
996 HeapSize heapSize;
998 /* GC scheduling state and parameters. */
999 GCSchedulingTunables tunables;
1000 GCSchedulingState schedulingState;
1001 MainThreadData<bool> fullGCRequested;
1003 // Helper thread configuration.
1004 MainThreadData<double> helperThreadRatio;
1005 MainThreadData<size_t> maxHelperThreads;
1006 MainThreadOrGCTaskData<size_t> helperThreadCount;
1007 MainThreadData<size_t> markingThreadCount;
1009 // State used for managing atom mark bitmaps in each zone.
1010 AtomMarkingRuntime atomMarking;
  /*
   * Pointer to a callback that, if set, will be used to create a
   * budget for internally-triggered GCs.
   */
1016 MainThreadData<JS::CreateSliceBudgetCallback> createBudgetCallback;
1018 private:
1019 // Arenas used for permanent things created at startup and shared by child
1020 // runtimes.
1021 MainThreadData<ArenaList> permanentAtoms;
1022 MainThreadData<ArenaList> permanentFatInlineAtoms;
1023 MainThreadData<ArenaList> permanentWellKnownSymbols;
1025 // When chunks are empty, they reside in the emptyChunks pool and are
1026 // re-used as needed or eventually expired if not re-used. The emptyChunks
1027 // pool gets refilled from the background allocation task heuristically so
1028 // that empty chunks should always be available for immediate allocation
1029 // without syscalls.
1030 GCLockData<ChunkPool> emptyChunks_;
1032 // Chunks which have had some, but not all, of their arenas allocated live
1033 // in the available chunk lists. When all available arenas in a chunk have
1034 // been allocated, the chunk is removed from the available list and moved
1035 // to the fullChunks pool. During a GC, if all arenas are free, the chunk
1036 // is moved back to the emptyChunks pool and scheduled for eventual
1037 // release.
1038 GCLockData<ChunkPool> availableChunks_;
1040 // When all arenas in a chunk are used, it is moved to the fullChunks pool
1041 // so as to reduce the cost of operations on the available lists.
1042 GCLockData<ChunkPool> fullChunks_;
  /*
   * JSGC_MIN_EMPTY_CHUNK_COUNT
   * JSGC_MAX_EMPTY_CHUNK_COUNT
   *
   * Controls the number of empty chunks reserved for future allocation.
   *
   * They can be read off main thread by the background allocation task and the
   * background decommit task.
   */
1053 GCLockData<uint32_t> minEmptyChunkCount_;
1054 GCLockData<uint32_t> maxEmptyChunkCount_;
1056 MainThreadData<RootedValueMap> rootsHash;
1058 // An incrementing id used to assign unique ids to cells that require one.
1059 MainThreadData<uint64_t> nextCellUniqueId_;
  /*
   * Number of the committed arenas in all GC chunks including empty chunks.
   */
1064 mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
1065 MainThreadData<VerifyPreTracer*> verifyPreData;
1067 MainThreadData<mozilla::TimeStamp> lastGCStartTime_;
1068 MainThreadData<mozilla::TimeStamp> lastGCEndTime_;
1070 WriteOnceData<bool> initialized;
1071 MainThreadData<bool> incrementalGCEnabled;
1072 MainThreadData<bool> perZoneGCEnabled;
1074 mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
1076 /* During shutdown, the GC needs to clean up every possible object. */
1077 MainThreadData<bool> cleanUpEverything;
  /*
   * The gray bits can become invalid if UnmarkGray overflows the stack. A
   * full GC will reset this bit, since it fills in all the gray bits.
   */
1083 UnprotectedData<bool> grayBitsValid;
1085 mozilla::Atomic<JS::GCReason, mozilla::ReleaseAcquire> majorGCTriggerReason;
1087 /* Incremented at the start of every minor GC. */
1088 MainThreadData<uint64_t> minorGCNumber;
1090 /* Incremented at the start of every major GC. */
1091 MainThreadData<uint64_t> majorGCNumber;
1093 /* Incremented on every GC slice or minor collection. */
1094 MainThreadData<uint64_t> number;
1096 /* Incremented on every GC slice. */
1097 MainThreadData<uint64_t> sliceNumber;
1099 /* Whether the currently running GC can finish in multiple slices. */
1100 MainThreadOrGCTaskData<bool> isIncremental;
1102 /* Whether all zones are being collected in first GC slice. */
1103 MainThreadData<bool> isFull;
1105 /* Whether the heap will be compacted at the end of GC. */
1106 MainThreadData<bool> isCompacting;
1108 /* Whether to use parallel marking. */
1109 MainThreadData<ParallelMarking> useParallelMarking;
1111 /* The invocation kind of the current GC, set at the start of collection. */
1112 MainThreadOrGCTaskData<mozilla::Maybe<JS::GCOptions>> maybeGcOptions;
1114 /* The initial GC reason, taken from the first slice. */
1115 MainThreadData<JS::GCReason> initialReason;
  /*
   * The current incremental GC phase. This is also used internally in
   * non-incremental GC.
   */
1121 MainThreadOrGCTaskData<State> incrementalState;
1123 /* The incremental state at the start of this slice. */
1124 MainThreadOrGCTaskData<State> initialState;
  /* Whether to pay attention to zeal settings in this incremental slice. */
1127 #ifdef JS_GC_ZEAL
1128 MainThreadData<bool> useZeal;
1129 #else
1130 const bool useZeal;
1131 #endif
1133 /* Indicates that the last incremental slice exhausted the mark stack. */
1134 MainThreadData<bool> lastMarkSlice;
1136 // Whether it's currently safe to yield to the mutator in an incremental GC.
1137 MainThreadData<bool> safeToYield;
1139 // Whether to do any marking caused by barriers on a background thread during
1140 // incremental sweeping, while also sweeping zones which have finished
1141 // marking.
1142 MainThreadData<bool> markOnBackgroundThreadDuringSweeping;
1144 // Whether any sweeping and decommitting will run on a separate GC helper
1145 // thread.
1146 MainThreadData<bool> useBackgroundThreads;
1148 // Whether we have already discarded JIT code for all collected zones in this
1149 // slice.
1150 MainThreadData<bool> haveDiscardedJITCodeThisSlice;
1152 #ifdef DEBUG
1153 /* Shutdown has started. Further collections must be shutdown collections. */
1154 MainThreadData<bool> hadShutdownGC;
1155 #endif
1157 /* Singly linked list of zones to be swept in the background. */
1158 HelperThreadLockData<ZoneList> backgroundSweepZones;
  /*
   * Whether to trigger a GC slice after a background task is complete, so that
   * the collector can continue or finish collecting. This is only used for the
   * tasks that run concurrently with the mutator, which are background
   * finalization and background decommit.
   */
1166 HelperThreadLockData<bool> requestSliceAfterBackgroundTask;
  /*
   * Free LIFO blocks are transferred to these allocators before being freed on
   * a background thread.
   */
1172 HelperThreadLockData<LifoAlloc> lifoBlocksToFree;
1173 MainThreadData<LifoAlloc> lifoBlocksToFreeAfterMinorGC;
1174 HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC;
1176 /* Index of current sweep group (for stats). */
1177 MainThreadData<unsigned> sweepGroupIndex;
  /*
   * Incremental sweep state.
   */
1182 MainThreadData<JS::Zone*> sweepGroups;
1183 MainThreadOrGCTaskData<JS::Zone*> currentSweepGroup;
1184 MainThreadData<UniquePtr<SweepAction>> sweepActions;
1185 MainThreadOrGCTaskData<JS::Zone*> sweepZone;
1186 MainThreadOrGCTaskData<AllocKind> sweepAllocKind;
1187 MainThreadData<mozilla::Maybe<AtomsTable::SweepIterator>> maybeAtomsToSweep;
1188 MainThreadOrGCTaskData<mozilla::Maybe<WeakCacheSweepIterator>>
1189 weakCachesToSweep;
1190 MainThreadData<bool> abortSweepAfterCurrentGroup;
1191 MainThreadOrGCTaskData<IncrementalProgress> sweepMarkResult;
1193 #ifdef DEBUG
  /*
   * List of objects to mark at the beginning of a GC for testing purposes. May
   * also contain string directives to change mark color or wait until different
   * phases of the GC.
   *
   * This is a WeakCache because not everything in this list is guaranteed to
   * end up marked (eg if you insert an object from an already-processed sweep
   * group in the middle of an incremental GC). Also, the mark queue is not
   * used during shutdown GCs. In either case, unmarked objects may need to be
   * discarded.
   */
1205 JS::WeakCache<GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>>
1206 testMarkQueue;
1208 /* Position within the test mark queue. */
1209 size_t queuePos = 0;
1211 /* The test marking queue might want to be marking a particular color. */
1212 mozilla::Maybe<js::gc::MarkColor> queueMarkColor;
1214 // During gray marking, delay AssertCellIsNotGray checks by
1215 // recording the cell pointers here and checking after marking has
1216 // finished.
1217 MainThreadData<Vector<const Cell*, 0, SystemAllocPolicy>>
1218 cellsToAssertNotGray;
1219 friend void js::gc::detail::AssertCellIsNotGray(const Cell*);
1220 #endif
1222 friend class SweepGroupsIter;
  /*
   * Incremental compacting state.
   */
1227 MainThreadData<bool> startedCompacting;
1228 MainThreadData<ZoneList> zonesToMaybeCompact;
1229 MainThreadData<size_t> zonesCompacted;
1230 #ifdef DEBUG
1231 GCLockData<Arena*> relocatedArenasToRelease;
1232 #endif
1234 #ifdef JS_GC_ZEAL
1235 MainThreadData<MarkingValidator*> markingValidator;
1236 #endif
  /*
   * Default budget for incremental GC slice. See js/SliceBudget.h.
   *
   * JSGC_SLICE_TIME_BUDGET_MS
   * pref: javascript.options.mem.gc_incremental_slice_ms,
   */
1244 MainThreadData<int64_t> defaultTimeBudgetMS_;
  /*
   * We disable incremental GC if we encounter a Class with a trace hook
   * that does not implement write barriers.
   */
1250 MainThreadData<bool> incrementalAllowed;
  /*
   * Whether compacting GC is enabled globally.
   *
   * JSGC_COMPACTING_ENABLED
   * pref: javascript.options.mem.gc_compacting
   */
1258 MainThreadData<bool> compactingEnabled;
  /*
   * Whether parallel marking is enabled globally.
   *
   * JSGC_PARALLEL_MARKING_ENABLED
   * pref: javascript.options.mem.gc_parallel_marking
   */
1266 MainThreadData<bool> parallelMarkingEnabled;
1268 MainThreadData<bool> rootsRemoved;
  /*
   * These options control the zealousness of the GC. At every allocation,
   * nextScheduled is decremented. When it reaches zero we do a full GC.
   *
   * At this point, if zeal_ is one of the types that trigger periodic
   * collection, then nextScheduled is reset to the value of zealFrequency.
   * Otherwise, no additional GCs take place.
   *
   * You can control these values in several ways:
   *   - Set the JS_GC_ZEAL environment variable
   *   - Call gczeal() or schedulegc() from inside shell-executed JS code
   *     (see the help for details)
   *
   * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
   * whenever we are notified that GC roots have been removed). This option is
   * mainly useful to embedders.
   *
   * We use zeal_ == 4 to enable write barrier verification. See the comment
   * in gc/Verifier.cpp for more information about this.
   *
   * zeal_ values from 8 to 10 periodically run different types of
   * incremental GC.
   *
   * zeal_ value 14 performs periodic shrinking collections.
   */
1295 #ifdef JS_GC_ZEAL
1296 static_assert(size_t(ZealMode::Count) <= 32,
1297 "Too many zeal modes to store in a uint32_t");
1298 MainThreadData<uint32_t> zealModeBits;
1299 MainThreadData<int> zealFrequency;
1300 MainThreadData<int> nextScheduled;
1301 MainThreadData<bool> deterministicOnly;
1302 MainThreadData<int> zealSliceBudget;
1303 MainThreadData<size_t> maybeMarkStackLimit;
1305 MainThreadData<PersistentRooted<GCVector<JSObject*, 0, SystemAllocPolicy>>>
1306 selectedForMarking;
1307 #endif
1309 MainThreadData<bool> fullCompartmentChecks;
1311 MainThreadData<uint32_t> gcCallbackDepth;
1313 MainThreadData<Callback<JSGCCallback>> gcCallback;
1314 MainThreadData<Callback<JS::DoCycleCollectionCallback>>
1315 gcDoCycleCollectionCallback;
1316 MainThreadData<Callback<JSObjectsTenuredCallback>> tenuredCallback;
1317 MainThreadData<CallbackVector<JSFinalizeCallback>> finalizeCallbacks;
1318 MainThreadOrGCTaskData<Callback<JSHostCleanupFinalizationRegistryCallback>>
1319 hostCleanupFinalizationRegistryCallback;
1320 MainThreadData<CallbackVector<JSWeakPointerZonesCallback>>
1321 updateWeakPointerZonesCallbacks;
1322 MainThreadData<CallbackVector<JSWeakPointerCompartmentCallback>>
1323 updateWeakPointerCompartmentCallbacks;
1324 MainThreadData<CallbackVector<JS::GCNurseryCollectionCallback>>
1325 nurseryCollectionCallbacks;
  /*
   * The trace operations to trace embedding-specific GC roots. One is for
   * tracing through black roots and the other is for tracing through gray
   * roots. The black/gray distinction is only relevant to the cycle
   * collector.
   */
1333 MainThreadData<CallbackVector<JSTraceDataOp>> blackRootTracers;
1334 MainThreadOrGCTaskData<Callback<JSGrayRootsTracer>> grayRootTracer;
1336 /* Always preserve JIT code during GCs, for testing. */
1337 MainThreadData<bool> alwaysPreserveCode;
1339 /* Count of the number of zones that are currently in page load. */
1340 MainThreadData<size_t> inPageLoadCount;
1342 MainThreadData<bool> lowMemoryState;
  /*
   * General purpose GC lock, used for synchronising operations on
   * arenas and during parallel marking.
   */
1348 friend class js::AutoLockGC;
1349 friend class js::AutoLockGCBgAlloc;
1350 js::Mutex lock MOZ_UNANNOTATED;
1352 /* Lock used to synchronise access to delayed marking state. */
1353 js::Mutex delayedMarkingLock MOZ_UNANNOTATED;
1355 friend class BackgroundSweepTask;
1356 friend class BackgroundFreeTask;
1358 BackgroundAllocTask allocTask;
1359 BackgroundUnmarkTask unmarkTask;
1360 BackgroundMarkTask markTask;
1361 BackgroundSweepTask sweepTask;
1362 BackgroundFreeTask freeTask;
1363 BackgroundDecommitTask decommitTask;
  /*
   * During incremental sweeping, this field temporarily holds the arenas of
   * the current AllocKind being swept in order of increasing free space.
   */
1369 MainThreadData<SortedArenaList> incrementalSweepList;
1371 MainThreadData<Nursery> nursery_;
1373 // The store buffer used to track tenured to nursery edges for generational
1374 // GC. This is accessed off main thread when sweeping WeakCaches.
1375 MainThreadOrGCTaskData<gc::StoreBuffer> storeBuffer_;
1377 mozilla::TimeStamp lastLastDitchTime;
1379 // The last time per-zone allocation rates were updated.
1380 MainThreadData<mozilla::TimeStamp> lastAllocRateUpdateTime;
1382 // Total collector time since per-zone allocation rates were last updated.
1383 MainThreadData<mozilla::TimeDuration> collectorTimeSinceAllocRateUpdate;
1385 friend class MarkingValidator;
1386 friend class AutoEnterIteration;
1389 /* Prevent compartments and zones from being collected during iteration. */
1390 class MOZ_RAII AutoEnterIteration {
1391 GCRuntime* gc;
1393 public:
1394 explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
1395 ++gc->numActiveZoneIters;
1398 ~AutoEnterIteration() {
1399 MOZ_ASSERT(gc->numActiveZoneIters);
1400 --gc->numActiveZoneIters;
1404 #ifdef JS_GC_ZEAL
1406 inline bool GCRuntime::hasZealMode(ZealMode mode) {
1407 static_assert(size_t(ZealMode::Limit) < sizeof(zealModeBits) * 8,
1408 "Zeal modes must fit in zealModeBits");
1409 return zealModeBits & (1 << uint32_t(mode));
1412 inline void GCRuntime::clearZealMode(ZealMode mode) {
1413 zealModeBits &= ~(1 << uint32_t(mode));
1414 MOZ_ASSERT(!hasZealMode(mode));
1417 inline bool GCRuntime::needZealousGC() {
1418 if (nextScheduled > 0 && --nextScheduled == 0) {
1419 if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::GenerationalGC) ||
1420 hasZealMode(ZealMode::IncrementalMultipleSlices) ||
1421 hasZealMode(ZealMode::Compact) || hasIncrementalTwoSliceZealMode()) {
1422 nextScheduled = zealFrequency;
1424 return true;
1426 return false;
1429 inline bool GCRuntime::hasIncrementalTwoSliceZealMode() {
1430 return hasZealMode(ZealMode::YieldBeforeRootMarking) ||
1431 hasZealMode(ZealMode::YieldBeforeMarking) ||
1432 hasZealMode(ZealMode::YieldBeforeSweeping) ||
1433 hasZealMode(ZealMode::YieldBeforeSweepingAtoms) ||
1434 hasZealMode(ZealMode::YieldBeforeSweepingCaches) ||
1435 hasZealMode(ZealMode::YieldBeforeSweepingObjects) ||
1436 hasZealMode(ZealMode::YieldBeforeSweepingNonObjects) ||
1437 hasZealMode(ZealMode::YieldBeforeSweepingPropMapTrees) ||
1438 hasZealMode(ZealMode::YieldWhileGrayMarking);
1441 #else
// Stubs used when JS_GC_ZEAL is not configured: zeal-mode checks compile
// down to constant false / no-ops.
inline bool GCRuntime::hasZealMode(ZealMode mode) { return false; }
inline void GCRuntime::clearZealMode(ZealMode mode) {}
inline bool GCRuntime::needZealousGC() { return false; }
inline bool GCRuntime::hasIncrementalTwoSliceZealMode() { return false; }
1446 #endif
// Whether |lastAnimationTime| is recent enough relative to |currentTime| to
// treat the page as animating (presumably used by GC scheduling heuristics;
// implementation is elsewhere).
bool IsCurrentlyAnimating(const mozilla::TimeStamp& lastAnimationTime,
                          const mozilla::TimeStamp& currentTime);
1451 } /* namespace gc */
1452 } /* namespace js */
1454 #endif