/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Implementation of GC sweeping.
 *
 * In the SpiderMonkey GC, 'sweeping' is used to mean two things:
 *  - updating data structures to remove pointers to dead GC things and
 *    updating pointers to moved GC things
 *  - finalizing dead GC things
 *
 * Furthermore, the GC carries out gray and weak marking after the start of the
 * sweep phase. This is also implemented in this file.
 */
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/TimeStamp.h"

#include "builtin/FinalizationRegistryObject.h"
#include "builtin/WeakRefObject.h"
#include "debugger/DebugAPI.h"
#include "gc/AllocKind.h"
#include "gc/FinalizationObservers.h"
#include "gc/GCInternals.h"
#include "gc/GCLock.h"
#include "gc/GCProbes.h"
#include "gc/GCRuntime.h"
#include "gc/ParallelWork.h"
#include "gc/Statistics.h"
#include "gc/TraceKind.h"
#include "gc/WeakMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "proxy/DeadObjectProxy.h"
#include "vm/BigIntType.h"
#include "vm/HelperThreads.h"
#include "vm/JSContext.h"
#include "vm/WrapperObject.h"

#include "gc/PrivateIterators-inl.h"
#include "vm/GeckoProfiler-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/PropMap-inl.h"
#include "vm/Shape-inl.h"
#include "vm/StringType-inl.h"
using namespace js;
using namespace js::gc;

using mozilla::TimeStamp;

struct js::gc::FinalizePhase {
  gcstats::PhaseKind statsPhase;
  AllocKinds kinds;
};
/*
 * Finalization order for objects swept incrementally on the main thread.
 */
static constexpr FinalizePhase ForegroundObjectFinalizePhase = {
    gcstats::PhaseKind::FINALIZE_OBJECT,
    {AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT4,
     AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT16}};
/*
 * Finalization order for GC things swept incrementally on the main thread.
 */
static constexpr FinalizePhase ForegroundNonObjectFinalizePhase = {
    gcstats::PhaseKind::FINALIZE_NON_OBJECT,
    {AllocKind::SCRIPT, AllocKind::JITCODE}};
/*
 * Finalization order for GC things swept on the background thread.
 */
static constexpr FinalizePhase BackgroundFinalizePhases[] = {
    {gcstats::PhaseKind::FINALIZE_OBJECT,
     {AllocKind::FUNCTION, AllocKind::FUNCTION_EXTENDED,
      AllocKind::OBJECT0_BACKGROUND, AllocKind::OBJECT2_BACKGROUND,
      AllocKind::ARRAYBUFFER4, AllocKind::OBJECT4_BACKGROUND,
      AllocKind::ARRAYBUFFER8, AllocKind::OBJECT8_BACKGROUND,
      AllocKind::ARRAYBUFFER12, AllocKind::OBJECT12_BACKGROUND,
      AllocKind::ARRAYBUFFER16, AllocKind::OBJECT16_BACKGROUND}},
    {gcstats::PhaseKind::FINALIZE_NON_OBJECT,
     {AllocKind::SCOPE, AllocKind::REGEXP_SHARED, AllocKind::FAT_INLINE_STRING,
      AllocKind::STRING, AllocKind::EXTERNAL_STRING, AllocKind::FAT_INLINE_ATOM,
      AllocKind::ATOM, AllocKind::SYMBOL, AllocKind::BIGINT, AllocKind::SHAPE,
      AllocKind::BASE_SHAPE, AllocKind::GETTER_SETTER,
      AllocKind::COMPACT_PROP_MAP, AllocKind::NORMAL_PROP_MAP,
      AllocKind::DICT_PROP_MAP}}};
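
// Illustrative note (not from the original source): each FinalizePhase table
// above is consumed by iterating its kinds in declaration order, e.g.:
//
//   for (const auto& phase : BackgroundFinalizePhases) {
//     for (auto kind : phase.kinds) {
//       backgroundFinalize(gcx, zone, kind, &emptyArenas);
//     }
//   }
//
// as done in GCRuntime::sweepBackgroundThings below, so the order of kinds in
// these initializers is the finalization order.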
template <typename T>
inline size_t Arena::finalize(JS::GCContext* gcx, AllocKind thingKind,
                              size_t thingSize) {
  /* Enforce requirements on size of T. */
  MOZ_ASSERT(thingSize % CellAlignBytes == 0);
  MOZ_ASSERT(thingSize >= MinCellSize);
  MOZ_ASSERT(thingSize <= 255);

  MOZ_ASSERT(allocated());
  MOZ_ASSERT(thingKind == getAllocKind());
  MOZ_ASSERT(thingSize == getThingSize());
  MOZ_ASSERT(!onDelayedMarkingList_);
  uint_fast16_t firstThing = firstThingOffset(thingKind);
  uint_fast16_t firstThingOrSuccessorOfLastMarkedThing = firstThing;
  uint_fast16_t lastThing = ArenaSize - thingSize;

  FreeSpan newListHead;
  FreeSpan* newListTail = &newListHead;
  size_t nmarked = 0, nfinalized = 0;
  for (ArenaCellIterUnderFinalize cell(this); !cell.done(); cell.next()) {
    T* t = cell.as<T>();
    if (TenuredThingIsMarkedAny(t)) {
      uint_fast16_t thing = uintptr_t(t) & ArenaMask;
      if (thing != firstThingOrSuccessorOfLastMarkedThing) {
        // We just finished passing over one or more free things,
        // so record a new FreeSpan.
        newListTail->initBounds(firstThingOrSuccessorOfLastMarkedThing,
                                thing - thingSize, this);
        newListTail = newListTail->nextSpanUnchecked(this);
      }
      firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
      nmarked++;
    } else {
      t->finalize(gcx);
      AlwaysPoison(t, JS_SWEPT_TENURED_PATTERN, thingSize,
                   MemCheckKind::MakeUndefined);
      gcprobes::TenuredFinalize(t);
      nfinalized++;
    }
  }
  if constexpr (std::is_same_v<T, JSObject>) {
    if (isNewlyCreated_) {
      zone->pretenuring.updateCellCountsInNewlyCreatedArenas(
          nmarked + nfinalized, nmarked);
    }
  }
  isNewlyCreated_ = 0;

  if (thingKind == AllocKind::STRING ||
      thingKind == AllocKind::FAT_INLINE_STRING) {
    zone->markedStrings += nmarked;
    zone->finalizedStrings += nfinalized;
  }
  if (nmarked == 0) {
    // Do nothing. The caller will update the arena appropriately.
    MOZ_ASSERT(newListTail == &newListHead);
    DebugOnlyPoison(data, JS_SWEPT_TENURED_PATTERN, sizeof(data),
                    MemCheckKind::MakeUndefined);
    return nmarked;
  }
  MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
  uint_fast16_t lastMarkedThing =
      firstThingOrSuccessorOfLastMarkedThing - thingSize;
  if (lastThing == lastMarkedThing) {
    // If the last thing was marked, we will have already set the bounds of
    // the final span, and we just need to terminate the list.
    newListTail->initAsEmpty();
  } else {
    // Otherwise, end the list with a span that covers the final stretch of
    // free things.
    newListTail->initFinal(firstThingOrSuccessorOfLastMarkedThing, lastThing,
                           this);
  }

  firstFreeSpan = newListHead;
#ifdef DEBUG
  size_t nfree = numFreeThings(thingSize);
  MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingKind));
#endif

  return nmarked;
}
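
// Worked example (illustrative only): for an arena whose cells, in address
// order, are
//
//   [M][F][F][M][M][F]    (M = marked, F = free; one slot per thingSize)
//
// the loop above emits one FreeSpan covering slots 1-2 when it reaches the
// marked cell in slot 3, and because the last cell is free, the tail branch
// emits a final span covering slot 5 via initFinal(). The result is
// nmarked == 3 and nfinalized == 3, with the free spans threaded through the
// arena itself.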
/*
 * Finalize arenas from the src list, inserting them into the appropriate
 * destination size bins; arenas that end up completely empty are recycled.
 */
template <typename T>
static inline bool FinalizeTypedArenas(JS::GCContext* gcx, ArenaList& src,
                                       SortedArenaList& dest,
                                       AllocKind thingKind,
                                       SliceBudget& budget) {
  MOZ_ASSERT(gcx->isFinalizing());

  size_t thingSize = Arena::thingSize(thingKind);
  size_t thingsPerArena = Arena::thingsPerArena(thingKind);
  size_t markCount = 0;

  auto updateMarkCount = mozilla::MakeScopeExit([&] {
    GCRuntime* gc = &gcx->runtimeFromAnyThread()->gc;
    gc->stats().addCount(gcstats::COUNT_CELLS_MARKED, markCount);
  });

  while (Arena* arena = src.takeFirstArena()) {
    size_t nmarked = arena->finalize<T>(gcx, thingKind, thingSize);
    size_t nfree = thingsPerArena - nmarked;

    markCount += nmarked;

    if (nmarked) {
      dest.insertAt(arena, nfree);
    } else {
      arena->chunk()->recycleArena(arena, dest, thingsPerArena);
    }

    budget.step(thingsPerArena);
    if (budget.isOverBudget()) {
      return false;
    }
  }

  return true;
}
/*
 * Finalize the list of arenas.
 */
static bool FinalizeArenas(JS::GCContext* gcx, ArenaList& src,
                           SortedArenaList& dest, AllocKind thingKind,
                           SliceBudget& budget) {
  switch (thingKind) {
#define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
                    compact)                                                 \
  case AllocKind::allocKind:                                                 \
    return FinalizeTypedArenas<type>(gcx, src, dest, thingKind, budget);
    FOR_EACH_ALLOCKIND(EXPAND_CASE)
#undef EXPAND_CASE

    default:
      MOZ_CRASH("Invalid alloc kind");
  }
}
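
// Illustrative expansion (not part of the build): for a single alloc kind the
// EXPAND_CASE macro above generates something like
//
//   case AllocKind::SCRIPT:
//     return FinalizeTypedArenas<js::BaseScript>(gcx, src, dest, thingKind,
//                                                budget);
//
// so the switch dispatches to a finalizer instantiated for the concrete cell
// type of each kind; the exact type parameter comes from the
// FOR_EACH_ALLOCKIND table.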
void GCRuntime::initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
                                    const FinalizePhase& phase) {
  gcstats::AutoPhase ap(stats(), phase.statsPhase);
  for (auto kind : phase.kinds) {
    zone->arenas.initBackgroundSweep(kind);
  }
}
void ArenaLists::initBackgroundSweep(AllocKind thingKind) {
  MOZ_ASSERT(IsBackgroundFinalized(thingKind));
  MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);

  if (!collectingArenaList(thingKind).isEmpty()) {
    concurrentUse(thingKind) = ConcurrentUse::BackgroundFinalize;
  }
}
void GCRuntime::backgroundFinalize(JS::GCContext* gcx, Zone* zone,
                                   AllocKind kind, Arena** empty) {
  MOZ_ASSERT(empty);

  ArenaLists* lists = &zone->arenas;
  ArenaList& arenas = lists->collectingArenaList(kind);
  if (arenas.isEmpty()) {
    MOZ_ASSERT(lists->concurrentUse(kind) == ArenaLists::ConcurrentUse::None);
    return;
  }

  SortedArenaList finalizedSorted(Arena::thingsPerArena(kind));

  auto unlimited = SliceBudget::unlimited();
  FinalizeArenas(gcx, arenas, finalizedSorted, kind, unlimited);
  MOZ_ASSERT(arenas.isEmpty());

  finalizedSorted.extractEmpty(empty);

  // When marking begins, all arenas are moved from arenaLists to
  // collectingArenaLists. When the mutator runs, new arenas are allocated in
  // arenaLists. Now that finalization is complete, we want to merge these
  // lists back together.
  //
  // We must take the GC lock to be able to safely modify the ArenaList;
  // however, this does not by itself make the changes visible to all threads,
  // as not all threads take the GC lock to read the ArenaLists.
  // That safety is provided by the ReleaseAcquire memory ordering of the
  // background finalize state, which we explicitly set as the final step.
  {
    AutoLockGC lock(this);
    MOZ_ASSERT(lists->concurrentUse(kind) ==
               ArenaLists::ConcurrentUse::BackgroundFinalize);
    lists->mergeFinalizedArenas(kind, finalizedSorted);
  }

  lists->concurrentUse(kind) = ArenaLists::ConcurrentUse::None;
}
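
// A minimal sketch of the synchronization pattern used above, assuming
// concurrentUse(kind) is backed by a ReleaseAcquire atomic (per the comment
// in the function body):
//
//   // background finalize thread:
//   {
//     AutoLockGC lock(gc);
//     lists->mergeFinalizedArenas(kind, finalizedSorted);  // plain writes
//   }
//   lists->concurrentUse(kind) = ConcurrentUse::None;      // release store
//
//   // main (allocating) thread:
//   if (lists->concurrentUse(kind) == ConcurrentUse::None) {  // acquire load
//     // safe to read the merged arena lists
//   }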
// After finalizing arenas, merge the following to get the final state of an
// arena list:
//  - arenas allocated during marking
//  - arenas allocated during sweeping
//  - finalized arenas
void ArenaLists::mergeFinalizedArenas(AllocKind kind,
                                      SortedArenaList& finalizedArenas) {
#ifdef DEBUG
  // Updating arena lists off-thread requires taking the GC lock because the
  // main thread uses these when allocating.
  if (IsBackgroundFinalized(kind)) {
    runtimeFromAnyThread()->gc.assertCurrentThreadHasLockedGC();
  }
#endif

  ArenaList& arenas = arenaList(kind);

  ArenaList allocatedDuringCollection = std::move(arenas);
  arenas = finalizedArenas.toArenaList();
  arenas.insertListWithCursorAtEnd(allocatedDuringCollection);

  collectingArenaList(kind).clear();
}
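
// Illustrative result (assumption, not from the original source): after the
// moves above, the list for `kind` holds the finalized arenas followed by the
// arenas allocated while the collection was in progress
// (allocatedDuringCollection), and the collecting list is left empty, ready
// for the next collection.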
void ArenaLists::queueForegroundThingsForSweep() {
  gcCompactPropMapArenasToUpdate =
      collectingArenaList(AllocKind::COMPACT_PROP_MAP).head();
  gcNormalPropMapArenasToUpdate =
      collectingArenaList(AllocKind::NORMAL_PROP_MAP).head();
}
void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
  if (zones.isEmpty()) {
    return;
  }

  JS::GCContext* gcx = TlsGCContext.get();
  MOZ_ASSERT(gcx->isFinalizing());

  // Sweep zones in order. The atoms zone must be finalized last as other
  // zones may have direct pointers into it.
  while (!zones.isEmpty()) {
    Zone* zone = zones.removeFront();
    MOZ_ASSERT(zone->isGCFinished());

    TimeStamp startTime = TimeStamp::Now();

    Arena* emptyArenas = zone->arenas.takeSweptEmptyArenas();

    // We must finalize thing kinds in the order specified by
    // BackgroundFinalizePhases.
    for (const auto& phase : BackgroundFinalizePhases) {
      for (auto kind : phase.kinds) {
        backgroundFinalize(gcx, zone, kind, &emptyArenas);
      }
    }

    // Release any arenas that are now empty.
    //
    // Empty arenas are only released after everything has been finalized so
    // that it's still possible to get a thing's zone after the thing has been
    // finalized. The HeapPtr destructor depends on this, and this allows
    // HeapPtrs between things of different alloc kind regardless of
    // finalization order.
    //
    // Periodically drop and reacquire the GC lock to avoid blocking the main
    // thread from allocating chunks.
    static const size_t LockReleasePeriod = 32;

    while (emptyArenas) {
      AutoLockGC lock(this);
      for (size_t i = 0; i < LockReleasePeriod && emptyArenas; i++) {
        Arena* arena = emptyArenas;
        emptyArenas = emptyArenas->next;
        releaseArena(arena, lock);
      }
    }

    // Record time spent sweeping this zone.
    TimeStamp endTime = TimeStamp::Now();
    zone->perZoneGCTime += endTime - startTime;
  }
}
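
// Arithmetic sketch of the lock batching above (illustrative): with
// LockReleasePeriod == 32, releasing e.g. 1000 empty arenas takes and drops
// the GC lock ceil(1000 / 32) == 32 times, bounding how long the main thread
// can be blocked waiting to allocate chunks.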
void GCRuntime::assertBackgroundSweepingFinished() {
#ifdef DEBUG
  {
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
  }

  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    for (auto kind : AllAllocKinds()) {
      MOZ_ASSERT_IF(state() != State::Prepare && state() != State::Mark &&
                        state() != State::Sweep,
                    zone->arenas.collectingArenaList(kind).isEmpty());
      MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(kind));
    }
  }
#endif
}
void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList&& zones) {
  {
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(!requestSliceAfterBackgroundTask);
    backgroundSweepZones.ref().appendList(std::move(zones));
    if (useBackgroundThreads) {
      sweepTask.startOrRunIfIdle(lock);
    }
  }
  if (!useBackgroundThreads) {
    sweepTask.join();
    sweepTask.runFromMainThread();
  }
}
BackgroundSweepTask::BackgroundSweepTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::SWEEP, GCUse::Finalizing) {}

void BackgroundSweepTask::run(AutoLockHelperThreadState& lock) {
  gc->sweepFromBackgroundThread(lock);
}
void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState& lock) {
  do {
    ZoneList zones;
    zones.appendList(std::move(backgroundSweepZones.ref()));

    AutoUnlockHelperThreadState unlock(lock);
    sweepBackgroundThings(zones);

    // The main thread may call queueZonesAndStartBackgroundSweep() while this
    // is running so we must check there is no more work after releasing the
    // lock.
  } while (!backgroundSweepZones.ref().isEmpty());

  maybeRequestGCAfterBackgroundTask(lock);
}
void GCRuntime::waitBackgroundSweepEnd() {
  sweepTask.join();
  if (state() != State::Sweep) {
    assertBackgroundSweepingFinished();
  }
}
void GCRuntime::startBackgroundFree() {
  AutoLockHelperThreadState lock;
  freeTask.startOrRunIfIdle(lock);
}

BackgroundFreeTask::BackgroundFreeTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
  // This can occur outside GCs so doesn't have a stats phase.
}

void BackgroundFreeTask::run(AutoLockHelperThreadState& lock) {
  gc->freeFromBackgroundThread(lock);
}
void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState& lock) {
  do {
    LifoAlloc lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
    lifoBlocks.transferFrom(&lifoBlocksToFree.ref());

    Nursery::BufferSet buffers;
    std::swap(buffers, buffersToFreeAfterMinorGC.ref());

    AutoUnlockHelperThreadState unlock(lock);

    lifoBlocks.freeAll();

    JS::GCContext* gcx = TlsGCContext.get();
    for (Nursery::BufferSet::Range r = buffers.all(); !r.empty();
         r.popFront()) {
      // Malloc memory associated with nursery objects is not tracked as these
      // are assumed to be short lived.
      gcx->freeUntracked(r.front());
    }
  } while (!lifoBlocksToFree.ref().isEmpty() ||
           !buffersToFreeAfterMinorGC.ref().empty());
}

void GCRuntime::waitBackgroundFreeEnd() { freeTask.join(); }
template <class ZoneIterT>
IncrementalProgress GCRuntime::markWeakReferences(
    SliceBudget& incrementalBudget) {
  MOZ_ASSERT(!marker().isWeakMarking());

  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK_WEAK);

  auto unlimited = SliceBudget::unlimited();
  SliceBudget& budget =
      marker().incrementalWeakMapMarkingEnabled ? incrementalBudget : unlimited;

  // Ensure we don't return to the mutator while we're still in weak marking
  // mode.
  auto leaveOnExit =
      mozilla::MakeScopeExit([&] { marker().leaveWeakMarkingMode(); });

  if (marker().enterWeakMarkingMode()) {
    // If there was an 'enter-weak-marking-mode' token in the queue, then it
    // and everything after it will still be in the queue so we can process
    // them now.
    while (processTestMarkQueue() == QueueYielded) {
    }

    // Do not rely on the information about not-yet-marked weak keys that have
    // been collected by barriers. Clear out the gcEphemeronEdges entries and
    // rebuild the full table. Note that this is a cross-zone operation;
    // delegate zone entries will be populated by map zone traversals, so
    // everything needs to be cleared first, then populated.
    if (!marker().incrementalWeakMapMarkingEnabled) {
      for (ZoneIterT zone(this); !zone.done(); zone.next()) {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!zone->gcEphemeronEdges().clear()) {
          oomUnsafe.crash("clearing weak keys when entering weak marking mode");
        }
      }
    }

    for (ZoneIterT zone(this); !zone.done(); zone.next()) {
      if (zone->enterWeakMarkingMode(&marker(), budget) == NotFinished) {
        return NotFinished;
      }
    }
  }

  bool markedAny = true;
  while (markedAny) {
    if (!marker().markUntilBudgetExhausted(budget)) {
      MOZ_ASSERT(marker().incrementalWeakMapMarkingEnabled);
      return NotFinished;
    }

    markedAny = false;

    if (!marker().isWeakMarking()) {
      for (ZoneIterT zone(this); !zone.done(); zone.next()) {
        markedAny |= WeakMapBase::markZoneIteratively(zone, &marker());
      }
    }

    markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker());
  }

  assertNoMarkingWork();

  return Finished;
}
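
// Note (illustrative summary): the loop above is a fixpoint computation.
// Marking a weakmap key can make its value live, which can in turn make more
// keys live, so WeakMapBase::markZoneIteratively() and the jitcode global
// table are re-scanned until a pass marks nothing new (markedAny stays
// false).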
IncrementalProgress GCRuntime::markWeakReferencesInCurrentGroup(
    SliceBudget& budget) {
  return markWeakReferences<SweepGroupZonesIter>(budget);
}
template <class ZoneIterT>
IncrementalProgress GCRuntime::markGrayRoots(SliceBudget& budget,
                                             gcstats::PhaseKind phase) {
  MOZ_ASSERT(marker().markColor() == MarkColor::Gray);

  gcstats::AutoPhase ap(stats(), phase);

  AutoUpdateLiveCompartments updateLive(this);
  marker().setRootMarkingMode(true);
  auto guard =
      mozilla::MakeScopeExit([this]() { marker().setRootMarkingMode(false); });

  IncrementalProgress result =
      traceEmbeddingGrayRoots(marker().tracer(), budget);
  if (result == NotFinished) {
    return NotFinished;
  }

  Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
      marker().tracer(), Compartment::GrayEdges);

  return Finished;
}
IncrementalProgress GCRuntime::markAllWeakReferences() {
  SliceBudget budget = SliceBudget::unlimited();
  return markWeakReferences<GCZonesIter>(budget);
}

void GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase) {
  SliceBudget budget = SliceBudget::unlimited();
  markGrayRoots<GCZonesIter>(budget, phase);
}
void GCRuntime::dropStringWrappers() {
  /*
   * String "wrappers" are dropped on GC because their presence would require
   * us to sweep the wrappers in all compartments every time we sweep a
   * compartment group.
   */
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    zone->dropStringWrappersOnGC();
  }
}
/*
 * Group zones that must be swept at the same time.
 *
 * From the point of view of the mutator, groups of zones transition atomically
 * from marking to sweeping. If compartment A has an edge to an unmarked object
 * in compartment B, then we must not start sweeping A in a later slice than we
 * start sweeping B. That's because a write barrier in A could lead to the
 * unmarked object in B becoming marked. However, if we had already swept that
 * object, we would be in trouble.
 *
 * If we consider these dependencies as a graph, then all the compartments in
 * any strongly-connected component of this graph must start sweeping in the
 * same slice.
 *
 * Tarjan's algorithm is used to calculate the components.
 */
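
/*
 * Example (illustrative): if compartment A has a wrapper to an unmarked
 * object in compartment B, there is an edge A -> B. If B also wraps an
 * unmarked object in A, then A and B form a cycle, so they belong to the same
 * strongly-connected component and must start sweeping in the same slice.
 */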
bool Compartment::findSweepGroupEdges() {
  Zone* source = zone();
  for (WrappedObjectCompartmentEnum e(this); !e.empty(); e.popFront()) {
    Compartment* targetComp = e.front();
    Zone* target = targetComp->zone();

    if (!target->isGCMarking() || source->hasSweepGroupEdgeTo(target)) {
      continue;
    }

    for (ObjectWrapperEnum e(this, targetComp); !e.empty(); e.popFront()) {
      JSObject* key = e.front().mutableKey();
      MOZ_ASSERT(key->zone() == target);

      // Add an edge to the wrapped object's zone to ensure that the wrapper
      // zone is not still being marked when we start sweeping the wrapped
      // zone. As an optimization, if the wrapped object is already marked
      // black there is no danger of later marking and we can skip this.
      if (key->isMarkedBlack()) {
        continue;
      }

      if (!source->addSweepGroupEdgeTo(target)) {
        return false;
      }

      // We don't need to consider any more wrappers for this target
      // compartment since we already added an edge.
      break;
    }
  }

  return true;
}
bool Zone::findSweepGroupEdges(Zone* atomsZone) {
  MOZ_ASSERT_IF(this != atomsZone, !isAtomsZone());

#ifdef DEBUG
  if (FinalizationObservers* observers = finalizationObservers()) {
    observers->checkTables();
  }
#endif

  // Any zone may have a pointer to an atom in the atoms zone, and these aren't
  // in the cross compartment map.
  if (atomsZone->wasGCStarted() && !addSweepGroupEdgeTo(atomsZone)) {
    return false;
  }

  for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
    if (!comp->findSweepGroupEdges()) {
      return false;
    }
  }

  return WeakMapBase::findSweepGroupEdgesForZone(this);
}
bool GCRuntime::addEdgesForMarkQueue() {
#ifdef DEBUG
  // Add edges between all objects mentioned in the test mark queue, since
  // otherwise they will get marked in a different order than their sweep
  // groups. Note that this is only done at the beginning of an incremental
  // collection, so it is possible for objects to be added later that do not
  // follow the sweep group ordering. These objects will wait until their sweep
  // group comes up, or will be skipped if their sweep group is already past.
  JS::Zone* prevZone = nullptr;
  for (Value val : testMarkQueue) {
    if (!val.isObject()) {
      continue;
    }
    JSObject* obj = &val.toObject();
    JS::Zone* zone = obj->zone();
    if (!zone->isGCMarking()) {
      continue;
    }
    if (prevZone && prevZone != zone) {
      if (!prevZone->addSweepGroupEdgeTo(zone)) {
        return false;
      }
    }
    prevZone = zone;
  }
#endif
  return true;
}
bool GCRuntime::findSweepGroupEdges() {
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    if (!zone->findSweepGroupEdges(atomsZone())) {
      return false;
    }
  }

  if (!addEdgesForMarkQueue()) {
    return false;
  }

  return DebugAPI::findSweepGroupEdges(rt);
}
void GCRuntime::groupZonesForSweeping(JS::GCReason reason) {
#ifdef DEBUG
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
  }
#endif

  JSContext* cx = rt->mainContextFromOwnThread();
  ZoneComponentFinder finder(cx);
  if (!isIncremental || !findSweepGroupEdges()) {
    finder.useOneComponent();
  }

#ifdef JS_GC_ZEAL
  // Use one component for two-slice zeal modes.
  if (useZeal && hasIncrementalTwoSliceZealMode()) {
    finder.useOneComponent();
  }
#endif

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    MOZ_ASSERT(zone->isGCMarking());
    finder.addNode(zone);
  }
  sweepGroups = finder.getResultsList();
  currentSweepGroup = sweepGroups;
  sweepGroupIndex = 1;

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->clearSweepGroupEdges();
  }

#ifdef DEBUG
  unsigned idx = sweepGroupIndex;
  for (Zone* head = currentSweepGroup; head; head = head->nextGroup()) {
    for (Zone* zone = head; zone; zone = zone->nextNodeInGroup()) {
      MOZ_ASSERT(zone->isGCMarking());
      zone->gcSweepGroupIndex = idx;
    }
    idx++;
  }

  MOZ_ASSERT_IF(!isIncremental, !currentSweepGroup->nextGroup());
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
  }
#endif
}
void GCRuntime::getNextSweepGroup() {
  currentSweepGroup = currentSweepGroup->nextGroup();
  ++sweepGroupIndex;
  if (!currentSweepGroup) {
    abortSweepAfterCurrentGroup = false;
    return;
  }

  MOZ_ASSERT_IF(abortSweepAfterCurrentGroup, !isIncremental);
  if (!isIncremental) {
    ZoneComponentFinder::mergeGroups(currentSweepGroup);
  }

  for (Zone* zone = currentSweepGroup; zone; zone = zone->nextNodeInGroup()) {
    MOZ_ASSERT(zone->gcState() == zone->initialMarkingState());
    MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
  }

  if (abortSweepAfterCurrentGroup) {
    // Abort collection of subsequent sweep groups.
    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      MOZ_ASSERT(!zone->gcNextGraphComponent);
      zone->changeGCState(zone->initialMarkingState(), Zone::NoGC);
      zone->arenas.unmarkPreMarkedFreeCells();
      zone->arenas.mergeArenasFromCollectingLists();
      zone->clearGCSliceThresholds();
    }

    for (SweepGroupCompartmentsIter comp(rt); !comp.done(); comp.next()) {
      resetGrayList(comp);
    }

    abortSweepAfterCurrentGroup = false;
    currentSweepGroup = nullptr;
  }
}
/*
 * Gray marking:
 *
 * At the end of collection, anything reachable from a gray root that has not
 * otherwise been marked black must be marked gray.
 *
 * This means that when marking things gray we must not allow marking to leave
 * the current compartment group, as that could result in things being marked
 * gray when they might subsequently be marked black. To achieve this, when we
 * find a cross compartment pointer we don't mark the referent but add it to a
 * singly-linked list of incoming gray pointers that is stored with each
 * compartment.
 *
 * The list head is stored in Compartment::gcIncomingGrayPointers and contains
 * cross compartment wrapper objects. The next pointer is stored in the second
 * extra slot of the cross compartment wrapper.
 *
 * The list is created during gray marking when one of the
 * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
 * current compartment group. This calls DelayCrossCompartmentGrayMarking to
 * push the referring object onto the list.
 *
 * The list is traversed and then unlinked in
 * GCRuntime::markIncomingGrayCrossCompartmentPointers.
 */
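
/*
 * Illustrative layout (not from the original source): after pushing wrappers
 * w1 then w2 that both point into compartment C,
 *
 *   C->gcIncomingGrayPointers -> w2
 *   w2's gray-link reserved slot -> w1
 *   w1's gray-link reserved slot -> null (end of list)
 *
 * i.e. DelayCrossCompartmentGrayMarking pushes at the head, and the list is
 * threaded through the wrappers' reserved slots.
 */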
static bool IsGrayListObject(JSObject* obj) {
  MOZ_ASSERT(obj);
  return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
}

/* static */
unsigned ProxyObject::grayLinkReservedSlot(JSObject* obj) {
  MOZ_ASSERT(IsGrayListObject(obj));
  return CrossCompartmentWrapperObject::GrayLinkReservedSlot;
}
#ifdef DEBUG
static void AssertNotOnGrayList(JSObject* obj) {
  MOZ_ASSERT_IF(
      IsGrayListObject(obj),
      GetProxyReservedSlot(obj, ProxyObject::grayLinkReservedSlot(obj))
          .isUndefined());
}
#endif

static void AssertNoWrappersInGrayList(JSRuntime* rt) {
#ifdef DEBUG
  for (CompartmentsIter c(rt); !c.done(); c.next()) {
    MOZ_ASSERT(!c->gcIncomingGrayPointers);
    for (Compartment::ObjectWrapperEnum e(c); !e.empty(); e.popFront()) {
      AssertNotOnGrayList(e.front().value().unbarrieredGet());
    }
  }
#endif
}
static JSObject* CrossCompartmentPointerReferent(JSObject* obj) {
  MOZ_ASSERT(IsGrayListObject(obj));
  return &obj->as<ProxyObject>().private_().toObject();
}
static JSObject* NextIncomingCrossCompartmentPointer(JSObject* prev,
                                                     bool unlink) {
  unsigned slot = ProxyObject::grayLinkReservedSlot(prev);
  JSObject* next = GetProxyReservedSlot(prev, slot).toObjectOrNull();
  MOZ_ASSERT_IF(next, IsGrayListObject(next));

  if (unlink) {
    SetProxyReservedSlot(prev, slot, UndefinedValue());
  }

  return next;
}
void js::gc::DelayCrossCompartmentGrayMarking(GCMarker* maybeMarker,
                                              JSObject* src) {
  MOZ_ASSERT_IF(!maybeMarker, !JS::RuntimeHeapIsBusy());
  MOZ_ASSERT(IsGrayListObject(src));
  MOZ_ASSERT(src->isMarkedGray());

  AutoTouchingGrayThings tgt;

  mozilla::Maybe<AutoLockGC> lock;
  if (maybeMarker && maybeMarker->isParallelMarking()) {
    // Synchronize access to JSCompartment::gcIncomingGrayPointers.
    //
    // TODO: Instead of building this list we could scan all incoming CCWs and
    // mark through gray ones when marking gray roots for a sweep group.
    lock.emplace(maybeMarker->runtime());
  }

  /* Called from MarkCrossCompartmentXXX functions. */
  unsigned slot = ProxyObject::grayLinkReservedSlot(src);
  JSObject* dest = CrossCompartmentPointerReferent(src);
  Compartment* comp = dest->compartment();

  if (GetProxyReservedSlot(src, slot).isUndefined()) {
    SetProxyReservedSlot(src, slot,
                         ObjectOrNullValue(comp->gcIncomingGrayPointers));
    comp->gcIncomingGrayPointers = src;
  } else {
    MOZ_ASSERT(GetProxyReservedSlot(src, slot).isObjectOrNull());
  }

#ifdef DEBUG
  /*
   * Assert that the object is in our list, also walking the list to check its
   * integrity.
   */
  JSObject* obj = comp->gcIncomingGrayPointers;
  bool found = false;
  while (obj) {
    if (obj == src) {
      found = true;
    }
    obj = NextIncomingCrossCompartmentPointer(obj, false);
  }
  MOZ_ASSERT(found);
#endif
}
void GCRuntime::markIncomingGrayCrossCompartmentPointers() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_INCOMING_GRAY);

  for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
    MOZ_ASSERT(c->zone()->isGCMarkingBlackAndGray());
    MOZ_ASSERT_IF(c->gcIncomingGrayPointers,
                  IsGrayListObject(c->gcIncomingGrayPointers));

    for (JSObject* src = c->gcIncomingGrayPointers; src;
         src = NextIncomingCrossCompartmentPointer(src, true)) {
      JSObject* dst = CrossCompartmentPointerReferent(src);
      MOZ_ASSERT(dst->compartment() == c);
      MOZ_ASSERT_IF(src->asTenured().isMarkedBlack(),
                    dst->asTenured().isMarkedBlack());

      if (src->asTenured().isMarkedGray()) {
        TraceManuallyBarrieredEdge(marker().tracer(), &dst,
                                   "cross-compartment gray pointer");
      }
    }

    c->gcIncomingGrayPointers = nullptr;
  }
}
static bool RemoveFromGrayList(JSObject* wrapper) {
  AutoTouchingGrayThings tgt;

  if (!IsGrayListObject(wrapper)) {
    return false;
  }

  unsigned slot = ProxyObject::grayLinkReservedSlot(wrapper);
  if (GetProxyReservedSlot(wrapper, slot).isUndefined()) {
    return false; /* Not on our list. */
  }

  JSObject* tail = GetProxyReservedSlot(wrapper, slot).toObjectOrNull();
  SetProxyReservedSlot(wrapper, slot, UndefinedValue());

  Compartment* comp = CrossCompartmentPointerReferent(wrapper)->compartment();
  JSObject* obj = comp->gcIncomingGrayPointers;
  if (obj == wrapper) {
    comp->gcIncomingGrayPointers = tail;
    return true;
  }

  while (obj) {
    unsigned slot = ProxyObject::grayLinkReservedSlot(obj);
    JSObject* next = GetProxyReservedSlot(obj, slot).toObjectOrNull();
    if (next == wrapper) {
      js::detail::SetProxyReservedSlotUnchecked(obj, slot,
                                                ObjectOrNullValue(tail));
      return true;
    }
    obj = next;
  }

  MOZ_CRASH("object not found in gray link list");
}
void GCRuntime::resetGrayList(Compartment* comp) {
  JSObject* src = comp->gcIncomingGrayPointers;
  while (src) {
    src = NextIncomingCrossCompartmentPointer(src, true);
  }
  comp->gcIncomingGrayPointers = nullptr;
}
#ifdef DEBUG
static bool HasIncomingCrossCompartmentPointers(JSRuntime* rt) {
  for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
    if (c->gcIncomingGrayPointers) {
      return true;
    }
  }

  return false;
}
#endif
void js::NotifyGCNukeWrapper(JSContext* cx, JSObject* wrapper) {
  MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper));

  /*
   * References to the target of the wrapper are being removed, so we no
   * longer have to remember to mark it.
   */
  RemoveFromGrayList(wrapper);

  /*
   * Clean up WeakRef maps which might include this wrapper.
   */
  JSObject* target = UncheckedUnwrapWithoutExpose(wrapper);
  if (target->is<WeakRefObject>()) {
    WeakRefObject* weakRef = &target->as<WeakRefObject>();
    if (weakRef->target()) {
      cx->runtime()->gc.nukeWeakRefWrapper(wrapper, weakRef);
    }
  }

  /*
   * Clean up FinalizationRecord objects which might be the target of this
   * wrapper.
   */
  if (target->is<FinalizationRecordObject>()) {
    auto* record = &target->as<FinalizationRecordObject>();
    cx->runtime()->gc.nukeFinalizationRecordWrapper(wrapper, record);
  }
}
enum {
  JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
  JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
};
unsigned js::NotifyGCPreSwap(JSObject* a, JSObject* b) {
  /*
   * Two objects in the same compartment are about to have their contents
   * swapped. If either of them is in our gray pointer list, then we remove
   * them from the lists, returning a bitset indicating what happened.
   */
  return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
         (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
}
void js::NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags) {
  /*
   * Two objects in the same compartment have had their contents swapped. If
   * either of them was in our gray pointer list, we re-add them.
   */
  if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED) {
    DelayCrossCompartmentGrayMarking(nullptr, b);
  }
  if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED) {
    DelayCrossCompartmentGrayMarking(nullptr, a);
  }
}
static inline void MaybeCheckWeakMapMarking(GCRuntime* gc) {
#if defined(JS_GC_ZEAL) || defined(DEBUG)
  bool shouldCheck;
#  if defined(DEBUG)
  shouldCheck = true;
#  else
  shouldCheck = gc->hasZealMode(ZealMode::CheckWeakMapMarking);
#  endif

  if (shouldCheck) {
    for (SweepGroupZonesIter zone(gc); !zone.done(); zone.next()) {
      MOZ_RELEASE_ASSERT(WeakMapBase::checkMarkingForZone(zone));
    }
  }
#endif
}
IncrementalProgress GCRuntime::beginMarkingSweepGroup(JS::GCContext* gcx,
                                                      SliceBudget& budget) {
#ifdef DEBUG
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
  assertNoMarkingWork();
  for (auto& marker : markers) {
    MOZ_ASSERT(marker->markColor() == MarkColor::Black);
  }
  MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
#endif

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  // Change state of current group to MarkBlackAndGray to restrict gray marking
  // to this group. Note that there may be pointers to the atoms zone, and
  // these will be marked through, as they are not marked with
  // TraceCrossCompartmentEdge.
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    zone->changeGCState(zone->initialMarkingState(), Zone::MarkBlackAndGray);
  }

  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);

  // Mark incoming gray pointers from previously swept compartments.
  markIncomingGrayCrossCompartmentPointers();

  return Finished;
}
IncrementalProgress GCRuntime::markGrayRootsInCurrentGroup(
    JS::GCContext* gcx, SliceBudget& budget) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);

  return markGrayRoots<SweepGroupZonesIter>(budget,
                                            gcstats::PhaseKind::MARK_GRAY);
}
IncrementalProgress GCRuntime::markGray(JS::GCContext* gcx,
                                        SliceBudget& budget) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  if (markUntilBudgetExhausted(budget, useParallelMarking) == NotFinished) {
    return NotFinished;
  }

  return Finished;
}
IncrementalProgress GCRuntime::endMarkingSweepGroup(JS::GCContext* gcx,
                                                    SliceBudget& budget) {
#ifdef DEBUG
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
  assertNoMarkingWork();
  for (auto& marker : markers) {
    MOZ_ASSERT(marker->markColor() == MarkColor::Black);
  }
  MOZ_ASSERT(!HasIncomingCrossCompartmentPointers(rt));
#endif

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
    return NotFinished;
  }

  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);

  // Mark transitively inside the current compartment group.
  if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
    return NotFinished;
  }

  MOZ_ASSERT(marker().isDrained());

  // We must not yield after this point before we start sweeping the group.
  safeToYield = false;

  MaybeCheckWeakMapMarking(this);

  return Finished;
}
// Causes the given WeakCache to be swept when run.
class ImmediateSweepWeakCacheTask : public GCParallelTask {
  Zone* zone;
  JS::detail::WeakCacheBase& cache;

 public:
  ImmediateSweepWeakCacheTask(GCRuntime* gc, Zone* zone,
                              JS::detail::WeakCacheBase& wc)
      : GCParallelTask(gc, gcstats::PhaseKind::SWEEP_WEAK_CACHES),
        zone(zone),
        cache(wc) {}

  ImmediateSweepWeakCacheTask(ImmediateSweepWeakCacheTask&& other) noexcept
      : GCParallelTask(std::move(other)),
        zone(other.zone),
        cache(other.cache) {}

  ImmediateSweepWeakCacheTask(const ImmediateSweepWeakCacheTask&) = delete;

  void run(AutoLockHelperThreadState& lock) override {
    AutoUnlockHelperThreadState unlock(lock);
    AutoSetThreadIsSweeping threadIsSweeping(zone);
    SweepingTracer trc(gc->rt);
    cache.traceWeak(&trc, &gc->storeBuffer());
  }
};
void GCRuntime::updateAtomsBitmap() {
  DenseBitmap marked;
  if (atomMarking.computeBitmapFromChunkMarkBits(rt, marked)) {
    for (GCZonesIter zone(this); !zone.done(); zone.next()) {
      atomMarking.refineZoneBitmapForCollectedZone(zone, marked);
    }
  } else {
    // Ignore OOM in computeBitmapFromChunkMarkBits. The
    // refineZoneBitmapForCollectedZone call can only remove atoms from the
    // zone bitmap, so it is conservative to just not call it.
  }

  atomMarking.markAtomsUsedByUncollectedZones(rt);

  // For convenience sweep these tables non-incrementally as part of bitmap
  // sweeping; they are likely to be much smaller than the main atoms table.
  SweepingTracer trc(rt);
  rt->symbolRegistry().traceWeak(&trc);
}
void GCRuntime::sweepCCWrappers() {
  SweepingTracer trc(rt);
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    zone->traceWeakCCWEdges(&trc);
  }
}

void GCRuntime::sweepRealmGlobals() {
  SweepingTracer trc(rt);
  for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(r->zone());
    r->traceWeakGlobalEdge(&trc);
  }
}
void GCRuntime::sweepMisc() {
  SweepingTracer trc(rt);
  for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(r->zone());
    r->traceWeakSavedStacks(&trc);
  }
  for (SweepGroupCompartmentsIter c(this); !c.done(); c.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(c->zone());
    c->traceWeakNativeIterators(&trc);
  }
}
void GCRuntime::sweepCompressionTasks() {
  JSRuntime* runtime = rt;

  // Attach finished compression tasks.
  AutoLockHelperThreadState lock;
  AttachFinishedCompressions(runtime, lock);
  SweepPendingCompressions(lock);
}
void GCRuntime::sweepWeakMaps() {
  SweepingTracer trc(rt);
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    /* No need to look up any more weakmap keys from this sweep group. */
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!zone->gcEphemeronEdges().clear()) {
      oomUnsafe.crash("clearing weak keys in beginSweepingSweepGroup()");
    }

    // Lock the storebuffer since this may access it when rehashing or
    // resizing the tables.
    AutoLockStoreBuffer lock(&storeBuffer());
    zone->sweepWeakMaps(&trc);
  }
}
void GCRuntime::sweepUniqueIds() {
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(zone);
    zone->sweepUniqueIds();
  }
}

void JS::Zone::sweepUniqueIds() {
  SweepingTracer trc(runtimeFromAnyThread());
  uniqueIds().traceWeak(&trc);
}
/* static */
bool UniqueIdGCPolicy::traceWeak(JSTracer* trc, Cell** keyp, uint64_t* valuep) {
  // Since this is only ever used for sweeping, we can optimize it for that
  // case. (Compacting GC updates this table manually when it moves a cell.)
  MOZ_ASSERT(trc->kind() == JS::TracerKind::Sweeping);
  return (*keyp)->isMarkedAny();
}
void GCRuntime::sweepFinalizationObserversOnMainThread() {
  // This calls back into the browser which expects to be called from the main
  // thread.
  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
  gcstats::AutoPhase ap2(stats(),
                         gcstats::PhaseKind::SWEEP_FINALIZATION_OBSERVERS);
  SweepingTracer trc(rt);
  AutoLockStoreBuffer lock(&storeBuffer());
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    traceWeakFinalizationObserverEdges(&trc, zone);
  }
}
void GCRuntime::startTask(GCParallelTask& task,
                          AutoLockHelperThreadState& lock) {
  if (!CanUseExtraThreads()) {
    AutoUnlockHelperThreadState unlock(lock);
    task.runFromMainThread();
    stats().recordParallelPhase(task.phaseKind, task.duration());
    return;
  }

  task.startWithLockHeld(lock);
}
void GCRuntime::joinTask(GCParallelTask& task,
                         AutoLockHelperThreadState& lock) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::JOIN_PARALLEL_TASKS);
  task.joinWithLockHeld(lock);
}
void GCRuntime::sweepDebuggerOnMainThread(JS::GCContext* gcx) {
  SweepingTracer trc(rt);
  AutoLockStoreBuffer lock(&storeBuffer());

  // Detach unreachable debuggers and global objects from each other.
  // This can modify weakmaps and so must happen before weakmap sweeping.
  DebugAPI::sweepAll(gcx);

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);

  // Sweep debug environment information. This performs lookups in the Zone's
  // unique IDs table and so must not happen in parallel with sweeping that
  // table.
  {
    gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_MISC);
    for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
      r->traceWeakDebugEnvironmentEdges(&trc);
    }
  }
}
void GCRuntime::sweepJitDataOnMainThread(JS::GCContext* gcx) {
  SweepingTracer trc(rt);
  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);

    // Bug 1071218: the following method has not yet been refactored to
    // work on a single zone-group at once.

    // Sweep entries containing about-to-be-finalized JitCode in the
    // JitcodeGlobalTable.
    jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
  }

  // Discard JIT code and trace weak edges in JitScripts to remove edges to
  // dying GC things. The latter is carried out as part of discardJitCode if
  // possible to avoid iterating all scripts in the zone twice.
  {
    gcstats::AutoPhase apdc(stats(), gcstats::PhaseKind::SWEEP_DISCARD_CODE);
    Zone::DiscardOptions options;
    options.traceWeakJitScripts = &trc;
    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      if (!haveDiscardedJITCodeThisSlice && !zone->isPreservingCode()) {
        zone->forceDiscardJitCode(gcx, options);
      } else {
        zone->traceWeakJitScripts(&trc);
      }
    }
  }

  // JitZone must be swept *after* discarding JIT code, because
  // Zone::discardJitCode might access CacheIRStubInfos deleted here.
  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);

    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      if (jit::JitZone* jitZone = zone->jitZone()) {
        jitZone->traceWeak(&trc, zone);
      }
    }

    JSContext* cx = rt->mainContextFromOwnThread();
    jit::TraceWeakJitActivationsInSweepingZones(cx, &trc);
  }
}
void GCRuntime::sweepObjectsWithWeakPointers() {
  SweepingTracer trc(rt);
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(zone);
    zone->sweepObjectsWithWeakPointers(&trc);
  }
}
void JS::Zone::sweepObjectsWithWeakPointers(JSTracer* trc) {
  MOZ_ASSERT(trc->traceWeakEdges());

  objectsWithWeakPointers.ref().mutableEraseIf([&](JSObject*& obj) {
    if (!TraceManuallyBarrieredWeakEdge(trc, &obj, "objectsWithWeakPointers")) {
      // Object itself is dead.
      return true;
    }

    // Call trace hook to sweep weak pointers.
    obj->getClass()->doTrace(trc, obj);
    return false;
  });
}
using WeakCacheTaskVector =
    mozilla::Vector<ImmediateSweepWeakCacheTask, 0, SystemAllocPolicy>;
// Call a functor for all weak caches that need to be swept in the current
// sweep group.
template <typename Functor>
static inline bool IterateWeakCaches(JSRuntime* rt, Functor f) {
  for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
    for (JS::detail::WeakCacheBase* cache : zone->weakCaches()) {
      if (!f(cache, zone.get())) {
        return false;
      }
    }
  }

  for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
    if (!f(cache, nullptr)) {
      return false;
    }
  }

  return true;
}
* rt
,
1439 WeakCacheTaskVector
* immediateTasks
) {
1440 // Start incremental sweeping for caches that support it or add to a vector
1441 // of sweep tasks to run on a helper thread.
1443 MOZ_ASSERT(immediateTasks
->empty());
1445 GCRuntime
* gc
= &rt
->gc
;
1447 IterateWeakCaches(rt
, [&](JS::detail::WeakCacheBase
* cache
, Zone
* zone
) {
1448 if (cache
->empty()) {
1452 // Caches that support incremental sweeping will be swept later.
1453 if (zone
&& cache
->setIncrementalBarrierTracer(&gc
->sweepingTracer
)) {
1457 return immediateTasks
->emplaceBack(gc
, zone
, *cache
);
1461 immediateTasks
->clearAndFree();
1467 static void SweepAllWeakCachesOnMainThread(JSRuntime
* rt
) {
1468 // If we ran out of memory, do all the work on the main thread.
1469 gcstats::AutoPhase
ap(rt
->gc
.stats(), gcstats::PhaseKind::SWEEP_WEAK_CACHES
);
1470 SweepingTracer
trc(rt
);
1471 IterateWeakCaches(rt
, [&](JS::detail::WeakCacheBase
* cache
, Zone
* zone
) {
1472 if (cache
->needsIncrementalBarrier()) {
1473 cache
->setIncrementalBarrierTracer(nullptr);
1475 cache
->traceWeak(&trc
, &rt
->gc
.storeBuffer());
void GCRuntime::sweepEmbeddingWeakPointers(JS::GCContext* gcx) {
  using namespace gcstats;

  AutoLockStoreBuffer lock(&storeBuffer());

  AutoPhase ap(stats(), PhaseKind::FINALIZE_START);
  callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_PREPARE);
  {
    AutoPhase ap2(stats(), PhaseKind::WEAK_ZONES_CALLBACK);
    callWeakPointerZonesCallbacks(&sweepingTracer);
  }
  {
    AutoPhase ap2(stats(), PhaseKind::WEAK_COMPARTMENT_CALLBACK);
    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
        callWeakPointerCompartmentCallbacks(&sweepingTracer, comp);
      }
    }
  }
  callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_START);
}
IncrementalProgress GCRuntime::beginSweepingSweepGroup(JS::GCContext* gcx,
                                                       SliceBudget& budget) {
  /*
   * Begin sweeping the group of zones in currentSweepGroup, performing
   * actions that must be done before yielding to caller.
   */

  using namespace gcstats;

  AutoSCC scc(stats(), sweepGroupIndex);

  bool sweepingAtoms = false;
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    /* Set the GC state to sweeping. */
    zone->changeGCState(Zone::MarkBlackAndGray, Zone::Sweep);

    /* Purge the ArenaLists before sweeping. */
    zone->arenas.checkSweepStateNotInUse();
    zone->arenas.unmarkPreMarkedFreeCells();
    zone->arenas.clearFreeLists();

    if (zone->isAtomsZone()) {
      sweepingAtoms = true;
    }
  }

#ifdef DEBUG
  for (const auto* cell : cellsToAssertNotGray.ref()) {
    JS::AssertCellIsNotGray(cell);
  }
  cellsToAssertNotGray.ref().clearAndFree();
#endif

  // Cancel off thread compilation as soon as possible, unless this already
  // happened in GCRuntime::discardJITCodeForGC.
  if (!haveDiscardedJITCodeThisSlice) {
    js::CancelOffThreadIonCompile(rt, JS::Zone::Sweep);
  }

  // Update the atom marking bitmaps. This marks atoms referenced by
  // uncollected zones so cannot be done in parallel with the other sweeping
  // work below.
  if (sweepingAtoms) {
    AutoPhase ap(stats(), PhaseKind::UPDATE_ATOMS_BITMAP);
    updateAtomsBitmap();
  }

#ifdef DEBUG
  validateIncrementalMarking();
#endif

  AutoSetThreadIsSweeping threadIsSweeping;

  // This must happen before sweeping realm globals.
  sweepDebuggerOnMainThread(gcx);

  // FinalizationRegistry sweeping touches weak maps and so must not run in
  // parallel with that. This triggers a read barrier and can add marking work
  // for zones that are still marking. Must happen before sweeping realm
  // globals.
  sweepFinalizationObserversOnMainThread();

  // This must happen before updating embedding weak pointers.
  sweepRealmGlobals();

  sweepEmbeddingWeakPointers(gcx);

  {
    AutoLockHelperThreadState lock;

    AutoPhase ap(stats(), PhaseKind::SWEEP_COMPARTMENTS);

    AutoRunParallelTask sweepCCWrappers(this, &GCRuntime::sweepCCWrappers,
                                        PhaseKind::SWEEP_CC_WRAPPER,
                                        GCUse::Sweeping, lock);
    AutoRunParallelTask sweepMisc(this, &GCRuntime::sweepMisc,
                                  PhaseKind::SWEEP_MISC, GCUse::Sweeping, lock);
    AutoRunParallelTask sweepCompTasks(this, &GCRuntime::sweepCompressionTasks,
                                       PhaseKind::SWEEP_COMPRESSION,
                                       GCUse::Sweeping, lock);
    AutoRunParallelTask sweepWeakMaps(this, &GCRuntime::sweepWeakMaps,
                                      PhaseKind::SWEEP_WEAKMAPS,
                                      GCUse::Sweeping, lock);
    AutoRunParallelTask sweepUniqueIds(this, &GCRuntime::sweepUniqueIds,
                                       PhaseKind::SWEEP_UNIQUEIDS,
                                       GCUse::Sweeping, lock);
    AutoRunParallelTask sweepWeakPointers(
        this, &GCRuntime::sweepObjectsWithWeakPointers,
        PhaseKind::SWEEP_WEAK_POINTERS, GCUse::Sweeping, lock);

    WeakCacheTaskVector sweepCacheTasks;
    bool canSweepWeakCachesOffThread =
        PrepareWeakCacheTasks(rt, &sweepCacheTasks);
    if (canSweepWeakCachesOffThread) {
      weakCachesToSweep.ref().emplace(currentSweepGroup);
      for (auto& task : sweepCacheTasks) {
        startTask(task, lock);
      }
    }

    {
      AutoUnlockHelperThreadState unlock(lock);
      sweepJitDataOnMainThread(gcx);

      if (!canSweepWeakCachesOffThread) {
        MOZ_ASSERT(sweepCacheTasks.empty());
        SweepAllWeakCachesOnMainThread(rt);
      }
    }

    for (auto& task : sweepCacheTasks) {
      joinTask(task, lock);
    }
  }

  if (sweepingAtoms) {
    startSweepingAtomsTable();
  }

  // Queue all GC things in all zones for sweeping, either on the foreground
  // or on the background thread.

  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    for (const auto& phase : BackgroundFinalizePhases) {
      initBackgroundSweep(zone, gcx, phase);
    }

    zone->arenas.queueForegroundThingsForSweep();
  }

  MOZ_ASSERT(!sweepZone);

  safeToYield = true;
  markOnBackgroundThreadDuringSweeping = CanUseExtraThreads();

  return Finished;
}
bool GCRuntime::shouldYieldForZeal(ZealMode mode) {
  bool yield = useZeal && hasZealMode(mode);

  // Only yield on the first sweep slice for this mode.
  bool firstSweepSlice = initialState != State::Sweep;
  if (mode == ZealMode::IncrementalMultipleSlices && !firstSweepSlice) {
    yield = false;
  }

  return yield;
}
GCRuntime::endSweepingSweepGroup(JS::GCContext
* gcx
,
1655 SliceBudget
& budget
) {
1656 // This is to prevent a race between markTask checking the zone state and
1657 // us changing it below.
1658 if (joinBackgroundMarkTask() == NotFinished
) {
1662 assertNoMarkingWork();
1664 // Disable background marking during sweeping until we start sweeping the next
1666 markOnBackgroundThreadDuringSweeping
= false;
1669 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::FINALIZE_END
);
1670 AutoLockStoreBuffer
lock(&storeBuffer());
1671 callFinalizeCallbacks(gcx
, JSFINALIZE_GROUP_END
);
1674 /* Free LIFO blocks on a background thread if possible. */
1675 startBackgroundFree();
1677 /* Update the GC state for zones we have swept. */
1678 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1679 if (jit::JitZone
* jitZone
= zone
->jitZone()) {
1680 // Clear out any small pools that we're hanging on to.
1681 jitZone
->execAlloc().purge();
1683 AutoLockGC
lock(this);
1684 zone
->changeGCState(Zone::Sweep
, Zone::Finished
);
1685 zone
->arenas
.unmarkPreMarkedFreeCells();
1686 zone
->arenas
.checkNoArenasToUpdate();
1687 zone
->pretenuring
.clearCellCountsInNewlyCreatedArenas();
1691 * Start background thread to sweep zones if required, sweeping any atoms
1692 * zones last if present.
1695 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1696 if (zone
->isAtomsZone()) {
1699 zones
.prepend(zone
);
1703 queueZonesAndStartBackgroundSweep(std::move(zones
));
IncrementalProgress GCRuntime::markDuringSweeping(JS::GCContext* gcx,
                                                  SliceBudget& budget) {
  MOZ_ASSERT(markTask.isIdle());

  if (markOnBackgroundThreadDuringSweeping) {
    if (!marker().isDrained() || hasDelayedMarking()) {
      AutoLockHelperThreadState lock;
      MOZ_ASSERT(markTask.isIdle(lock));
      markTask.setBudget(budget);
      markTask.startOrRunIfIdle(lock);
    }
    return Finished;  // This means don't yield to the mutator here.
  }

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
  return markUntilBudgetExhausted(budget, useParallelMarking);
}
void GCRuntime::beginSweepPhase(JS::GCReason reason, AutoGCSession& session) {
  /*
   * Sweep phase.
   *
   * Finalize as we sweep, outside of lock but with RuntimeHeapIsBusy()
   * true so that any attempt to allocate a GC-thing from a finalizer will
   * fail, rather than nest badly and leave the unmarked newborn to be swept.
   */

  MOZ_ASSERT(!abortSweepAfterCurrentGroup);
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);

  releaseHeldRelocatedArenas();

#ifdef JS_GC_ZEAL
  computeNonIncrementalMarkingForValidation(session);
#endif

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);

  AssertNoWrappersInGrayList(rt);
  dropStringWrappers();

  groupZonesForSweeping(reason);

  sweepActions->assertFinished();
}
bool GCRuntime::foregroundFinalize(JS::GCContext* gcx, Zone* zone,
                                   AllocKind thingKind,
                                   SliceBudget& sliceBudget,
                                   SortedArenaList& sweepList) {
  ArenaLists& lists = zone->arenas;
  lists.checkNoArenasToUpdateForKind(thingKind);

  // Non-empty arenas are reused for new allocations as soon as the finalizers
  // for that allocation kind have run. Empty arenas are only released when
  // everything in the zone has been swept (see
  // GCRuntime::sweepBackgroundThings for more details).
  if (!FinalizeArenas(gcx, lists.collectingArenaList(thingKind), sweepList,
                      thingKind, sliceBudget)) {
    // Copy the current contents of sweepList so that ArenaIter can find them.
    lists.setIncrementalSweptArenas(thingKind, sweepList);
    return false;
  }

  sweepList.extractEmpty(&lists.savedEmptyArenas.ref());
  lists.mergeFinalizedArenas(thingKind, sweepList);
  lists.clearIncrementalSweptArenas();

  return true;
}
BackgroundMarkTask::BackgroundMarkTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::MARK, GCUse::Marking),
      budget(SliceBudget::unlimited()) {}

void js::gc::BackgroundMarkTask::run(AutoLockHelperThreadState& lock) {
  AutoUnlockHelperThreadState unlock(lock);

  // Time reporting is handled separately for parallel tasks.
  gc->sweepMarkResult = gc->markUntilBudgetExhausted(
      this->budget, GCRuntime::SingleThreadedMarking, DontReportMarkTime);
}
IncrementalProgress GCRuntime::joinBackgroundMarkTask() {
  AutoLockHelperThreadState lock;
  if (markTask.isIdle(lock)) {
    return Finished;
  }

  joinTask(markTask, lock);

  IncrementalProgress result = sweepMarkResult;
  sweepMarkResult = Finished;
  return result;
}
template <typename T>
static void SweepThing(JS::GCContext* gcx, T* thing) {
  if (!TenuredThingIsMarkedAny(thing)) {
    thing->sweep(gcx);
  }
}
template <typename T>
static bool SweepArenaList(JS::GCContext* gcx, Arena** arenasToSweep,
                           SliceBudget& sliceBudget) {
  while (Arena* arena = *arenasToSweep) {
    MOZ_ASSERT(arena->zone->isGCSweeping());

    for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
      SweepThing(gcx, cell.as<T>());
    }

    Arena* next = arena->next;
    MOZ_ASSERT_IF(next, next->zone == arena->zone);
    *arenasToSweep = next;

    AllocKind kind = MapTypeToAllocKind<T>::kind;
    sliceBudget.step(Arena::thingsPerArena(kind));
    if (sliceBudget.isOverBudget()) {
      return false;
    }
  }

  return true;
}
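
// Budget note (illustrative): the slice budget is charged per whole arena
// using thingsPerArena as the unit of work, so e.g. sweeping k arenas of a
// kind with N things per arena consumes k * N budget steps, regardless of
// how many cells in each arena were actually dead.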
void GCRuntime::startSweepingAtomsTable() {
  auto& maybeAtoms = maybeAtomsToSweep.ref();
  MOZ_ASSERT(maybeAtoms.isNothing());

  AtomsTable* atomsTable = rt->atomsForSweeping();
  if (!atomsTable) {
    return;
  }

  // Create secondary tables to hold new atoms added while we're sweeping the
  // main tables incrementally.
  if (!atomsTable->startIncrementalSweep(maybeAtoms)) {
    // On failure, just sweep the table non-incrementally.
    SweepingTracer trc(rt);
    atomsTable->traceWeak(&trc);
  }
}
IncrementalProgress GCRuntime::sweepAtomsTable(JS::GCContext* gcx,
                                               SliceBudget& budget) {
  if (!atomsZone()->isGCSweeping()) {
    return Finished;
  }

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_ATOMS_TABLE);

  auto& maybeAtoms = maybeAtomsToSweep.ref();
  if (maybeAtoms.isNothing()) {
    return Finished;
  }

  if (!rt->atomsForSweeping()->sweepIncrementally(maybeAtoms.ref(), budget)) {
    return NotFinished;
  }

  maybeAtoms.reset();
  return Finished;
}
static size_t IncrementalSweepWeakCache(GCRuntime* gc,
                                        const WeakCacheToSweep& item) {
  AutoSetThreadIsSweeping threadIsSweeping(item.zone);

  JS::detail::WeakCacheBase* cache = item.cache;
  MOZ_ASSERT(cache->needsIncrementalBarrier());

  SweepingTracer trc(gc->rt);
  size_t steps = cache->traceWeak(&trc, &gc->storeBuffer());
  cache->setIncrementalBarrierTracer(nullptr);

  return steps;
}
WeakCacheSweepIterator::WeakCacheSweepIterator(JS::Zone* sweepGroup)
    : sweepZone(sweepGroup), sweepCache(sweepZone->weakCaches().getFirst()) {
  settle();
}

bool WeakCacheSweepIterator::done() const { return !sweepZone; }

WeakCacheToSweep WeakCacheSweepIterator::get() const {
  MOZ_ASSERT(!done());

  return {sweepCache, sweepZone};
}

void WeakCacheSweepIterator::next() {
  MOZ_ASSERT(!done());

  sweepCache = sweepCache->getNext();
  settle();
}

void WeakCacheSweepIterator::settle() {
  while (sweepZone) {
    while (sweepCache && !sweepCache->needsIncrementalBarrier()) {
      sweepCache = sweepCache->getNext();
    }

    if (sweepCache) {
      break;
    }

    sweepZone = sweepZone->nextNodeInGroup();
    if (sweepZone) {
      sweepCache = sweepZone->weakCaches().getFirst();
    }
  }

  MOZ_ASSERT((!sweepZone && !sweepCache) ||
             (sweepCache && sweepCache->needsIncrementalBarrier()));
}
IncrementalProgress GCRuntime::sweepWeakCaches(JS::GCContext* gcx,
                                               SliceBudget& budget) {
  if (weakCachesToSweep.ref().isNothing()) {
    return Finished;
  }

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);

  WeakCacheSweepIterator& work = weakCachesToSweep.ref().ref();

  AutoLockHelperThreadState lock;

  {
    AutoRunParallelWork runWork(this, IncrementalSweepWeakCache,
                                gcstats::PhaseKind::SWEEP_WEAK_CACHES,
                                GCUse::Sweeping, work, budget, lock);
    AutoUnlockHelperThreadState unlock(lock);
  }

  if (work.done()) {
    weakCachesToSweep.ref().reset();
    return Finished;
  }

  return NotFinished;
}
IncrementalProgress GCRuntime::finalizeAllocKind(JS::GCContext* gcx,
                                                 SliceBudget& budget) {
  MOZ_ASSERT(sweepZone->isGCSweeping());

  // Set the number of things per arena for this AllocKind.
  size_t thingsPerArena = Arena::thingsPerArena(sweepAllocKind);
  auto& sweepList = incrementalSweepList.ref();
  sweepList.setThingsPerArena(thingsPerArena);

  AutoSetThreadIsFinalizing threadIsFinalizing(gcx);

  if (!foregroundFinalize(gcx, sweepZone, sweepAllocKind, budget, sweepList)) {
    return NotFinished;
  }

  // Reset the slots of the sweep list that we used.
  sweepList.reset(thingsPerArena);

  return Finished;
}
IncrementalProgress GCRuntime::sweepPropMapTree(JS::GCContext* gcx,
                                                SliceBudget& budget) {
  // Remove dead SharedPropMaps from the tree. This happens incrementally on
  // the main thread. PropMaps are finalized later on a background thread.

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_PROP_MAP);

  ArenaLists& al = sweepZone->arenas;

  if (!SweepArenaList<CompactPropMap>(
          gcx, &al.gcCompactPropMapArenasToUpdate.ref(), budget)) {
    return NotFinished;
  }
  if (!SweepArenaList<NormalPropMap>(
          gcx, &al.gcNormalPropMapArenasToUpdate.ref(), budget)) {
    return NotFinished;
  }

  return Finished;
}
// An iterator over a standard container with an STL-style begin()/end()
// interface, exposed through the done()/get()/next() style interface used by
// the sweep code below.
template <typename Container>
class ContainerIter {
  using Iter = decltype(std::declval<const Container>().begin());
  using Elem = decltype(*std::declval<Iter>());

  Iter iter;
  Iter end;

 public:
  explicit ContainerIter(const Container& container)
      : iter(container.begin()), end(container.end()) {}

  bool done() const { return iter == end; }

  Elem get() const { return *iter; }

  void next() {
    MOZ_ASSERT(!done());
    ++iter;
  }
};
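// Illustrative sketch (not part of the build): adapting a hypothetical vector
// of integers to the done()/get()/next() style.
//
//   using IntVector = Vector<int, 0, SystemAllocPolicy>;
//
//   void printAll(const IntVector& ints) {
//     for (ContainerIter<IntVector> iter(ints); !iter.done(); iter.next()) {
//       printf("%d\n", iter.get());
//     }
//   }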
// IncrementalIter is a template class that makes a normal iterator into one
// that can be used to perform incremental work by using external state that
// persists between instantiations. The state is only initialised on the first
// use and subsequent uses carry on from the previous state.
template <typename Iter>
struct IncrementalIter {
  using State = mozilla::Maybe<Iter>;
  using Elem = decltype(std::declval<Iter>().get());

 private:
  State& maybeIter;

 public:
  template <typename... Args>
  explicit IncrementalIter(State& maybeIter, Args&&... args)
      : maybeIter(maybeIter) {
    if (maybeIter.isNothing()) {
      maybeIter.emplace(std::forward<Args>(args)...);
    }
  }

  ~IncrementalIter() {
    if (done()) {
      maybeIter.reset();
    }
  }

  bool done() const { return maybeIter.ref().done(); }

  Elem get() const { return maybeIter.ref().get(); }

  void next() { maybeIter.ref().next(); }
};
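// Illustrative sketch (not part of the build): how the external state lets an
// IncrementalIter resume across slices. `processItem` and `iterState` are
// hypothetical; the sweep actions below use exactly this pattern.
//
//   using IntVector = Vector<int, 0, SystemAllocPolicy>;
//   mozilla::Maybe<ContainerIter<IntVector>> iterState;  // persists
//
//   bool runSlice(const IntVector& items, SliceBudget& budget) {
//     // Emplaces a fresh iterator on first use; later calls carry on from
//     // wherever the previous slice stopped.
//     for (IncrementalIter<ContainerIter<IntVector>> iter(iterState, items);
//          !iter.done(); iter.next()) {
//       processItem(iter.get());
//       budget.step();
//       if (budget.isOverBudget()) {
//         return false;  // iterState keeps our position for the next slice.
//       }
//     }
//     return true;  // ~IncrementalIter resets iterState once done.
//   }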
// Iterate through the sweep groups created by
// GCRuntime::groupZonesForSweeping().
class js::gc::SweepGroupsIter {
  GCRuntime* gc;

 public:
  explicit SweepGroupsIter(JSRuntime* rt) : gc(&rt->gc) {
    MOZ_ASSERT(gc->currentSweepGroup);
  }

  bool done() const { return !gc->currentSweepGroup; }

  Zone* get() const { return gc->currentSweepGroup; }

  void next() {
    MOZ_ASSERT(!done());
    gc->getNextSweepGroup();
  }
};
namespace sweepaction {

// Implementation of the SweepAction interface that calls a method on
// GCRuntime.
class SweepActionCall final : public SweepAction {
  using Method = IncrementalProgress (GCRuntime::*)(JS::GCContext* gcx,
                                                    SliceBudget& budget);

  Method method;

 public:
  explicit SweepActionCall(Method m) : method(m) {}
  IncrementalProgress run(Args& args) override {
    return (args.gc->*method)(args.gcx, args.budget);
  }
  void assertFinished() const override {}
};
// Implementation of the SweepAction interface that yields in a specified zeal
// mode.
class SweepActionMaybeYield final : public SweepAction {
#ifdef JS_GC_ZEAL
  ZealMode mode;
  bool isYielding;
#endif

 public:
  explicit SweepActionMaybeYield(ZealMode mode)
#ifdef JS_GC_ZEAL
      : mode(mode), isYielding(false)
#endif
  {
  }

  IncrementalProgress run(Args& args) override {
#ifdef JS_GC_ZEAL
    if (!isYielding && args.gc->shouldYieldForZeal(mode)) {
      isYielding = true;
      return NotFinished;
    }

    isYielding = false;
#endif
    return Finished;
  }

  void assertFinished() const override { MOZ_ASSERT(!isYielding); }

  // These actions should be skipped if GC zeal is not configured.
#ifndef JS_GC_ZEAL
  bool shouldSkip() override { return true; }
#endif
};
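// Illustrative sketch (not part of the build): when the zeal mode is active,
// a yield action ends exactly one slice per pass. The first call sets
// isYielding and reports NotFinished; re-running it in the next slice clears
// the flag and reports Finished, letting the enclosing sequence move on.
//
//   SweepActionMaybeYield yield(ZealMode::YieldBeforeSweepingAtoms);
//   MOZ_ASSERT(yield.run(args) == NotFinished);  // yields to the mutator
//   MOZ_ASSERT(yield.run(args) == Finished);     // next slice continues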
// Implementation of the SweepAction interface that calls a list of actions in
// sequence.
class SweepActionSequence final : public SweepAction {
  using ActionVector = Vector<UniquePtr<SweepAction>, 0, SystemAllocPolicy>;
  using Iter = IncrementalIter<ContainerIter<ActionVector>>;

  ActionVector actions;
  typename Iter::State iterState;

 public:
  bool init(UniquePtr<SweepAction>* acts, size_t count) {
    for (size_t i = 0; i < count; i++) {
      auto& action = acts[i];
      if (!action) {
        return false;
      }
      if (action->shouldSkip()) {
        continue;
      }
      if (!actions.emplaceBack(std::move(action))) {
        return false;
      }
    }
    return true;
  }

  IncrementalProgress run(Args& args) override {
    for (Iter iter(iterState, actions); !iter.done(); iter.next()) {
      if (iter.get()->run(args) == NotFinished) {
        return NotFinished;
      }
    }
    return Finished;
  }

  void assertFinished() const override {
    MOZ_ASSERT(iterState.isNothing());
    for (const auto& action : actions) {
      action->assertFinished();
    }
  }
};
template <typename Iter, typename Init>
class SweepActionForEach final : public SweepAction {
  using Elem = decltype(std::declval<Iter>().get());
  using IncrIter = IncrementalIter<Iter>;

  Init iterInit;
  Elem* elemOut;
  UniquePtr<SweepAction> action;
  typename IncrIter::State iterState;

 public:
  SweepActionForEach(const Init& init, Elem* maybeElemOut,
                     UniquePtr<SweepAction> action)
      : iterInit(init), elemOut(maybeElemOut), action(std::move(action)) {}

  IncrementalProgress run(Args& args) override {
    MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
    auto clearElem = mozilla::MakeScopeExit([&] { setElem(Elem()); });
    for (IncrIter iter(iterState, iterInit); !iter.done(); iter.next()) {
      setElem(iter.get());
      if (action->run(args) == NotFinished) {
        return NotFinished;
      }
    }
    return Finished;
  }

  void assertFinished() const override {
    MOZ_ASSERT(iterState.isNothing());
    MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
    action->assertFinished();
  }

 private:
  void setElem(const Elem& value) {
    if (elemOut) {
      *elemOut = value;
    }
  }
};
static UniquePtr<SweepAction> Call(IncrementalProgress (GCRuntime::*method)(
    JS::GCContext* gcx, SliceBudget& budget)) {
  return MakeUnique<SweepActionCall>(method);
}

static UniquePtr<SweepAction> MaybeYield(ZealMode zealMode) {
  return MakeUnique<SweepActionMaybeYield>(zealMode);
}
template <typename... Rest>
static UniquePtr<SweepAction> Sequence(UniquePtr<SweepAction> first,
                                       Rest... rest) {
  UniquePtr<SweepAction> actions[] = {std::move(first), std::move(rest)...};
  auto seq = MakeUnique<SweepActionSequence>();
  if (!seq || !seq->init(actions, std::size(actions))) {
    return nullptr;
  }

  return UniquePtr<SweepAction>(std::move(seq));
}
static UniquePtr<SweepAction> RepeatForSweepGroup(
    JSRuntime* rt, UniquePtr<SweepAction> action) {
  if (!action) {
    return nullptr;
  }

  using Action = SweepActionForEach<SweepGroupsIter, JSRuntime*>;
  return js::MakeUnique<Action>(rt, nullptr, std::move(action));
}

static UniquePtr<SweepAction> ForEachZoneInSweepGroup(
    JSRuntime* rt, Zone** zoneOut, UniquePtr<SweepAction> action) {
  if (!action) {
    return nullptr;
  }

  using Action = SweepActionForEach<SweepGroupZonesIter, JSRuntime*>;
  return js::MakeUnique<Action>(rt, zoneOut, std::move(action));
}

static UniquePtr<SweepAction> ForEachAllocKind(AllocKinds kinds,
                                               AllocKind* kindOut,
                                               UniquePtr<SweepAction> action) {
  if (!action) {
    return nullptr;
  }

  using Action = SweepActionForEach<ContainerIter<AllocKinds>, AllocKinds>;
  return js::MakeUnique<Action>(kinds, kindOut, std::move(action));
}

}  // namespace sweepaction
bool GCRuntime::initSweepActions() {
  using namespace sweepaction;
  using sweepaction::Call;

  sweepActions.ref() = RepeatForSweepGroup(
      rt,
      Sequence(
          Call(&GCRuntime::beginMarkingSweepGroup),
          Call(&GCRuntime::markGrayRootsInCurrentGroup),
          MaybeYield(ZealMode::YieldWhileGrayMarking),
          Call(&GCRuntime::markGray), Call(&GCRuntime::endMarkingSweepGroup),
          Call(&GCRuntime::beginSweepingSweepGroup),
          MaybeYield(ZealMode::IncrementalMultipleSlices),
          MaybeYield(ZealMode::YieldBeforeSweepingAtoms),
          Call(&GCRuntime::sweepAtomsTable),
          MaybeYield(ZealMode::YieldBeforeSweepingCaches),
          Call(&GCRuntime::sweepWeakCaches),
          ForEachZoneInSweepGroup(
              rt, &sweepZone.ref(),
              Sequence(MaybeYield(ZealMode::YieldBeforeSweepingObjects),
                       ForEachAllocKind(ForegroundObjectFinalizePhase.kinds,
                                        &sweepAllocKind.ref(),
                                        Call(&GCRuntime::finalizeAllocKind)),
                       MaybeYield(ZealMode::YieldBeforeSweepingNonObjects),
                       ForEachAllocKind(ForegroundNonObjectFinalizePhase.kinds,
                                        &sweepAllocKind.ref(),
                                        Call(&GCRuntime::finalizeAllocKind)),
                       MaybeYield(ZealMode::YieldBeforeSweepingPropMapTrees),
                       Call(&GCRuntime::sweepPropMapTree))),
          Call(&GCRuntime::endSweepingSweepGroup)));

  return sweepActions != nullptr;
}
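// Illustrative sketch (not part of the build): the action tree built above
// behaves roughly like nested resumable loops, with the zeal yield points
// elided:
//
//   for (each sweep group) {                     // RepeatForSweepGroup
//     beginMarkingSweepGroup(); markGrayRootsInCurrentGroup(); markGray();
//     endMarkingSweepGroup(); beginSweepingSweepGroup();
//     sweepAtomsTable(); sweepWeakCaches();
//     for (each zone in the group) {             // ForEachZoneInSweepGroup
//       for (each foreground object kind) {      // ForEachAllocKind
//         finalizeAllocKind();
//       }
//       for (each foreground non-object kind) {  // ForEachAllocKind
//         finalizeAllocKind();
//       }
//       sweepPropMapTree();
//     }
//     endSweepingSweepGroup();
//   }
//
// Whenever a leaf action returns NotFinished, each enclosing loop's position
// is preserved in its IncrementalIter state, so the next slice re-enters the
// tree at the same point.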
IncrementalProgress GCRuntime::performSweepActions(SliceBudget& budget) {
  AutoMajorGCProfilerEntry s(this);
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);

  JS::GCContext* gcx = rt->gcContext();
  AutoSetThreadIsSweeping threadIsSweeping(gcx);
  AutoPoisonFreedJitCode pjc(gcx);

  // Don't trigger pre-barriers when finalizing.
  AutoDisableBarriers disableBarriers(this);

  // Drain the mark stack, possibly in a parallel task if we're in a part of
  // sweeping that allows it.
  //
  // In the first sweep slice we must not yield to the mutator until we've
  // started sweeping a sweep group, but in that case the stack must be empty
  // anyway.
#ifdef DEBUG
  MOZ_ASSERT(initialState <= State::Sweep);
  if (initialState != State::Sweep) {
    assertNoMarkingWork();
  }
#endif

  if (initialState == State::Sweep &&
      markDuringSweeping(gcx, budget) == NotFinished) {
    return NotFinished;
  }

  // Then continue running sweep actions.

  SweepAction::Args args{this, gcx, budget};
  IncrementalProgress sweepProgress = sweepActions->run(args);
  IncrementalProgress markProgress = joinBackgroundMarkTask();

  if (sweepProgress == Finished && markProgress == Finished) {
    return Finished;
  }

  MOZ_ASSERT(isIncremental);
  return NotFinished;
}
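// Illustrative sketch (not part of the build): the resume-until-Finished
// contract as a hypothetical driver would see it; `waitForNextSliceTrigger`
// is made up. In reality GCRuntime::incrementalSlice dispatches here while
// the collector is in State::Sweep.
//
//   while (gc->performSweepActions(budget) == NotFinished) {
//     // Over budget: yield to the mutator, then re-enter with a new budget.
//     waitForNextSliceTrigger();
//   }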
bool GCRuntime::allCCVisibleZonesWereCollected() {
  // Calculate whether the gray marking state is now valid.
  //
  // The gray bits change from invalid to valid if we finished a full GC from
  // the point of view of the cycle collector. We ignore the following:
  //
  //  - Helper thread zones, as these are not reachable from the main heap.
  //  - The atoms zone, since strings and symbols are never marked gray.
  //
  // These exceptions ensure that when the CC requests a full GC the gray mark
  // state ends up valid even if we don't collect all of the zones.

  for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
    if (!zone->isCollecting() && !zone->arenas.arenaListsAreEmpty()) {
      return false;
    }
  }

  return true;
}
void GCRuntime::endSweepPhase(bool destroyingRuntime) {
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);

  sweepActions->assertFinished();

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);

  MOZ_ASSERT_IF(destroyingRuntime, !useBackgroundThreads);

  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY);

    // Sweep shared script bytecode now that all zones have been swept and
    // finalizers for BaseScripts have released their references.
    SweepScriptData(rt);
  }

  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
    AutoLockStoreBuffer lock(&storeBuffer());
    callFinalizeCallbacks(rt->gcContext(), JSFINALIZE_COLLECTION_END);
  }

  if (allCCVisibleZonesWereCollected()) {
    grayBitsValid = true;
  }

  if (isIncremental) {
    findDeadCompartments();
  }

  finishMarkingValidation();

#ifdef DEBUG
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    for (auto i : AllAllocKinds()) {
      MOZ_ASSERT_IF(!IsBackgroundFinalized(i) || !useBackgroundThreads,
                    zone->arenas.collectingArenaList(i).isEmpty());
    }
  }

  AssertNoWrappersInGrayList(rt);