1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
8 * Implementation of GC sweeping.
10 * In the SpiderMonkey GC, 'sweeping' is used to mean two things:
11 * - updating data structures to remove pointers to dead GC things and updating
12 * pointers to moved GC things
13 * - finalizing dead GC things
15 * Furthermore, the GC carries out gray and weak marking after the start of the
16 * sweep phase. This is also implemented in this file.
19 #include "mozilla/Maybe.h"
20 #include "mozilla/ScopeExit.h"
21 #include "mozilla/TimeStamp.h"
23 #include "builtin/FinalizationRegistryObject.h"
24 #include "builtin/WeakRefObject.h"
25 #include "debugger/DebugAPI.h"
26 #include "gc/AllocKind.h"
27 #include "gc/FinalizationObservers.h"
28 #include "gc/GCInternals.h"
29 #include "gc/GCLock.h"
30 #include "gc/GCProbes.h"
31 #include "gc/GCRuntime.h"
32 #include "gc/ParallelWork.h"
33 #include "gc/Statistics.h"
34 #include "gc/TraceKind.h"
35 #include "gc/WeakMap.h"
37 #include "jit/JitRuntime.h"
38 #include "jit/JitZone.h"
39 #include "proxy/DeadObjectProxy.h"
40 #include "vm/BigIntType.h"
41 #include "vm/HelperThreads.h"
42 #include "vm/JSContext.h"
44 #include "vm/WrapperObject.h"
46 #include "gc/PrivateIterators-inl.h"
47 #include "vm/GeckoProfiler-inl.h"
48 #include "vm/JSObject-inl.h"
49 #include "vm/PropMap-inl.h"
50 #include "vm/Shape-inl.h"
51 #include "vm/StringType-inl.h"
54 using namespace js::gc
;
56 using mozilla::TimeStamp
;
// Pairs a stats phase bucket with the alloc kinds finalized during that phase.
// NOTE(review): this copy appears truncated — the AllocKinds member referred
// to as |phase.kinds| elsewhere in this file is not visible here; confirm
// against the full source.
58 struct js::gc::FinalizePhase
{
59 gcstats::PhaseKind statsPhase
;
64 * Finalization order for objects swept incrementally on the main thread.
66 static constexpr FinalizePhase ForegroundObjectFinalizePhase
= {
67 gcstats::PhaseKind::FINALIZE_OBJECT
,
68 {AllocKind::OBJECT0
, AllocKind::OBJECT2
, AllocKind::OBJECT4
,
69 AllocKind::OBJECT8
, AllocKind::OBJECT12
, AllocKind::OBJECT16
}};
72 * Finalization order for GC things swept incrementally on the main thread.
74 static constexpr FinalizePhase ForegroundNonObjectFinalizePhase
= {
75 gcstats::PhaseKind::FINALIZE_NON_OBJECT
,
76 {AllocKind::SCRIPT
, AllocKind::JITCODE
}};
79 * Finalization order for GC things swept on the background thread.
81 static constexpr FinalizePhase BackgroundFinalizePhases
[] = {
82 {gcstats::PhaseKind::FINALIZE_OBJECT
,
83 {AllocKind::FUNCTION
, AllocKind::FUNCTION_EXTENDED
,
84 AllocKind::OBJECT0_BACKGROUND
, AllocKind::OBJECT2_BACKGROUND
,
85 AllocKind::ARRAYBUFFER4
, AllocKind::OBJECT4_BACKGROUND
,
86 AllocKind::ARRAYBUFFER8
, AllocKind::OBJECT8_BACKGROUND
,
87 AllocKind::ARRAYBUFFER12
, AllocKind::OBJECT12_BACKGROUND
,
88 AllocKind::ARRAYBUFFER16
, AllocKind::OBJECT16_BACKGROUND
}},
89 {gcstats::PhaseKind::FINALIZE_NON_OBJECT
,
90 {AllocKind::SCOPE
, AllocKind::REGEXP_SHARED
, AllocKind::FAT_INLINE_STRING
,
91 AllocKind::STRING
, AllocKind::EXTERNAL_STRING
, AllocKind::FAT_INLINE_ATOM
,
92 AllocKind::ATOM
, AllocKind::SYMBOL
, AllocKind::BIGINT
, AllocKind::SHAPE
,
93 AllocKind::BASE_SHAPE
, AllocKind::GETTER_SETTER
,
94 AllocKind::COMPACT_PROP_MAP
, AllocKind::NORMAL_PROP_MAP
,
95 AllocKind::DICT_PROP_MAP
}}};
98 inline size_t Arena::finalize(JS::GCContext
* gcx
, AllocKind thingKind
,
100 /* Enforce requirements on size of T. */
101 MOZ_ASSERT(thingSize
% CellAlignBytes
== 0);
102 MOZ_ASSERT(thingSize
>= MinCellSize
);
103 MOZ_ASSERT(thingSize
<= 255);
105 MOZ_ASSERT(allocated());
106 MOZ_ASSERT(thingKind
== getAllocKind());
107 MOZ_ASSERT(thingSize
== getThingSize());
108 MOZ_ASSERT(!onDelayedMarkingList_
);
110 uint_fast16_t firstThing
= firstThingOffset(thingKind
);
111 uint_fast16_t firstThingOrSuccessorOfLastMarkedThing
= firstThing
;
112 uint_fast16_t lastThing
= ArenaSize
- thingSize
;
114 FreeSpan newListHead
;
115 FreeSpan
* newListTail
= &newListHead
;
116 size_t nmarked
= 0, nfinalized
= 0;
118 for (ArenaCellIterUnderFinalize
cell(this); !cell
.done(); cell
.next()) {
120 if (TenuredThingIsMarkedAny(t
)) {
121 uint_fast16_t thing
= uintptr_t(t
) & ArenaMask
;
122 if (thing
!= firstThingOrSuccessorOfLastMarkedThing
) {
123 // We just finished passing over one or more free things,
124 // so record a new FreeSpan.
125 newListTail
->initBounds(firstThingOrSuccessorOfLastMarkedThing
,
126 thing
- thingSize
, this);
127 newListTail
= newListTail
->nextSpanUnchecked(this);
129 firstThingOrSuccessorOfLastMarkedThing
= thing
+ thingSize
;
133 AlwaysPoison(t
, JS_SWEPT_TENURED_PATTERN
, thingSize
,
134 MemCheckKind::MakeUndefined
);
135 gcprobes::TenuredFinalize(t
);
140 if constexpr (std::is_same_v
<T
, JSObject
>) {
141 if (isNewlyCreated_
) {
142 zone
->pretenuring
.updateCellCountsInNewlyCreatedArenas(
143 nmarked
+ nfinalized
, nmarked
);
148 if (thingKind
== AllocKind::STRING
||
149 thingKind
== AllocKind::FAT_INLINE_STRING
) {
150 zone
->markedStrings
+= nmarked
;
151 zone
->finalizedStrings
+= nfinalized
;
155 // Do nothing. The caller will update the arena appropriately.
156 MOZ_ASSERT(newListTail
== &newListHead
);
157 DebugOnlyPoison(data
, JS_SWEPT_TENURED_PATTERN
, sizeof(data
),
158 MemCheckKind::MakeUndefined
);
162 MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing
!= firstThing
);
163 uint_fast16_t lastMarkedThing
=
164 firstThingOrSuccessorOfLastMarkedThing
- thingSize
;
165 if (lastThing
== lastMarkedThing
) {
166 // If the last thing was marked, we will have already set the bounds of
167 // the final span, and we just need to terminate the list.
168 newListTail
->initAsEmpty();
170 // Otherwise, end the list with a span that covers the final stretch of free
172 newListTail
->initFinal(firstThingOrSuccessorOfLastMarkedThing
, lastThing
,
176 firstFreeSpan
= newListHead
;
178 size_t nfree
= numFreeThings(thingSize
);
179 MOZ_ASSERT(nfree
+ nmarked
== thingsPerArena(thingKind
));
184 // Finalize arenas from src list, releasing empty arenas if keepArenas wasn't
185 // specified and inserting the others into the appropriate destination size
187 template <typename T
>
188 static inline bool FinalizeTypedArenas(JS::GCContext
* gcx
, ArenaList
& src
,
189 SortedArenaList
& dest
,
191 SliceBudget
& budget
) {
192 MOZ_ASSERT(gcx
->isFinalizing());
194 size_t thingSize
= Arena::thingSize(thingKind
);
195 size_t thingsPerArena
= Arena::thingsPerArena(thingKind
);
196 size_t markCount
= 0;
198 auto updateMarkCount
= mozilla::MakeScopeExit([&] {
199 GCRuntime
* gc
= &gcx
->runtimeFromAnyThread()->gc
;
200 gc
->stats().addCount(gcstats::COUNT_CELLS_MARKED
, markCount
);
203 while (Arena
* arena
= src
.takeFirstArena()) {
204 size_t nmarked
= arena
->finalize
<T
>(gcx
, thingKind
, thingSize
);
205 size_t nfree
= thingsPerArena
- nmarked
;
207 markCount
+= nmarked
;
210 dest
.insertAt(arena
, nfree
);
212 arena
->chunk()->recycleArena(arena
, dest
, thingsPerArena
);
215 budget
.step(thingsPerArena
);
216 if (budget
.isOverBudget()) {
225 * Finalize the list of arenas.
227 static bool FinalizeArenas(JS::GCContext
* gcx
, ArenaList
& src
,
228 SortedArenaList
& dest
, AllocKind thingKind
,
229 SliceBudget
& budget
) {
231 #define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
233 case AllocKind::allocKind: \
234 return FinalizeTypedArenas<type>(gcx, src, dest, thingKind, budget);
235 FOR_EACH_ALLOCKIND(EXPAND_CASE
)
239 MOZ_CRASH("Invalid alloc kind");
243 void GCRuntime::initBackgroundSweep(Zone
* zone
, JS::GCContext
* gcx
,
244 const FinalizePhase
& phase
) {
245 gcstats::AutoPhase
ap(stats(), phase
.statsPhase
);
246 for (auto kind
: phase
.kinds
) {
247 zone
->arenas
.initBackgroundSweep(kind
);
251 void ArenaLists::initBackgroundSweep(AllocKind thingKind
) {
252 MOZ_ASSERT(IsBackgroundFinalized(thingKind
));
253 MOZ_ASSERT(concurrentUse(thingKind
) == ConcurrentUse::None
);
255 if (!collectingArenaList(thingKind
).isEmpty()) {
256 concurrentUse(thingKind
) = ConcurrentUse::BackgroundFinalize
;
260 void GCRuntime::backgroundFinalize(JS::GCContext
* gcx
, Zone
* zone
,
261 AllocKind kind
, Arena
** empty
) {
264 ArenaLists
* lists
= &zone
->arenas
;
265 ArenaList
& arenas
= lists
->collectingArenaList(kind
);
266 if (arenas
.isEmpty()) {
267 MOZ_ASSERT(lists
->concurrentUse(kind
) == ArenaLists::ConcurrentUse::None
);
271 SortedArenaList
finalizedSorted(Arena::thingsPerArena(kind
));
273 auto unlimited
= SliceBudget::unlimited();
274 FinalizeArenas(gcx
, arenas
, finalizedSorted
, kind
, unlimited
);
275 MOZ_ASSERT(arenas
.isEmpty());
277 finalizedSorted
.extractEmpty(empty
);
279 // When marking begins, all arenas are moved from arenaLists to
280 // collectingArenaLists. When the mutator runs, new arenas are allocated in
281 // arenaLists. Now that finalization is complete, we want to merge these lists
284 // We must take the GC lock to be able to safely modify the ArenaList;
285 // however, this does not by itself make the changes visible to all threads,
286 // as not all threads take the GC lock to read the ArenaLists.
287 // That safety is provided by the ReleaseAcquire memory ordering of the
288 // background finalize state, which we explicitly set as the final step.
291 MOZ_ASSERT(lists
->concurrentUse(kind
) ==
292 ArenaLists::ConcurrentUse::BackgroundFinalize
);
293 lists
->mergeFinalizedArenas(kind
, finalizedSorted
);
296 lists
->concurrentUse(kind
) = ArenaLists::ConcurrentUse::None
;
299 // After finalizing arenas, merge the following to get the final state of an
301 // - arenas allocated during marking
302 // - arenas allocated during sweeping
303 // - finalized arenas
// Merge the finalized arenas for |kind| back into the live arena list,
// together with any arenas the mutator allocated while collection ran.
304 void ArenaLists::mergeFinalizedArenas(AllocKind kind
,
305 SortedArenaList
& finalizedArenas
) {
307 // Updating arena lists off-thread requires taking the GC lock because the
308 // main thread uses these when allocating.
309 if (IsBackgroundFinalized(kind
)) {
310 runtimeFromAnyThread()->gc
.assertCurrentThreadHasLockedGC();
// NOTE(review): the closing brace of the if above (and any debug-only
// preprocessor guards around the assertion) are not visible in this
// truncated copy — confirm against the full source.
314 ArenaList
& arenas
= arenaList(kind
);
// Set aside arenas allocated into while the collection was running...
316 ArenaList allocatedDuringCollection
= std::move(arenas
);
// ...replace the list with the finalized arenas...
317 arenas
= finalizedArenas
.toArenaList();
// ...then re-append the during-collection arenas after the list's cursor.
318 arenas
.insertListWithCursorAtEnd(allocatedDuringCollection
);
// Finalization has fully drained the collecting list for this kind.
320 collectingArenaList(kind
).clear();
323 void ArenaLists::queueForegroundThingsForSweep() {
324 gcCompactPropMapArenasToUpdate
=
325 collectingArenaList(AllocKind::COMPACT_PROP_MAP
).head();
326 gcNormalPropMapArenasToUpdate
=
327 collectingArenaList(AllocKind::NORMAL_PROP_MAP
).head();
330 void GCRuntime::sweepBackgroundThings(ZoneList
& zones
) {
331 if (zones
.isEmpty()) {
335 JS::GCContext
* gcx
= TlsGCContext
.get();
336 MOZ_ASSERT(gcx
->isFinalizing());
338 // Sweep zones in order. The atoms zone must be finalized last as other
339 // zones may have direct pointers into it.
340 while (!zones
.isEmpty()) {
341 Zone
* zone
= zones
.removeFront();
342 MOZ_ASSERT(zone
->isGCFinished());
344 TimeStamp startTime
= TimeStamp::Now();
346 Arena
* emptyArenas
= zone
->arenas
.takeSweptEmptyArenas();
348 // We must finalize thing kinds in the order specified by
349 // BackgroundFinalizePhases.
350 for (const auto& phase
: BackgroundFinalizePhases
) {
351 for (auto kind
: phase
.kinds
) {
352 backgroundFinalize(gcx
, zone
, kind
, &emptyArenas
);
356 // Release any arenas that are now empty.
358 // Empty arenas are only released after everything has been finalized so
359 // that it's still possible to get a thing's zone after the thing has been
360 // finalized. The HeapPtr destructor depends on this, and this allows
361 // HeapPtrs between things of different alloc kind regardless of
362 // finalization order.
364 // Periodically drop and reacquire the GC lock every so often to avoid
365 // blocking the main thread from allocating chunks.
366 static const size_t LockReleasePeriod
= 32;
368 while (emptyArenas
) {
369 AutoLockGC
lock(this);
370 for (size_t i
= 0; i
< LockReleasePeriod
&& emptyArenas
; i
++) {
371 Arena
* arena
= emptyArenas
;
372 emptyArenas
= emptyArenas
->next
;
373 releaseArena(arena
, lock
);
377 // Record time spent sweeping this zone.
378 TimeStamp endTime
= TimeStamp::Now();
379 zone
->perZoneGCTime
+= endTime
- startTime
;
383 void GCRuntime::assertBackgroundSweepingFinished() {
386 AutoLockHelperThreadState lock
;
387 MOZ_ASSERT(backgroundSweepZones
.ref().isEmpty());
390 for (ZonesIter
zone(this, WithAtoms
); !zone
.done(); zone
.next()) {
391 for (auto kind
: AllAllocKinds()) {
392 MOZ_ASSERT_IF(state() != State::Prepare
&& state() != State::Mark
&&
393 state() != State::Sweep
,
394 zone
->arenas
.collectingArenaList(kind
).isEmpty());
395 MOZ_ASSERT(zone
->arenas
.doneBackgroundFinalize(kind
));
// Hand |zones| to the background sweep task and start it, or run the sweep
// synchronously when background threads are disabled.
// NOTE(review): the braces that scope the helper-thread lock (so it is
// released before runFromMainThread) are not visible in this truncated copy —
// confirm against the full source.
401 void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList
&& zones
)
403 AutoLockHelperThreadState lock
;
404 MOZ_ASSERT(!requestSliceAfterBackgroundTask
);
// Append the zones to the shared queue under the lock.
405 backgroundSweepZones
.ref().appendList(std::move(zones
));
406 if (useBackgroundThreads
) {
407 sweepTask
.startOrRunIfIdle(lock
);
// Without background threads, do the sweeping work on this thread instead.
410 if (!useBackgroundThreads
) {
412 sweepTask
.runFromMainThread();
416 BackgroundSweepTask::BackgroundSweepTask(GCRuntime
* gc
)
417 : GCParallelTask(gc
, gcstats::PhaseKind::SWEEP
, GCUse::Finalizing
) {}
419 void BackgroundSweepTask::run(AutoLockHelperThreadState
& lock
) {
420 gc
->sweepFromBackgroundThread(lock
);
// Body of the background sweep task: repeatedly take the queued zones and
// sweep them with the helper-thread lock released.
423 void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState
& lock
) {
// Take everything currently queued. NOTE(review): the do-loop header and the
// local ZoneList declaration are not visible in this truncated copy.
426 zones
.appendList(std::move(backgroundSweepZones
.ref()));
// Drop the helper-thread lock while doing the actual sweeping work.
428 AutoUnlockHelperThreadState
unlock(lock
);
429 sweepBackgroundThings(zones
);
431 // The main thread may call queueZonesAndStartBackgroundSweep() while this
432 // is running so we must check there is no more work after releasing the
434 } while (!backgroundSweepZones
.ref().isEmpty());
436 maybeRequestGCAfterBackgroundTask(lock
);
// Wait for background sweeping to complete.
// NOTE(review): the join on the sweep task (original line 440) is not visible
// in this truncated copy; only the post-join assertion remains.
439 void GCRuntime::waitBackgroundSweepEnd() {
// Unless a collection is currently in its sweep phase, all background
// sweeping state should now be empty.
441 if (state() != State::Sweep
) {
442 assertBackgroundSweepingFinished();
446 void GCRuntime::startBackgroundFree() {
447 AutoLockHelperThreadState lock
;
448 freeTask
.startOrRunIfIdle(lock
);
451 BackgroundFreeTask::BackgroundFreeTask(GCRuntime
* gc
)
452 : GCParallelTask(gc
, gcstats::PhaseKind::NONE
) {
453 // This can occur outside GCs so doesn't have a stats phase.
456 void BackgroundFreeTask::run(AutoLockHelperThreadState
& lock
) {
457 gc
->freeFromBackgroundThread(lock
);
// Body of the background free task: release queued LifoAlloc blocks and
// nursery malloc buffers with the helper-thread lock dropped.
460 void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState
& lock
) {
// Take ownership of the queued LifoAlloc blocks while holding the lock.
462 LifoAlloc
lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE
);
463 lifoBlocks
.transferFrom(&lifoBlocksToFree
.ref());
// Likewise take the set of nursery buffers queued for freeing.
465 Nursery::BufferSet buffers
;
466 std::swap(buffers
, buffersToFreeAfterMinorGC
.ref());
// Do the actual freeing without holding the helper-thread lock.
468 AutoUnlockHelperThreadState
unlock(lock
);
470 lifoBlocks
.freeAll();
472 JS::GCContext
* gcx
= TlsGCContext
.get();
473 for (Nursery::BufferSet::Range r
= buffers
.all(); !r
.empty();
475 // Malloc memory associated with nursery objects is not tracked as these
476 // are assumed to be short lived.
477 gcx
->freeUntracked(r
.front());
// Loop again if more work was queued while the lock was released.
479 } while (!lifoBlocksToFree
.ref().isEmpty() ||
480 !buffersToFreeAfterMinorGC
.ref().empty());
483 void GCRuntime::waitBackgroundFreeEnd() { freeTask
.join(); }
485 template <class ZoneIterT
>
486 IncrementalProgress
GCRuntime::markWeakReferences(
487 SliceBudget
& incrementalBudget
) {
488 MOZ_ASSERT(!marker().isWeakMarking());
490 gcstats::AutoPhase
ap1(stats(), gcstats::PhaseKind::MARK_WEAK
);
492 auto unlimited
= SliceBudget::unlimited();
493 SliceBudget
& budget
=
494 marker().incrementalWeakMapMarkingEnabled
? incrementalBudget
: unlimited
;
496 // Ensure we don't return to the mutator while we're still in weak marking
499 mozilla::MakeScopeExit([&] { marker().leaveWeakMarkingMode(); });
501 if (marker().enterWeakMarkingMode()) {
502 // If there was an 'enter-weak-marking-mode' token in the queue, then it
503 // and everything after it will still be in the queue so we can process
505 while (processTestMarkQueue() == QueueYielded
) {
508 // Do not rely on the information about not-yet-marked weak keys that have
509 // been collected by barriers. Clear out the gcEphemeronEdges entries and
510 // rebuild the full table. Note that this a cross-zone operation; delegate
511 // zone entries will be populated by map zone traversals, so everything
512 // needs to be cleared first, then populated.
513 if (!marker().incrementalWeakMapMarkingEnabled
) {
514 for (ZoneIterT
zone(this); !zone
.done(); zone
.next()) {
515 AutoEnterOOMUnsafeRegion oomUnsafe
;
516 if (!zone
->gcEphemeronEdges().clear()) {
517 oomUnsafe
.crash("clearing weak keys when entering weak marking mode");
522 for (ZoneIterT
zone(this); !zone
.done(); zone
.next()) {
523 if (zone
->enterWeakMarkingMode(&marker(), budget
) == NotFinished
) {
529 bool markedAny
= true;
531 if (!marker().markUntilBudgetExhausted(budget
)) {
532 MOZ_ASSERT(marker().incrementalWeakMapMarkingEnabled
);
538 if (!marker().isWeakMarking()) {
539 for (ZoneIterT
zone(this); !zone
.done(); zone
.next()) {
540 markedAny
|= WeakMapBase::markZoneIteratively(zone
, &marker());
544 markedAny
|= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker());
547 assertNoMarkingWork();
552 IncrementalProgress
GCRuntime::markWeakReferencesInCurrentGroup(
553 SliceBudget
& budget
) {
554 return markWeakReferences
<SweepGroupZonesIter
>(budget
);
557 template <class ZoneIterT
>
558 IncrementalProgress
GCRuntime::markGrayRoots(SliceBudget
& budget
,
559 gcstats::PhaseKind phase
) {
560 MOZ_ASSERT(marker().markColor() == MarkColor::Gray
);
562 gcstats::AutoPhase
ap(stats(), phase
);
564 AutoUpdateLiveCompartments
updateLive(this);
565 marker().setRootMarkingMode(true);
567 mozilla::MakeScopeExit([this]() { marker().setRootMarkingMode(false); });
569 IncrementalProgress result
=
570 traceEmbeddingGrayRoots(marker().tracer(), budget
);
571 if (result
== NotFinished
) {
575 Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
576 marker().tracer(), Compartment::GrayEdges
);
581 IncrementalProgress
GCRuntime::markAllWeakReferences() {
582 SliceBudget budget
= SliceBudget::unlimited();
583 return markWeakReferences
<GCZonesIter
>(budget
);
586 void GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase
) {
587 SliceBudget budget
= SliceBudget::unlimited();
588 markGrayRoots
<GCZonesIter
>(budget
, phase
);
592 void GCRuntime::dropStringWrappers() {
594 * String "wrappers" are dropped on GC because their presence would require
595 * us to sweep the wrappers in all compartments every time we sweep a
598 for (ZonesIter
zone(this, WithAtoms
); !zone
.done(); zone
.next()) {
599 zone
->dropStringWrappersOnGC();
604 * Group zones that must be swept at the same time.
606 * From the point of view of the mutator, groups of zones transition atomically
607 * from marking to sweeping. If compartment A has an edge to an unmarked object
608 * in compartment B, then we must not start sweeping A in a later slice than we
609 * start sweeping B. That's because a write barrier in A could lead to the
610 * unmarked object in B becoming marked. However, if we had already swept that
611 * object, we would be in trouble.
613 * If we consider these dependencies as a graph, then all the compartments in
614 * any strongly-connected component of this graph must start sweeping in the
617 * Tarjan's algorithm is used to calculate the components.
620 bool Compartment::findSweepGroupEdges() {
621 Zone
* source
= zone();
622 for (WrappedObjectCompartmentEnum
e(this); !e
.empty(); e
.popFront()) {
623 Compartment
* targetComp
= e
.front();
624 Zone
* target
= targetComp
->zone();
626 if (!target
->isGCMarking() || source
->hasSweepGroupEdgeTo(target
)) {
630 for (ObjectWrapperEnum
e(this, targetComp
); !e
.empty(); e
.popFront()) {
631 JSObject
* key
= e
.front().mutableKey();
632 MOZ_ASSERT(key
->zone() == target
);
634 // Add an edge to the wrapped object's zone to ensure that the wrapper
635 // zone is not still being marked when we start sweeping the wrapped zone.
636 // As an optimization, if the wrapped object is already marked black there
637 // is no danger of later marking and we can skip this.
638 if (key
->isMarkedBlack()) {
642 if (!source
->addSweepGroupEdgeTo(target
)) {
646 // We don't need to consider any more wrappers for this target
647 // compartment since we already added an edge.
655 bool Zone::findSweepGroupEdges(Zone
* atomsZone
) {
656 MOZ_ASSERT_IF(this != atomsZone
, !isAtomsZone());
659 if (FinalizationObservers
* observers
= finalizationObservers()) {
660 observers
->checkTables();
664 // Any zone may have a pointer to an atom in the atoms zone, and these aren't
665 // in the cross compartment map.
666 if (atomsZone
->wasGCStarted() && !addSweepGroupEdgeTo(atomsZone
)) {
670 for (CompartmentsInZoneIter
comp(this); !comp
.done(); comp
.next()) {
671 if (!comp
->findSweepGroupEdges()) {
676 return WeakMapBase::findSweepGroupEdgesForZone(this);
679 bool GCRuntime::addEdgesForMarkQueue() {
683 // Add edges between all objects mentioned in the test mark queue, since
684 // otherwise they will get marked in a different order than their sweep
685 // groups. Note that this is only done at the beginning of an incremental
686 // collection, so it is possible for objects to be added later that do not
687 // follow the sweep group ordering. These objects will wait until their sweep
688 // group comes up, or will be skipped if their sweep group is already past.
689 JS::Zone
* prevZone
= nullptr;
690 for (Value val
: testMarkQueue
) {
691 if (!val
.isObject()) {
694 JSObject
* obj
= &val
.toObject();
695 JS::Zone
* zone
= obj
->zone();
696 if (!zone
->isGCMarking()) {
699 if (prevZone
&& prevZone
!= zone
) {
700 if (!prevZone
->addSweepGroupEdgeTo(zone
)) {
710 bool GCRuntime::findSweepGroupEdges() {
711 for (GCZonesIter
zone(this); !zone
.done(); zone
.next()) {
712 if (!zone
->findSweepGroupEdges(atomsZone())) {
717 if (!addEdgesForMarkQueue()) {
721 return DebugAPI::findSweepGroupEdges(rt
);
724 void GCRuntime::groupZonesForSweeping(JS::GCReason reason
) {
726 for (ZonesIter
zone(this, WithAtoms
); !zone
.done(); zone
.next()) {
727 MOZ_ASSERT(zone
->gcSweepGroupEdges().empty());
731 JSContext
* cx
= rt
->mainContextFromOwnThread();
732 ZoneComponentFinder
finder(cx
);
733 if (!isIncremental
|| !findSweepGroupEdges()) {
734 finder
.useOneComponent();
737 // Use one component for two-slice zeal modes.
738 if (useZeal
&& hasIncrementalTwoSliceZealMode()) {
739 finder
.useOneComponent();
742 for (GCZonesIter
zone(this); !zone
.done(); zone
.next()) {
743 MOZ_ASSERT(zone
->isGCMarking());
744 finder
.addNode(zone
);
746 sweepGroups
= finder
.getResultsList();
747 currentSweepGroup
= sweepGroups
;
750 for (GCZonesIter
zone(this); !zone
.done(); zone
.next()) {
751 zone
->clearSweepGroupEdges();
755 unsigned idx
= sweepGroupIndex
;
756 for (Zone
* head
= currentSweepGroup
; head
; head
= head
->nextGroup()) {
757 for (Zone
* zone
= head
; zone
; zone
= zone
->nextNodeInGroup()) {
758 MOZ_ASSERT(zone
->isGCMarking());
759 zone
->gcSweepGroupIndex
= idx
;
764 MOZ_ASSERT_IF(!isIncremental
, !currentSweepGroup
->nextGroup());
765 for (ZonesIter
zone(this, WithAtoms
); !zone
.done(); zone
.next()) {
766 MOZ_ASSERT(zone
->gcSweepGroupEdges().empty());
771 void GCRuntime::getNextSweepGroup() {
772 currentSweepGroup
= currentSweepGroup
->nextGroup();
774 if (!currentSweepGroup
) {
775 abortSweepAfterCurrentGroup
= false;
779 MOZ_ASSERT_IF(abortSweepAfterCurrentGroup
, !isIncremental
);
780 if (!isIncremental
) {
781 ZoneComponentFinder::mergeGroups(currentSweepGroup
);
784 for (Zone
* zone
= currentSweepGroup
; zone
; zone
= zone
->nextNodeInGroup()) {
785 MOZ_ASSERT(zone
->gcState() == zone
->initialMarkingState());
786 MOZ_ASSERT(!zone
->isQueuedForBackgroundSweep());
789 if (abortSweepAfterCurrentGroup
) {
792 // Abort collection of subsequent sweep groups.
793 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
794 MOZ_ASSERT(!zone
->gcNextGraphComponent
);
795 zone
->changeGCState(zone
->initialMarkingState(), Zone::NoGC
);
796 zone
->arenas
.unmarkPreMarkedFreeCells();
797 zone
->arenas
.mergeArenasFromCollectingLists();
798 zone
->clearGCSliceThresholds();
801 for (SweepGroupCompartmentsIter
comp(rt
); !comp
.done(); comp
.next()) {
805 abortSweepAfterCurrentGroup
= false;
806 currentSweepGroup
= nullptr;
813 * At the end of collection, anything reachable from a gray root that has not
814 * otherwise been marked black must be marked gray.
816 * This means that when marking things gray we must not allow marking to leave
817 * the current compartment group, as that could result in things being marked
818 * gray when they might subsequently be marked black. To achieve this, when we
819 * find a cross compartment pointer we don't mark the referent but add it to a
820 * singly-linked list of incoming gray pointers that is stored with each
823 * The list head is stored in Compartment::gcIncomingGrayPointers and contains
824 * cross compartment wrapper objects. The next pointer is stored in the second
825 * extra slot of the cross compartment wrapper.
827 * The list is created during gray marking when one of the
828 * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
829 * current compartment group. This calls DelayCrossCompartmentGrayMarking to
830 * push the referring object onto the list.
832 * The list is traversed and then unlinked in
833 * GCRuntime::markIncomingGrayCrossCompartmentPointers.
// An object belongs on a compartment's incoming-gray list only if it is a
// live (non-dead-proxy) cross-compartment wrapper.
// NOTE(review): an interior line (original line 837, likely a null-check
// assertion) is not visible in this truncated copy.
836 static bool IsGrayListObject(JSObject
* obj
) {
838 return obj
->is
<CrossCompartmentWrapperObject
>() && !IsDeadProxyObject(obj
);
842 unsigned ProxyObject::grayLinkReservedSlot(JSObject
* obj
) {
843 MOZ_ASSERT(IsGrayListObject(obj
));
844 return CrossCompartmentWrapperObject::GrayLinkReservedSlot
;
848 static void AssertNotOnGrayList(JSObject
* obj
) {
850 IsGrayListObject(obj
),
851 GetProxyReservedSlot(obj
, ProxyObject::grayLinkReservedSlot(obj
))
856 static void AssertNoWrappersInGrayList(JSRuntime
* rt
) {
858 for (CompartmentsIter
c(rt
); !c
.done(); c
.next()) {
859 MOZ_ASSERT(!c
->gcIncomingGrayPointers
);
860 for (Compartment::ObjectWrapperEnum
e(c
); !e
.empty(); e
.popFront()) {
861 AssertNotOnGrayList(e
.front().value().unbarrieredGet());
867 static JSObject
* CrossCompartmentPointerReferent(JSObject
* obj
) {
868 MOZ_ASSERT(IsGrayListObject(obj
));
869 return &obj
->as
<ProxyObject
>().private_().toObject();
872 static JSObject
* NextIncomingCrossCompartmentPointer(JSObject
* prev
,
874 unsigned slot
= ProxyObject::grayLinkReservedSlot(prev
);
875 JSObject
* next
= GetProxyReservedSlot(prev
, slot
).toObjectOrNull();
876 MOZ_ASSERT_IF(next
, IsGrayListObject(next
));
879 SetProxyReservedSlot(prev
, slot
, UndefinedValue());
885 void js::gc::DelayCrossCompartmentGrayMarking(GCMarker
* maybeMarker
,
887 MOZ_ASSERT_IF(!maybeMarker
, !JS::RuntimeHeapIsBusy());
888 MOZ_ASSERT(IsGrayListObject(src
));
889 MOZ_ASSERT(src
->isMarkedGray());
891 AutoTouchingGrayThings tgt
;
893 mozilla::Maybe
<AutoLockGC
> lock
;
894 if (maybeMarker
&& maybeMarker
->isParallelMarking()) {
895 // Synchronize access to JSCompartment::gcIncomingGrayPointers.
897 // TODO: Instead of building this list we could scan all incoming CCWs and
898 // mark through gray ones when marking gray roots for a sweep group.
899 lock
.emplace(maybeMarker
->runtime());
902 /* Called from MarkCrossCompartmentXXX functions. */
903 unsigned slot
= ProxyObject::grayLinkReservedSlot(src
);
904 JSObject
* dest
= CrossCompartmentPointerReferent(src
);
905 Compartment
* comp
= dest
->compartment();
907 if (GetProxyReservedSlot(src
, slot
).isUndefined()) {
908 SetProxyReservedSlot(src
, slot
,
909 ObjectOrNullValue(comp
->gcIncomingGrayPointers
));
910 comp
->gcIncomingGrayPointers
= src
;
912 MOZ_ASSERT(GetProxyReservedSlot(src
, slot
).isObjectOrNull());
917 * Assert that the object is in our list, also walking the list to check its
920 JSObject
* obj
= comp
->gcIncomingGrayPointers
;
926 obj
= NextIncomingCrossCompartmentPointer(obj
, false);
932 void GCRuntime::markIncomingGrayCrossCompartmentPointers() {
933 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::MARK_INCOMING_GRAY
);
935 for (SweepGroupCompartmentsIter
c(rt
); !c
.done(); c
.next()) {
936 MOZ_ASSERT(c
->zone()->isGCMarkingBlackAndGray());
937 MOZ_ASSERT_IF(c
->gcIncomingGrayPointers
,
938 IsGrayListObject(c
->gcIncomingGrayPointers
));
940 for (JSObject
* src
= c
->gcIncomingGrayPointers
; src
;
941 src
= NextIncomingCrossCompartmentPointer(src
, true)) {
942 JSObject
* dst
= CrossCompartmentPointerReferent(src
);
943 MOZ_ASSERT(dst
->compartment() == c
);
944 MOZ_ASSERT_IF(src
->asTenured().isMarkedBlack(),
945 dst
->asTenured().isMarkedBlack());
947 if (src
->asTenured().isMarkedGray()) {
948 TraceManuallyBarrieredEdge(marker().tracer(), &dst
,
949 "cross-compartment gray pointer");
953 c
->gcIncomingGrayPointers
= nullptr;
957 static bool RemoveFromGrayList(JSObject
* wrapper
) {
958 AutoTouchingGrayThings tgt
;
960 if (!IsGrayListObject(wrapper
)) {
964 unsigned slot
= ProxyObject::grayLinkReservedSlot(wrapper
);
965 if (GetProxyReservedSlot(wrapper
, slot
).isUndefined()) {
966 return false; /* Not on our list. */
969 JSObject
* tail
= GetProxyReservedSlot(wrapper
, slot
).toObjectOrNull();
970 SetProxyReservedSlot(wrapper
, slot
, UndefinedValue());
972 Compartment
* comp
= CrossCompartmentPointerReferent(wrapper
)->compartment();
973 JSObject
* obj
= comp
->gcIncomingGrayPointers
;
974 if (obj
== wrapper
) {
975 comp
->gcIncomingGrayPointers
= tail
;
980 unsigned slot
= ProxyObject::grayLinkReservedSlot(obj
);
981 JSObject
* next
= GetProxyReservedSlot(obj
, slot
).toObjectOrNull();
982 if (next
== wrapper
) {
983 js::detail::SetProxyReservedSlotUnchecked(obj
, slot
,
984 ObjectOrNullValue(tail
));
990 MOZ_CRASH("object not found in gray link list");
993 void GCRuntime::resetGrayList(Compartment
* comp
) {
994 JSObject
* src
= comp
->gcIncomingGrayPointers
;
996 src
= NextIncomingCrossCompartmentPointer(src
, true);
998 comp
->gcIncomingGrayPointers
= nullptr;
// Whether any compartment in the current sweep group still has a non-empty
// incoming-gray pointer list.
// NOTE(review): the return statements and closing braces are not visible in
// this truncated copy — confirm against the full source.
1002 static bool HasIncomingCrossCompartmentPointers(JSRuntime
* rt
) {
1003 for (SweepGroupCompartmentsIter
c(rt
); !c
.done(); c
.next()) {
1004 if (c
->gcIncomingGrayPointers
) {
1013 void js::NotifyGCNukeWrapper(JSContext
* cx
, JSObject
* wrapper
) {
1014 MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper
));
1017 * References to target of wrapper are being removed, we no longer have to
1018 * remember to mark it.
1020 RemoveFromGrayList(wrapper
);
1023 * Clean up WeakRef maps which might include this wrapper.
1025 JSObject
* target
= UncheckedUnwrapWithoutExpose(wrapper
);
1026 if (target
->is
<WeakRefObject
>()) {
1027 WeakRefObject
* weakRef
= &target
->as
<WeakRefObject
>();
1028 if (weakRef
->target()) {
1029 cx
->runtime()->gc
.nukeWeakRefWrapper(wrapper
, weakRef
);
1034 * Clean up FinalizationRecord record objects which might be the target of
1037 if (target
->is
<FinalizationRecordObject
>()) {
1038 auto* record
= &target
->as
<FinalizationRecordObject
>();
1039 cx
->runtime()->gc
.nukeFinalizationRecordWrapper(wrapper
, record
);
1044 JS_GC_SWAP_OBJECT_A_REMOVED
= 1 << 0,
1045 JS_GC_SWAP_OBJECT_B_REMOVED
= 1 << 1
1048 unsigned js::NotifyGCPreSwap(JSObject
* a
, JSObject
* b
) {
1050 * Two objects in the same compartment are about to have had their contents
1051 * swapped. If either of them are in our gray pointer list, then we remove
1052 * them from the lists, returning a bitset indicating what happened.
1054 return (RemoveFromGrayList(a
) ? JS_GC_SWAP_OBJECT_A_REMOVED
: 0) |
1055 (RemoveFromGrayList(b
) ? JS_GC_SWAP_OBJECT_B_REMOVED
: 0);
1058 void js::NotifyGCPostSwap(JSObject
* a
, JSObject
* b
, unsigned removedFlags
) {
1060 * Two objects in the same compartment have had their contents swapped. If
1061 * either of them were in our gray pointer list, we re-add them again.
1063 if (removedFlags
& JS_GC_SWAP_OBJECT_A_REMOVED
) {
1064 DelayCrossCompartmentGrayMarking(nullptr, b
);
1066 if (removedFlags
& JS_GC_SWAP_OBJECT_B_REMOVED
) {
1067 DelayCrossCompartmentGrayMarking(nullptr, a
);
1071 static inline void MaybeCheckWeakMapMarking(GCRuntime
* gc
) {
1072 #if defined(JS_GC_ZEAL) || defined(DEBUG)
1078 shouldCheck
= gc
->hasZealMode(ZealMode::CheckWeakMapMarking
);
1082 for (SweepGroupZonesIter
zone(gc
); !zone
.done(); zone
.next()) {
1083 MOZ_RELEASE_ASSERT(WeakMapBase::checkMarkingForZone(zone
));
1090 IncrementalProgress
GCRuntime::beginMarkingSweepGroup(JS::GCContext
* gcx
,
1091 SliceBudget
& budget
) {
1093 MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping
);
1094 assertNoMarkingWork();
1095 for (auto& marker
: markers
) {
1096 MOZ_ASSERT(marker
->markColor() == MarkColor::Black
);
1098 MOZ_ASSERT(cellsToAssertNotGray
.ref().empty());
1101 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::MARK
);
1103 // Change state of current group to MarkBlackAndGray to restrict gray marking
1104 // to this group. Note that there may be pointers to the atoms zone, and these
1105 // will be marked through, as they are not marked with
1106 // TraceCrossCompartmentEdge.
1107 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1108 zone
->changeGCState(zone
->initialMarkingState(), Zone::MarkBlackAndGray
);
1111 AutoSetMarkColor
setColorGray(marker(), MarkColor::Gray
);
1113 // Mark incoming gray pointers from previously swept compartments.
1114 markIncomingGrayCrossCompartmentPointers();
1119 IncrementalProgress
GCRuntime::markGrayRootsInCurrentGroup(
1120 JS::GCContext
* gcx
, SliceBudget
& budget
) {
1121 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::MARK
);
1123 AutoSetMarkColor
setColorGray(marker(), MarkColor::Gray
);
1125 return markGrayRoots
<SweepGroupZonesIter
>(budget
,
1126 gcstats::PhaseKind::MARK_GRAY
);
1129 IncrementalProgress
GCRuntime::markGray(JS::GCContext
* gcx
,
1130 SliceBudget
& budget
) {
1131 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::MARK
);
1133 if (markUntilBudgetExhausted(budget
, AllowParallelMarking
) == NotFinished
) {
1140 IncrementalProgress
GCRuntime::endMarkingSweepGroup(JS::GCContext
* gcx
,
1141 SliceBudget
& budget
) {
1143 MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping
);
1144 assertNoMarkingWork();
1145 for (auto& marker
: markers
) {
1146 MOZ_ASSERT(marker
->markColor() == MarkColor::Black
);
1148 MOZ_ASSERT(!HasIncomingCrossCompartmentPointers(rt
));
1151 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::MARK
);
1153 if (markWeakReferencesInCurrentGroup(budget
) == NotFinished
) {
1157 AutoSetMarkColor
setColorGray(marker(), MarkColor::Gray
);
1159 // Mark transitively inside the current compartment group.
1160 if (markWeakReferencesInCurrentGroup(budget
) == NotFinished
) {
1164 MOZ_ASSERT(marker().isDrained());
1166 // We must not yield after this point before we start sweeping the group.
1167 safeToYield
= false;
1169 MaybeCheckWeakMapMarking(this);
1174 // Causes the given WeakCache to be swept when run.
1175 class ImmediateSweepWeakCacheTask
: public GCParallelTask
{
1177 JS::detail::WeakCacheBase
& cache
;
1180 ImmediateSweepWeakCacheTask(GCRuntime
* gc
, Zone
* zone
,
1181 JS::detail::WeakCacheBase
& wc
)
1182 : GCParallelTask(gc
, gcstats::PhaseKind::SWEEP_WEAK_CACHES
),
1186 ImmediateSweepWeakCacheTask(ImmediateSweepWeakCacheTask
&& other
) noexcept
1187 : GCParallelTask(std::move(other
)),
1189 cache(other
.cache
) {}
1191 ImmediateSweepWeakCacheTask(const ImmediateSweepWeakCacheTask
&) = delete;
1193 void run(AutoLockHelperThreadState
& lock
) override
{
1194 AutoUnlockHelperThreadState
unlock(lock
);
1195 AutoSetThreadIsSweeping
threadIsSweeping(zone
);
1196 SweepingTracer
trc(gc
->rt
);
1197 cache
.traceWeak(&trc
, &gc
->storeBuffer());
1201 void GCRuntime::updateAtomsBitmap() {
1203 if (atomMarking
.computeBitmapFromChunkMarkBits(rt
, marked
)) {
1204 for (GCZonesIter
zone(this); !zone
.done(); zone
.next()) {
1205 atomMarking
.refineZoneBitmapForCollectedZone(zone
, marked
);
1208 // Ignore OOM in computeBitmapFromChunkMarkBits. The
1209 // refineZoneBitmapForCollectedZone call can only remove atoms from the
1210 // zone bitmap, so it is conservative to just not call it.
1213 atomMarking
.markAtomsUsedByUncollectedZones(rt
);
1215 // For convenience sweep these tables non-incrementally as part of bitmap
1216 // sweeping; they are likely to be much smaller than the main atoms table.
1217 SweepingTracer
trc(rt
);
1218 rt
->symbolRegistry().traceWeak(&trc
);
1221 void GCRuntime::sweepCCWrappers() {
1222 SweepingTracer
trc(rt
);
1223 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1224 zone
->traceWeakCCWEdges(&trc
);
1228 void GCRuntime::sweepRealmGlobals() {
1229 SweepingTracer
trc(rt
);
1230 for (SweepGroupRealmsIter
r(this); !r
.done(); r
.next()) {
1231 AutoSetThreadIsSweeping
threadIsSweeping(r
->zone());
1232 r
->traceWeakGlobalEdge(&trc
);
1236 void GCRuntime::sweepMisc() {
1237 SweepingTracer
trc(rt
);
1238 for (SweepGroupRealmsIter
r(this); !r
.done(); r
.next()) {
1239 AutoSetThreadIsSweeping
threadIsSweeping(r
->zone());
1240 r
->traceWeakSavedStacks(&trc
);
1241 r
->traceWeakRegExps(&trc
);
1243 for (SweepGroupCompartmentsIter
c(this); !c
.done(); c
.next()) {
1244 AutoSetThreadIsSweeping
threadIsSweeping(c
->zone());
1245 c
->traceWeakNativeIterators(&trc
);
1249 void GCRuntime::sweepCompressionTasks() {
1250 JSRuntime
* runtime
= rt
;
1252 // Attach finished compression tasks.
1253 AutoLockHelperThreadState lock
;
1254 AttachFinishedCompressions(runtime
, lock
);
1255 SweepPendingCompressions(lock
);
1258 void GCRuntime::sweepWeakMaps() {
1259 SweepingTracer
trc(rt
);
1260 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1261 /* No need to look up any more weakmap keys from this sweep group. */
1262 AutoEnterOOMUnsafeRegion oomUnsafe
;
1263 if (!zone
->gcEphemeronEdges().clear()) {
1264 oomUnsafe
.crash("clearing weak keys in beginSweepingSweepGroup()");
1267 // Lock the storebuffer since this may access it when rehashing or resizing
1269 AutoLockStoreBuffer
lock(&storeBuffer());
1270 zone
->sweepWeakMaps(&trc
);
1274 void GCRuntime::sweepUniqueIds() {
1275 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1276 AutoSetThreadIsSweeping
threadIsSweeping(zone
);
1277 zone
->sweepUniqueIds();
1281 void JS::Zone::sweepUniqueIds() {
1282 SweepingTracer
trc(runtimeFromAnyThread());
1283 uniqueIds().traceWeak(&trc
);
1287 bool UniqueIdGCPolicy::traceWeak(JSTracer
* trc
, Cell
** keyp
, uint64_t* valuep
) {
1288 // Since this is only ever used for sweeping, we can optimize it for that
1289 // case. (Compacting GC updates this table manually when it moves a cell.)
1290 MOZ_ASSERT(trc
->kind() == JS::TracerKind::Sweeping
);
1291 return (*keyp
)->isMarkedAny();
1294 void GCRuntime::sweepFinalizationObserversOnMainThread() {
1295 // This calls back into the browser which expects to be called from the main
1297 gcstats::AutoPhase
ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS
);
1298 gcstats::AutoPhase
ap2(stats(),
1299 gcstats::PhaseKind::SWEEP_FINALIZATION_OBSERVERS
);
1300 SweepingTracer
trc(rt
);
1301 AutoLockStoreBuffer
lock(&storeBuffer());
1302 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1303 traceWeakFinalizationObserverEdges(&trc
, zone
);
1307 void GCRuntime::startTask(GCParallelTask
& task
,
1308 AutoLockHelperThreadState
& lock
) {
1309 if (!CanUseExtraThreads()) {
1310 AutoUnlockHelperThreadState
unlock(lock
);
1311 task
.runFromMainThread();
1312 stats().recordParallelPhase(task
.phaseKind
, task
.duration());
1316 task
.startWithLockHeld(lock
);
1319 void GCRuntime::joinTask(GCParallelTask
& task
,
1320 AutoLockHelperThreadState
& lock
) {
1321 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::JOIN_PARALLEL_TASKS
);
1322 task
.joinWithLockHeld(lock
);
1325 void GCRuntime::sweepDebuggerOnMainThread(JS::GCContext
* gcx
) {
1326 SweepingTracer
trc(rt
);
1327 AutoLockStoreBuffer
lock(&storeBuffer());
1329 // Detach unreachable debuggers and global objects from each other.
1330 // This can modify weakmaps and so must happen before weakmap sweeping.
1331 DebugAPI::sweepAll(gcx
);
1333 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS
);
1335 // Sweep debug environment information. This performs lookups in the Zone's
1336 // unique IDs table and so must not happen in parallel with sweeping that
1339 gcstats::AutoPhase
ap2(stats(), gcstats::PhaseKind::SWEEP_MISC
);
1340 for (SweepGroupRealmsIter
r(rt
); !r
.done(); r
.next()) {
1341 r
->traceWeakDebugEnvironmentEdges(&trc
);
1346 void GCRuntime::sweepJitDataOnMainThread(JS::GCContext
* gcx
) {
1347 SweepingTracer
trc(rt
);
1349 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA
);
1351 if (initialState
!= State::NotActive
) {
1352 // Cancel any active or pending off thread compilations. We also did
1353 // this before marking (in DiscardJITCodeForGC) so this is a no-op
1354 // for non-incremental GCs.
1355 js::CancelOffThreadIonCompile(rt
, JS::Zone::Sweep
);
1358 // Bug 1071218: the following method has not yet been refactored to
1359 // work on a single zone-group at once.
1361 // Sweep entries containing about-to-be-finalized JitCode and
1362 // update relocated TypeSet::Types inside the JitcodeGlobalTable.
1363 jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt
, &trc
);
1366 if (initialState
!= State::NotActive
) {
1367 gcstats::AutoPhase
apdc(stats(), gcstats::PhaseKind::SWEEP_DISCARD_CODE
);
1368 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1369 zone
->discardJitCode(gcx
);
1373 // JitZone/JitRealm must be swept *after* discarding JIT code, because
1374 // Zone::discardJitCode might access CacheIRStubInfos deleted here.
1376 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA
);
1378 for (SweepGroupRealmsIter
r(rt
); !r
.done(); r
.next()) {
1379 r
->traceWeakEdgesInJitRealm(&trc
);
1382 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1383 if (jit::JitZone
* jitZone
= zone
->jitZone()) {
1384 jitZone
->traceWeak(&trc
, zone
);
1390 using WeakCacheTaskVector
=
1391 mozilla::Vector
<ImmediateSweepWeakCacheTask
, 0, SystemAllocPolicy
>;
1393 // Call a functor for all weak caches that need to be swept in the current
1395 template <typename Functor
>
1396 static inline bool IterateWeakCaches(JSRuntime
* rt
, Functor f
) {
1397 for (SweepGroupZonesIter
zone(rt
); !zone
.done(); zone
.next()) {
1398 for (JS::detail::WeakCacheBase
* cache
: zone
->weakCaches()) {
1399 if (!f(cache
, zone
.get())) {
1405 for (JS::detail::WeakCacheBase
* cache
: rt
->weakCaches()) {
1406 if (!f(cache
, nullptr)) {
1414 static bool PrepareWeakCacheTasks(JSRuntime
* rt
,
1415 WeakCacheTaskVector
* immediateTasks
) {
1416 // Start incremental sweeping for caches that support it or add to a vector
1417 // of sweep tasks to run on a helper thread.
1419 MOZ_ASSERT(immediateTasks
->empty());
1421 GCRuntime
* gc
= &rt
->gc
;
1423 IterateWeakCaches(rt
, [&](JS::detail::WeakCacheBase
* cache
, Zone
* zone
) {
1424 if (cache
->empty()) {
1428 // Caches that support incremental sweeping will be swept later.
1429 if (zone
&& cache
->setIncrementalBarrierTracer(&gc
->sweepingTracer
)) {
1433 return immediateTasks
->emplaceBack(gc
, zone
, *cache
);
1437 immediateTasks
->clearAndFree();
1443 static void SweepAllWeakCachesOnMainThread(JSRuntime
* rt
) {
1444 // If we ran out of memory, do all the work on the main thread.
1445 gcstats::AutoPhase
ap(rt
->gc
.stats(), gcstats::PhaseKind::SWEEP_WEAK_CACHES
);
1446 SweepingTracer
trc(rt
);
1447 IterateWeakCaches(rt
, [&](JS::detail::WeakCacheBase
* cache
, Zone
* zone
) {
1448 if (cache
->needsIncrementalBarrier()) {
1449 cache
->setIncrementalBarrierTracer(nullptr);
1451 cache
->traceWeak(&trc
, &rt
->gc
.storeBuffer());
1456 void GCRuntime::sweepEmbeddingWeakPointers(JS::GCContext
* gcx
) {
1457 using namespace gcstats
;
1459 AutoLockStoreBuffer
lock(&storeBuffer());
1461 AutoPhase
ap(stats(), PhaseKind::FINALIZE_START
);
1462 callFinalizeCallbacks(gcx
, JSFINALIZE_GROUP_PREPARE
);
1464 AutoPhase
ap2(stats(), PhaseKind::WEAK_ZONES_CALLBACK
);
1465 callWeakPointerZonesCallbacks(&sweepingTracer
);
1468 AutoPhase
ap2(stats(), PhaseKind::WEAK_COMPARTMENT_CALLBACK
);
1469 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1470 for (CompartmentsInZoneIter
comp(zone
); !comp
.done(); comp
.next()) {
1471 callWeakPointerCompartmentCallbacks(&sweepingTracer
, comp
);
1475 callFinalizeCallbacks(gcx
, JSFINALIZE_GROUP_START
);
1478 IncrementalProgress
GCRuntime::beginSweepingSweepGroup(JS::GCContext
* gcx
,
1479 SliceBudget
& budget
) {
1481 * Begin sweeping the group of zones in currentSweepGroup, performing
1482 * actions that must be done before yielding to caller.
1485 using namespace gcstats
;
1487 AutoSCC
scc(stats(), sweepGroupIndex
);
1489 bool sweepingAtoms
= false;
1490 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1491 /* Set the GC state to sweeping. */
1492 zone
->changeGCState(Zone::MarkBlackAndGray
, Zone::Sweep
);
1494 /* Purge the ArenaLists before sweeping. */
1495 zone
->arenas
.checkSweepStateNotInUse();
1496 zone
->arenas
.unmarkPreMarkedFreeCells();
1497 zone
->arenas
.clearFreeLists();
1499 if (zone
->isAtomsZone()) {
1500 sweepingAtoms
= true;
1505 for (const auto* cell
: cellsToAssertNotGray
.ref()) {
1506 JS::AssertCellIsNotGray(cell
);
1508 cellsToAssertNotGray
.ref().clearAndFree();
1511 // Updating the atom marking bitmaps. This marks atoms referenced by
1512 // uncollected zones so cannot be done in parallel with the other sweeping
1514 if (sweepingAtoms
) {
1515 AutoPhase
ap(stats(), PhaseKind::UPDATE_ATOMS_BITMAP
);
1516 updateAtomsBitmap();
1520 validateIncrementalMarking();
1523 AutoSetThreadIsSweeping threadIsSweeping
;
1525 // This must happen before sweeping realm globals.
1526 sweepDebuggerOnMainThread(gcx
);
1528 // FinalizationRegistry sweeping touches weak maps and so must not run in
1529 // parallel with that. This triggers a read barrier and can add marking work
1530 // for zones that are still marking. Must happen before sweeping realm
1532 sweepFinalizationObserversOnMainThread();
1534 // This must happen before updating embedding weak pointers.
1535 sweepRealmGlobals();
1537 sweepEmbeddingWeakPointers(gcx
);
1540 AutoLockHelperThreadState lock
;
1542 AutoPhase
ap(stats(), PhaseKind::SWEEP_COMPARTMENTS
);
1544 AutoRunParallelTask
sweepCCWrappers(this, &GCRuntime::sweepCCWrappers
,
1545 PhaseKind::SWEEP_CC_WRAPPER
,
1546 GCUse::Sweeping
, lock
);
1547 AutoRunParallelTask
sweepMisc(this, &GCRuntime::sweepMisc
,
1548 PhaseKind::SWEEP_MISC
, GCUse::Sweeping
, lock
);
1549 AutoRunParallelTask
sweepCompTasks(this, &GCRuntime::sweepCompressionTasks
,
1550 PhaseKind::SWEEP_COMPRESSION
,
1551 GCUse::Sweeping
, lock
);
1552 AutoRunParallelTask
sweepWeakMaps(this, &GCRuntime::sweepWeakMaps
,
1553 PhaseKind::SWEEP_WEAKMAPS
,
1554 GCUse::Sweeping
, lock
);
1555 AutoRunParallelTask
sweepUniqueIds(this, &GCRuntime::sweepUniqueIds
,
1556 PhaseKind::SWEEP_UNIQUEIDS
,
1557 GCUse::Sweeping
, lock
);
1559 WeakCacheTaskVector sweepCacheTasks
;
1560 bool canSweepWeakCachesOffThread
=
1561 PrepareWeakCacheTasks(rt
, &sweepCacheTasks
);
1562 if (canSweepWeakCachesOffThread
) {
1563 weakCachesToSweep
.ref().emplace(currentSweepGroup
);
1564 for (auto& task
: sweepCacheTasks
) {
1565 startTask(task
, lock
);
1570 AutoUnlockHelperThreadState
unlock(lock
);
1571 sweepJitDataOnMainThread(gcx
);
1573 if (!canSweepWeakCachesOffThread
) {
1574 MOZ_ASSERT(sweepCacheTasks
.empty());
1575 SweepAllWeakCachesOnMainThread(rt
);
1579 for (auto& task
: sweepCacheTasks
) {
1580 joinTask(task
, lock
);
1584 if (sweepingAtoms
) {
1585 startSweepingAtomsTable();
1588 // Queue all GC things in all zones for sweeping, either on the foreground
1589 // or on the background thread.
1591 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1592 for (const auto& phase
: BackgroundFinalizePhases
) {
1593 initBackgroundSweep(zone
, gcx
, phase
);
1596 zone
->arenas
.queueForegroundThingsForSweep();
1599 MOZ_ASSERT(!sweepZone
);
1602 markOnBackgroundThreadDuringSweeping
= CanUseExtraThreads();
1608 bool GCRuntime::shouldYieldForZeal(ZealMode mode
) {
1609 bool yield
= useZeal
&& hasZealMode(mode
);
1611 // Only yield on the first sweep slice for this mode.
1612 bool firstSweepSlice
= initialState
!= State::Sweep
;
1613 if (mode
== ZealMode::IncrementalMultipleSlices
&& !firstSweepSlice
) {
1621 IncrementalProgress
GCRuntime::endSweepingSweepGroup(JS::GCContext
* gcx
,
1622 SliceBudget
& budget
) {
1623 // This is to prevent a race between markTask checking the zone state and
1624 // us changing it below.
1625 if (joinBackgroundMarkTask() == NotFinished
) {
1629 assertNoMarkingWork();
1631 // Disable background marking during sweeping until we start sweeping the next
1633 markOnBackgroundThreadDuringSweeping
= false;
1636 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::FINALIZE_END
);
1637 AutoLockStoreBuffer
lock(&storeBuffer());
1638 callFinalizeCallbacks(gcx
, JSFINALIZE_GROUP_END
);
1641 /* Free LIFO blocks on a background thread if possible. */
1642 startBackgroundFree();
1644 /* Update the GC state for zones we have swept. */
1645 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1646 if (jit::JitZone
* jitZone
= zone
->jitZone()) {
1647 // Clear out any small pools that we're hanging on to.
1648 jitZone
->execAlloc().purge();
1650 AutoLockGC
lock(this);
1651 zone
->changeGCState(Zone::Sweep
, Zone::Finished
);
1652 zone
->arenas
.unmarkPreMarkedFreeCells();
1653 zone
->arenas
.checkNoArenasToUpdate();
1654 zone
->pretenuring
.clearCellCountsInNewlyCreatedArenas();
1658 * Start background thread to sweep zones if required, sweeping any atoms
1659 * zones last if present.
1662 for (SweepGroupZonesIter
zone(this); !zone
.done(); zone
.next()) {
1663 if (zone
->isAtomsZone()) {
1666 zones
.prepend(zone
);
1670 queueZonesAndStartBackgroundSweep(std::move(zones
));
1675 IncrementalProgress
GCRuntime::markDuringSweeping(JS::GCContext
* gcx
,
1676 SliceBudget
& budget
) {
1677 MOZ_ASSERT(markTask
.isIdle());
1679 if (markOnBackgroundThreadDuringSweeping
) {
1680 if (!marker().isDrained() || hasDelayedMarking()) {
1681 AutoLockHelperThreadState lock
;
1682 MOZ_ASSERT(markTask
.isIdle(lock
));
1683 markTask
.setBudget(budget
);
1684 markTask
.startOrRunIfIdle(lock
);
1686 return Finished
; // This means don't yield to the mutator here.
1689 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::MARK
);
1690 return markUntilBudgetExhausted(budget
, AllowParallelMarking
);
1693 void GCRuntime::beginSweepPhase(JS::GCReason reason
, AutoGCSession
& session
) {
1697 * Finalize as we sweep, outside of lock but with RuntimeHeapIsBusy()
1698 * true so that any attempt to allocate a GC-thing from a finalizer will
1699 * fail, rather than nest badly and leave the unmarked newborn to be swept.
1702 MOZ_ASSERT(!abortSweepAfterCurrentGroup
);
1703 MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping
);
1706 releaseHeldRelocatedArenas();
1711 computeNonIncrementalMarkingForValidation(session
);
1714 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP
);
1716 AssertNoWrappersInGrayList(rt
);
1717 dropStringWrappers();
1719 groupZonesForSweeping(reason
);
1721 sweepActions
->assertFinished();
1724 bool GCRuntime::foregroundFinalize(JS::GCContext
* gcx
, Zone
* zone
,
1725 AllocKind thingKind
,
1726 SliceBudget
& sliceBudget
,
1727 SortedArenaList
& sweepList
) {
1728 ArenaLists
& lists
= zone
->arenas
;
1729 lists
.checkNoArenasToUpdateForKind(thingKind
);
1731 // Non-empty arenas are reused for use for new allocations as soon as the
1732 // finalizers for that allocation kind have run. Empty arenas are only
1733 // released when everything in the zone has been swept (see
1734 // GCRuntime::sweepBackgroundThings for more details).
1735 if (!FinalizeArenas(gcx
, lists
.collectingArenaList(thingKind
), sweepList
,
1736 thingKind
, sliceBudget
)) {
1737 // Copy the current contents of sweepList so that ArenaIter can find them.
1738 lists
.setIncrementalSweptArenas(thingKind
, sweepList
);
1742 sweepList
.extractEmpty(&lists
.savedEmptyArenas
.ref());
1743 lists
.mergeFinalizedArenas(thingKind
, sweepList
);
1744 lists
.clearIncrementalSweptArenas();
1749 BackgroundMarkTask::BackgroundMarkTask(GCRuntime
* gc
)
1750 : GCParallelTask(gc
, gcstats::PhaseKind::MARK
, GCUse::Marking
),
1751 budget(SliceBudget::unlimited()) {}
1753 void js::gc::BackgroundMarkTask::run(AutoLockHelperThreadState
& lock
) {
1754 AutoUnlockHelperThreadState
unlock(lock
);
1756 // Time reporting is handled separately for parallel tasks.
1757 gc
->sweepMarkResult
= gc
->markUntilBudgetExhausted(
1758 this->budget
, GCRuntime::SingleThreadedMarking
, DontReportMarkTime
);
1761 IncrementalProgress
GCRuntime::joinBackgroundMarkTask() {
1762 AutoLockHelperThreadState lock
;
1763 if (markTask
.isIdle(lock
)) {
1767 joinTask(markTask
, lock
);
1769 IncrementalProgress result
= sweepMarkResult
;
1770 sweepMarkResult
= Finished
;
1774 template <typename T
>
1775 static void SweepThing(JS::GCContext
* gcx
, T
* thing
) {
1776 if (!TenuredThingIsMarkedAny(thing
)) {
1781 template <typename T
>
1782 static bool SweepArenaList(JS::GCContext
* gcx
, Arena
** arenasToSweep
,
1783 SliceBudget
& sliceBudget
) {
1784 while (Arena
* arena
= *arenasToSweep
) {
1785 MOZ_ASSERT(arena
->zone
->isGCSweeping());
1787 for (ArenaCellIterUnderGC
cell(arena
); !cell
.done(); cell
.next()) {
1788 SweepThing(gcx
, cell
.as
<T
>());
1791 Arena
* next
= arena
->next
;
1792 MOZ_ASSERT_IF(next
, next
->zone
== arena
->zone
);
1793 *arenasToSweep
= next
;
1795 AllocKind kind
= MapTypeToAllocKind
<T
>::kind
;
1796 sliceBudget
.step(Arena::thingsPerArena(kind
));
1797 if (sliceBudget
.isOverBudget()) {
1805 void GCRuntime::startSweepingAtomsTable() {
1806 auto& maybeAtoms
= maybeAtomsToSweep
.ref();
1807 MOZ_ASSERT(maybeAtoms
.isNothing());
1809 AtomsTable
* atomsTable
= rt
->atomsForSweeping();
1814 // Create secondary tables to hold new atoms added while we're sweeping the
1815 // main tables incrementally.
1816 if (!atomsTable
->startIncrementalSweep(maybeAtoms
)) {
1817 SweepingTracer
trc(rt
);
1818 atomsTable
->traceWeak(&trc
);
1822 IncrementalProgress
GCRuntime::sweepAtomsTable(JS::GCContext
* gcx
,
1823 SliceBudget
& budget
) {
1824 if (!atomsZone()->isGCSweeping()) {
1828 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP_ATOMS_TABLE
);
1830 auto& maybeAtoms
= maybeAtomsToSweep
.ref();
1835 if (!rt
->atomsForSweeping()->sweepIncrementally(maybeAtoms
.ref(), budget
)) {
1844 static size_t IncrementalSweepWeakCache(GCRuntime
* gc
,
1845 const WeakCacheToSweep
& item
) {
1846 AutoSetThreadIsSweeping
threadIsSweeping(item
.zone
);
1848 JS::detail::WeakCacheBase
* cache
= item
.cache
;
1849 MOZ_ASSERT(cache
->needsIncrementalBarrier());
1851 SweepingTracer
trc(gc
->rt
);
1852 size_t steps
= cache
->traceWeak(&trc
, &gc
->storeBuffer());
1853 cache
->setIncrementalBarrierTracer(nullptr);
1858 WeakCacheSweepIterator::WeakCacheSweepIterator(JS::Zone
* sweepGroup
)
1859 : sweepZone(sweepGroup
), sweepCache(sweepZone
->weakCaches().getFirst()) {
1863 bool WeakCacheSweepIterator::done() const { return !sweepZone
; }
1865 WeakCacheToSweep
WeakCacheSweepIterator::get() const {
1866 MOZ_ASSERT(!done());
1868 return {sweepCache
, sweepZone
};
1871 void WeakCacheSweepIterator::next() {
1872 MOZ_ASSERT(!done());
1874 sweepCache
= sweepCache
->getNext();
1878 void WeakCacheSweepIterator::settle() {
1880 while (sweepCache
&& !sweepCache
->needsIncrementalBarrier()) {
1881 sweepCache
= sweepCache
->getNext();
1888 sweepZone
= sweepZone
->nextNodeInGroup();
1890 sweepCache
= sweepZone
->weakCaches().getFirst();
1894 MOZ_ASSERT((!sweepZone
&& !sweepCache
) ||
1895 (sweepCache
&& sweepCache
->needsIncrementalBarrier()));
1898 IncrementalProgress
GCRuntime::sweepWeakCaches(JS::GCContext
* gcx
,
1899 SliceBudget
& budget
) {
1900 if (weakCachesToSweep
.ref().isNothing()) {
1904 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS
);
1906 WeakCacheSweepIterator
& work
= weakCachesToSweep
.ref().ref();
1908 AutoLockHelperThreadState lock
;
1911 AutoRunParallelWork
runWork(this, IncrementalSweepWeakCache
,
1912 gcstats::PhaseKind::SWEEP_WEAK_CACHES
,
1913 GCUse::Sweeping
, work
, budget
, lock
);
1914 AutoUnlockHelperThreadState
unlock(lock
);
1918 weakCachesToSweep
.ref().reset();
1925 IncrementalProgress
GCRuntime::finalizeAllocKind(JS::GCContext
* gcx
,
1926 SliceBudget
& budget
) {
1927 MOZ_ASSERT(sweepZone
->isGCSweeping());
1929 // Set the number of things per arena for this AllocKind.
1930 size_t thingsPerArena
= Arena::thingsPerArena(sweepAllocKind
);
1931 auto& sweepList
= incrementalSweepList
.ref();
1932 sweepList
.setThingsPerArena(thingsPerArena
);
1934 AutoSetThreadIsFinalizing
threadIsFinalizing(gcx
);
1936 if (!foregroundFinalize(gcx
, sweepZone
, sweepAllocKind
, budget
, sweepList
)) {
1940 // Reset the slots of the sweep list that we used.
1941 sweepList
.reset(thingsPerArena
);
1946 IncrementalProgress
GCRuntime::sweepPropMapTree(JS::GCContext
* gcx
,
1947 SliceBudget
& budget
) {
1948 // Remove dead SharedPropMaps from the tree. This happens incrementally on the
1949 // main thread. PropMaps are finalized later on the a background thread.
1951 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP_PROP_MAP
);
1953 ArenaLists
& al
= sweepZone
->arenas
;
1955 if (!SweepArenaList
<CompactPropMap
>(
1956 gcx
, &al
.gcCompactPropMapArenasToUpdate
.ref(), budget
)) {
1959 if (!SweepArenaList
<NormalPropMap
>(
1960 gcx
, &al
.gcNormalPropMapArenasToUpdate
.ref(), budget
)) {
// An iterator for a standard container that provides an STL-like begin()/end()
// interface. This iterator provides a done()/get()/next() style interface.
template <typename Container>
class ContainerIter {
  using Iter = decltype(std::declval<const Container>().begin());
  using Elem = decltype(*std::declval<Iter>());

  Iter iter;
  Iter end;

 public:
  explicit ContainerIter(const Container& container)
      : iter(container.begin()), end(container.end()) {}

  bool done() const { return iter == end; }

  Elem get() const { return *iter; }

  void next() {
    MOZ_ASSERT(!done());
    ++iter;
  }
};
1991 // IncrementalIter is a template class that makes a normal iterator into one
1992 // that can be used to perform incremental work by using external state that
1993 // persists between instantiations. The state is only initialised on the first
1994 // use and subsequent uses carry on from the previous state.
1995 template <typename Iter
>
1996 struct IncrementalIter
{
1997 using State
= mozilla::Maybe
<Iter
>;
1998 using Elem
= decltype(std::declval
<Iter
>().get());
2004 template <typename
... Args
>
2005 explicit IncrementalIter(State
& maybeIter
, Args
&&... args
)
2006 : maybeIter(maybeIter
) {
2007 if (maybeIter
.isNothing()) {
2008 maybeIter
.emplace(std::forward
<Args
>(args
)...);
2012 ~IncrementalIter() {
2018 bool done() const { return maybeIter
.ref().done(); }
2020 Elem
get() const { return maybeIter
.ref().get(); }
2022 void next() { maybeIter
.ref().next(); }
2025 // Iterate through the sweep groups created by
2026 // GCRuntime::groupZonesForSweeping().
2027 class js::gc::SweepGroupsIter
{
2031 explicit SweepGroupsIter(JSRuntime
* rt
) : gc(&rt
->gc
) {
2032 MOZ_ASSERT(gc
->currentSweepGroup
);
2035 bool done() const { return !gc
->currentSweepGroup
; }
2037 Zone
* get() const { return gc
->currentSweepGroup
; }
2040 MOZ_ASSERT(!done());
2041 gc
->getNextSweepGroup();
2045 namespace sweepaction
{
2047 // Implementation of the SweepAction interface that calls a method on GCRuntime.
2048 class SweepActionCall final
: public SweepAction
{
2049 using Method
= IncrementalProgress (GCRuntime::*)(JS::GCContext
* gcx
,
2050 SliceBudget
& budget
);
2055 explicit SweepActionCall(Method m
) : method(m
) {}
2056 IncrementalProgress
run(Args
& args
) override
{
2057 return (args
.gc
->*method
)(args
.gcx
, args
.budget
);
2059 void assertFinished() const override
{}
2062 // Implementation of the SweepAction interface that yields in a specified zeal
2064 class SweepActionMaybeYield final
: public SweepAction
{
2071 explicit SweepActionMaybeYield(ZealMode mode
)
2079 IncrementalProgress
run(Args
& args
) override
{
2081 if (!isYielding
&& args
.gc
->shouldYieldForZeal(mode
)) {
2091 void assertFinished() const override
{ MOZ_ASSERT(!isYielding
); }
2093 // These actions should be skipped if GC zeal is not configured.
2095 bool shouldSkip() override
{ return true; }
2099 // Implementation of the SweepAction interface that calls a list of actions in
2101 class SweepActionSequence final
: public SweepAction
{
2102 using ActionVector
= Vector
<UniquePtr
<SweepAction
>, 0, SystemAllocPolicy
>;
2103 using Iter
= IncrementalIter
<ContainerIter
<ActionVector
>>;
2105 ActionVector actions
;
2106 typename
Iter::State iterState
;
2109 bool init(UniquePtr
<SweepAction
>* acts
, size_t count
) {
2110 for (size_t i
= 0; i
< count
; i
++) {
2111 auto& action
= acts
[i
];
2115 if (action
->shouldSkip()) {
2118 if (!actions
.emplaceBack(std::move(action
))) {
2125 IncrementalProgress
run(Args
& args
) override
{
2126 for (Iter
iter(iterState
, actions
); !iter
.done(); iter
.next()) {
2127 if (iter
.get()->run(args
) == NotFinished
) {
2134 void assertFinished() const override
{
2135 MOZ_ASSERT(iterState
.isNothing());
2136 for (const auto& action
: actions
) {
2137 action
->assertFinished();
2142 template <typename Iter
, typename Init
>
2143 class SweepActionForEach final
: public SweepAction
{
2144 using Elem
= decltype(std::declval
<Iter
>().get());
2145 using IncrIter
= IncrementalIter
<Iter
>;
2149 UniquePtr
<SweepAction
> action
;
2150 typename
IncrIter::State iterState
;
2153 SweepActionForEach(const Init
& init
, Elem
* maybeElemOut
,
2154 UniquePtr
<SweepAction
> action
)
2155 : iterInit(init
), elemOut(maybeElemOut
), action(std::move(action
)) {}
2157 IncrementalProgress
run(Args
& args
) override
{
2158 MOZ_ASSERT_IF(elemOut
, *elemOut
== Elem());
2159 auto clearElem
= mozilla::MakeScopeExit([&] { setElem(Elem()); });
2160 for (IncrIter
iter(iterState
, iterInit
); !iter
.done(); iter
.next()) {
2161 setElem(iter
.get());
2162 if (action
->run(args
) == NotFinished
) {
2169 void assertFinished() const override
{
2170 MOZ_ASSERT(iterState
.isNothing());
2171 MOZ_ASSERT_IF(elemOut
, *elemOut
== Elem());
2172 action
->assertFinished();
2176 void setElem(const Elem
& value
) {
2183 static UniquePtr
<SweepAction
> Call(IncrementalProgress (GCRuntime::*method
)(
2184 JS::GCContext
* gcx
, SliceBudget
& budget
)) {
2185 return MakeUnique
<SweepActionCall
>(method
);
2188 static UniquePtr
<SweepAction
> MaybeYield(ZealMode zealMode
) {
2189 return MakeUnique
<SweepActionMaybeYield
>(zealMode
);
2192 template <typename
... Rest
>
2193 static UniquePtr
<SweepAction
> Sequence(UniquePtr
<SweepAction
> first
,
2195 UniquePtr
<SweepAction
> actions
[] = {std::move(first
), std::move(rest
)...};
2196 auto seq
= MakeUnique
<SweepActionSequence
>();
2197 if (!seq
|| !seq
->init(actions
, std::size(actions
))) {
2201 return UniquePtr
<SweepAction
>(std::move(seq
));
2204 static UniquePtr
<SweepAction
> RepeatForSweepGroup(
2205 JSRuntime
* rt
, UniquePtr
<SweepAction
> action
) {
2210 using Action
= SweepActionForEach
<SweepGroupsIter
, JSRuntime
*>;
2211 return js::MakeUnique
<Action
>(rt
, nullptr, std::move(action
));
2214 static UniquePtr
<SweepAction
> ForEachZoneInSweepGroup(
2215 JSRuntime
* rt
, Zone
** zoneOut
, UniquePtr
<SweepAction
> action
) {
2220 using Action
= SweepActionForEach
<SweepGroupZonesIter
, JSRuntime
*>;
2221 return js::MakeUnique
<Action
>(rt
, zoneOut
, std::move(action
));
2224 static UniquePtr
<SweepAction
> ForEachAllocKind(AllocKinds kinds
,
2226 UniquePtr
<SweepAction
> action
) {
2231 using Action
= SweepActionForEach
<ContainerIter
<AllocKinds
>, AllocKinds
>;
2232 return js::MakeUnique
<Action
>(kinds
, kindOut
, std::move(action
));
2235 } // namespace sweepaction
2237 bool GCRuntime::initSweepActions() {
2238 using namespace sweepaction
;
2239 using sweepaction::Call
;
2241 sweepActions
.ref() = RepeatForSweepGroup(
2244 Call(&GCRuntime::beginMarkingSweepGroup
),
2245 Call(&GCRuntime::markGrayRootsInCurrentGroup
),
2246 MaybeYield(ZealMode::YieldWhileGrayMarking
),
2247 Call(&GCRuntime::markGray
), Call(&GCRuntime::endMarkingSweepGroup
),
2248 Call(&GCRuntime::beginSweepingSweepGroup
),
2249 MaybeYield(ZealMode::IncrementalMultipleSlices
),
2250 MaybeYield(ZealMode::YieldBeforeSweepingAtoms
),
2251 Call(&GCRuntime::sweepAtomsTable
),
2252 MaybeYield(ZealMode::YieldBeforeSweepingCaches
),
2253 Call(&GCRuntime::sweepWeakCaches
),
2254 ForEachZoneInSweepGroup(
2255 rt
, &sweepZone
.ref(),
2256 Sequence(MaybeYield(ZealMode::YieldBeforeSweepingObjects
),
2257 ForEachAllocKind(ForegroundObjectFinalizePhase
.kinds
,
2258 &sweepAllocKind
.ref(),
2259 Call(&GCRuntime::finalizeAllocKind
)),
2260 MaybeYield(ZealMode::YieldBeforeSweepingNonObjects
),
2261 ForEachAllocKind(ForegroundNonObjectFinalizePhase
.kinds
,
2262 &sweepAllocKind
.ref(),
2263 Call(&GCRuntime::finalizeAllocKind
)),
2264 MaybeYield(ZealMode::YieldBeforeSweepingPropMapTrees
),
2265 Call(&GCRuntime::sweepPropMapTree
))),
2266 Call(&GCRuntime::endSweepingSweepGroup
)));
2268 return sweepActions
!= nullptr;
2271 IncrementalProgress
GCRuntime::performSweepActions(SliceBudget
& budget
) {
2272 AutoMajorGCProfilerEntry
s(this);
2273 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP
);
2275 JS::GCContext
* gcx
= rt
->gcContext();
2276 AutoSetThreadIsSweeping
threadIsSweeping(gcx
);
2277 AutoPoisonFreedJitCode
pjc(gcx
);
2279 // Don't trigger pre-barriers when finalizing.
2280 AutoDisableBarriers
disableBarriers(this);
2282 // Drain the mark stack, possibly in a parallel task if we're in a part of
2283 // sweeping that allows it.
2285 // In the first sweep slice where we must not yield to the mutator until we've
2286 // starting sweeping a sweep group but in that case the stack must be empty
2290 MOZ_ASSERT(initialState
<= State::Sweep
);
2291 if (initialState
!= State::Sweep
) {
2292 assertNoMarkingWork();
2296 if (initialState
== State::Sweep
&&
2297 markDuringSweeping(gcx
, budget
) == NotFinished
) {
2301 // Then continue running sweep actions.
2303 SweepAction::Args args
{this, gcx
, budget
};
2304 IncrementalProgress sweepProgress
= sweepActions
->run(args
);
2305 IncrementalProgress markProgress
= joinBackgroundMarkTask();
2307 if (sweepProgress
== Finished
&& markProgress
== Finished
) {
2311 MOZ_ASSERT(isIncremental
);
2315 bool GCRuntime::allCCVisibleZonesWereCollected() {
2316 // Calculate whether the gray marking state is now valid.
2318 // The gray bits change from invalid to valid if we finished a full GC from
2319 // the point of view of the cycle collector. We ignore the following:
2321 // - Helper thread zones, as these are not reachable from the main heap.
2322 // - The atoms zone, since strings and symbols are never marked gray.
2325 // These exceptions ensure that when the CC requests a full GC the gray mark
2326 // state ends up valid even it we don't collect all of the zones.
2328 for (ZonesIter
zone(this, SkipAtoms
); !zone
.done(); zone
.next()) {
2329 if (!zone
->isCollecting() && !zone
->arenas
.arenaListsAreEmpty()) {
2337 void GCRuntime::endSweepPhase(bool destroyingRuntime
) {
2338 MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping
);
2340 sweepActions
->assertFinished();
2342 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::SWEEP
);
2344 MOZ_ASSERT_IF(destroyingRuntime
, !useBackgroundThreads
);
2347 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::DESTROY
);
2349 // Sweep shared script bytecode now all zones have been swept and finalizers
2350 // for BaseScripts have released their references.
2351 SweepScriptData(rt
);
2355 gcstats::AutoPhase
ap(stats(), gcstats::PhaseKind::FINALIZE_END
);
2356 AutoLockStoreBuffer
lock(&storeBuffer());
2357 callFinalizeCallbacks(rt
->gcContext(), JSFINALIZE_COLLECTION_END
);
2359 if (allCCVisibleZonesWereCollected()) {
2360 grayBitsValid
= true;
2364 if (isIncremental
) {
2365 findDeadCompartments();
2369 finishMarkingValidation();
2373 for (ZonesIter
zone(this, WithAtoms
); !zone
.done(); zone
.next()) {
2374 for (auto i
: AllAllocKinds()) {
2375 MOZ_ASSERT_IF(!IsBackgroundFinalized(i
) || !useBackgroundThreads
,
2376 zone
->arenas
.collectingArenaList(i
).isEmpty());
2381 AssertNoWrappersInGrayList(rt
);