/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Implementation of GC sweeping.
 *
 * In the SpiderMonkey GC, 'sweeping' is used to mean two things:
 *  - updating data structures to remove pointers to dead GC things and
 *    updating pointers to moved GC things
 *  - finalizing dead GC things
 *
 * Furthermore, the GC carries out gray and weak marking after the start of the
 * sweep phase. This is also implemented in this file.
 */

#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/TimeStamp.h"

#include "builtin/FinalizationRegistryObject.h"
#include "builtin/WeakRefObject.h"
#include "debugger/DebugAPI.h"
#include "gc/AllocKind.h"
#include "gc/FinalizationObservers.h"
#include "gc/GCInternals.h"
#include "gc/GCLock.h"
#include "gc/GCProbes.h"
#include "gc/GCRuntime.h"
#include "gc/ParallelWork.h"
#include "gc/Statistics.h"
#include "gc/TraceKind.h"
#include "gc/WeakMap.h"
#include "gc/Zone.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "proxy/DeadObjectProxy.h"
#include "vm/BigIntType.h"
#include "vm/HelperThreads.h"
#include "vm/JSContext.h"
#include "vm/Time.h"
#include "vm/WrapperObject.h"

#include "gc/PrivateIterators-inl.h"
#include "vm/GeckoProfiler-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/PropMap-inl.h"
#include "vm/Shape-inl.h"
#include "vm/StringType-inl.h"

using namespace js;
using namespace js::gc;

using mozilla::TimeStamp;

struct js::gc::FinalizePhase {
  gcstats::PhaseKind statsPhase;
  AllocKinds kinds;
};

/*
 * Finalization order for objects swept incrementally on the main thread.
 */
static constexpr FinalizePhase ForegroundObjectFinalizePhase = {
    gcstats::PhaseKind::FINALIZE_OBJECT,
    {AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT4,
     AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT16}};

/*
 * Finalization order for GC things swept incrementally on the main thread.
 */
static constexpr FinalizePhase ForegroundNonObjectFinalizePhase = {
    gcstats::PhaseKind::FINALIZE_NON_OBJECT,
    {AllocKind::SCRIPT, AllocKind::JITCODE}};

/*
 * Finalization order for GC things swept on the background thread.
 */
static constexpr FinalizePhase BackgroundFinalizePhases[] = {
    {gcstats::PhaseKind::FINALIZE_OBJECT,
     {AllocKind::FUNCTION, AllocKind::FUNCTION_EXTENDED,
      AllocKind::OBJECT0_BACKGROUND, AllocKind::OBJECT2_BACKGROUND,
      AllocKind::ARRAYBUFFER4, AllocKind::OBJECT4_BACKGROUND,
      AllocKind::ARRAYBUFFER8, AllocKind::OBJECT8_BACKGROUND,
      AllocKind::ARRAYBUFFER12, AllocKind::OBJECT12_BACKGROUND,
      AllocKind::ARRAYBUFFER16, AllocKind::OBJECT16_BACKGROUND}},
    {gcstats::PhaseKind::FINALIZE_NON_OBJECT,
     {AllocKind::SCOPE, AllocKind::REGEXP_SHARED, AllocKind::FAT_INLINE_STRING,
      AllocKind::STRING, AllocKind::EXTERNAL_STRING, AllocKind::FAT_INLINE_ATOM,
      AllocKind::ATOM, AllocKind::SYMBOL, AllocKind::BIGINT, AllocKind::SHAPE,
      AllocKind::BASE_SHAPE, AllocKind::GETTER_SETTER,
      AllocKind::COMPACT_PROP_MAP, AllocKind::NORMAL_PROP_MAP,
      AllocKind::DICT_PROP_MAP}}};

template <typename T>
inline size_t Arena::finalize(JS::GCContext* gcx, AllocKind thingKind,
                              size_t thingSize) {
  /* Enforce requirements on size of T. */
  MOZ_ASSERT(thingSize % CellAlignBytes == 0);
  MOZ_ASSERT(thingSize >= MinCellSize);
  MOZ_ASSERT(thingSize <= 255);

  MOZ_ASSERT(allocated());
  MOZ_ASSERT(thingKind == getAllocKind());
  MOZ_ASSERT(thingSize == getThingSize());
  MOZ_ASSERT(!onDelayedMarkingList_);

  uint_fast16_t firstThing = firstThingOffset(thingKind);
  uint_fast16_t firstThingOrSuccessorOfLastMarkedThing = firstThing;
  uint_fast16_t lastThing = ArenaSize - thingSize;

  FreeSpan newListHead;
  FreeSpan* newListTail = &newListHead;
  size_t nmarked = 0, nfinalized = 0;

  for (ArenaCellIterUnderFinalize cell(this); !cell.done(); cell.next()) {
    T* t = cell.as<T>();
    if (TenuredThingIsMarkedAny(t)) {
      uint_fast16_t thing = uintptr_t(t) & ArenaMask;
      if (thing != firstThingOrSuccessorOfLastMarkedThing) {
        // We just finished passing over one or more free things,
        // so record a new FreeSpan.
        newListTail->initBounds(firstThingOrSuccessorOfLastMarkedThing,
                                thing - thingSize, this);
        newListTail = newListTail->nextSpanUnchecked(this);
      }
      firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
      nmarked++;
    } else {
      t->finalize(gcx);
      AlwaysPoison(t, JS_SWEPT_TENURED_PATTERN, thingSize,
                   MemCheckKind::MakeUndefined);
      gcprobes::TenuredFinalize(t);
      nfinalized++;
    }
  }

  if constexpr (std::is_same_v<T, JSObject>) {
    if (isNewlyCreated_) {
      zone->pretenuring.updateCellCountsInNewlyCreatedArenas(
          nmarked + nfinalized, nmarked);
    }
  }
  isNewlyCreated_ = 0;

  if (thingKind == AllocKind::STRING ||
      thingKind == AllocKind::FAT_INLINE_STRING) {
    zone->markedStrings += nmarked;
    zone->finalizedStrings += nfinalized;
  }

  if (nmarked == 0) {
    // Do nothing. The caller will update the arena appropriately.
    MOZ_ASSERT(newListTail == &newListHead);
    DebugOnlyPoison(data, JS_SWEPT_TENURED_PATTERN, sizeof(data),
                    MemCheckKind::MakeUndefined);
    return nmarked;
  }

  MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
  uint_fast16_t lastMarkedThing =
      firstThingOrSuccessorOfLastMarkedThing - thingSize;
  if (lastThing == lastMarkedThing) {
    // If the last thing was marked, we will have already set the bounds of
    // the final span, and we just need to terminate the list.
    newListTail->initAsEmpty();
  } else {
    // Otherwise, end the list with a span that covers the final stretch of
    // free things.
    newListTail->initFinal(firstThingOrSuccessorOfLastMarkedThing, lastThing,
                           this);
  }

  firstFreeSpan = newListHead;
#ifdef DEBUG
  size_t nfree = numFreeThings(thingSize);
  MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingKind));
#endif
  return nmarked;
}
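
// A worked example of the free span construction above (hypothetical layout,
// for illustration only): take an arena with six thing slots at offsets
// F, F+S, ..., F+5S, where F = firstThingOffset(thingKind) and S = thingSize,
// in which only the things at F+S and F+4S are marked:
//
//   offset:  F     F+S     F+2S  F+3S  F+4S    F+5S
//   state:   free  MARKED  free  free  MARKED  free
//
// The loop finalizes the thing at F, then reaches the marked thing at F+S and
// records the span [F, F]. It finalizes F+2S and F+3S, reaches F+4S and
// records [F+2S, F+3S]. After the loop, the last thing (F+5S) is unmarked, so
// initFinal() appends the trailing span [F+5S, F+5S]. The result is
// nmarked == 2 with a free list covering the other four slots.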
// Finalize arenas from the src list, inserting each swept arena into the
// destination size bin matching its free count; completely empty arenas go
// into the last bin so the caller can extract and release them.
template <typename T>
static inline bool FinalizeTypedArenas(JS::GCContext* gcx, ArenaList& src,
                                       SortedArenaList& dest,
                                       AllocKind thingKind,
                                       SliceBudget& budget) {
  MOZ_ASSERT(gcx->isFinalizing());

  size_t thingSize = Arena::thingSize(thingKind);
  size_t thingsPerArena = Arena::thingsPerArena(thingKind);
  size_t markCount = 0;

  auto updateMarkCount = mozilla::MakeScopeExit([&] {
    GCRuntime* gc = &gcx->runtimeFromAnyThread()->gc;
    gc->stats().addCount(gcstats::COUNT_CELLS_MARKED, markCount);
  });

  while (Arena* arena = src.takeFirstArena()) {
    size_t nmarked = arena->finalize<T>(gcx, thingKind, thingSize);
    size_t nfree = thingsPerArena - nmarked;

    markCount += nmarked;

    if (nmarked) {
      dest.insertAt(arena, nfree);
    } else {
      arena->chunk()->recycleArena(arena, dest, thingsPerArena);
    }

    budget.step(thingsPerArena);
    if (budget.isOverBudget()) {
      return false;
    }
  }

  return true;
}
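
// For illustration (hypothetical numbers): if an alloc kind holds 42 things
// per arena and finalize() reports nmarked == 40, the arena is inserted into
// the sorted list's bin for nfree == 2, while an arena with nmarked == 0
// lands in the nfree == thingsPerArena bin via recycleArena(). Keeping arenas
// sorted by free count means the eventual arena list hands out space from the
// fullest arenas first, which concentrates live data and gives later arenas a
// better chance of emptying out completely.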
/*
 * Finalize the list of arenas.
 */
static bool FinalizeArenas(JS::GCContext* gcx, ArenaList& src,
                           SortedArenaList& dest, AllocKind thingKind,
                           SliceBudget& budget) {
  switch (thingKind) {
#define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
                    compact)                                                 \
  case AllocKind::allocKind:                                                 \
    return FinalizeTypedArenas<type>(gcx, src, dest, thingKind, budget);
    FOR_EACH_ALLOCKIND(EXPAND_CASE)
#undef EXPAND_CASE

    default:
      MOZ_CRASH("Invalid alloc kind");
  }
}
void GCRuntime::initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
                                    const FinalizePhase& phase) {
  gcstats::AutoPhase ap(stats(), phase.statsPhase);
  for (auto kind : phase.kinds) {
    zone->arenas.initBackgroundSweep(kind);
  }
}

void ArenaLists::initBackgroundSweep(AllocKind thingKind) {
  MOZ_ASSERT(IsBackgroundFinalized(thingKind));
  MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);

  if (!collectingArenaList(thingKind).isEmpty()) {
    concurrentUse(thingKind) = ConcurrentUse::BackgroundFinalize;
  }
}

void GCRuntime::backgroundFinalize(JS::GCContext* gcx, Zone* zone,
                                   AllocKind kind, Arena** empty) {
  MOZ_ASSERT(empty);

  ArenaLists* lists = &zone->arenas;
  ArenaList& arenas = lists->collectingArenaList(kind);
  if (arenas.isEmpty()) {
    MOZ_ASSERT(lists->concurrentUse(kind) == ArenaLists::ConcurrentUse::None);
    return;
  }

  SortedArenaList finalizedSorted(Arena::thingsPerArena(kind));

  auto unlimited = SliceBudget::unlimited();
  FinalizeArenas(gcx, arenas, finalizedSorted, kind, unlimited);
  MOZ_ASSERT(arenas.isEmpty());

  finalizedSorted.extractEmpty(empty);

  // When marking begins, all arenas are moved from arenaLists to
  // collectingArenaLists. When the mutator runs, new arenas are allocated in
  // arenaLists. Now that finalization is complete, we want to merge these
  // lists back together.
  //
  // We must take the GC lock to be able to safely modify the ArenaList;
  // however, this does not by itself make the changes visible to all threads,
  // as not all threads take the GC lock to read the ArenaLists.
  // That safety is provided by the ReleaseAcquire memory ordering of the
  // background finalize state, which we explicitly set as the final step.
  {
    AutoLockGC lock(rt);
    MOZ_ASSERT(lists->concurrentUse(kind) ==
               ArenaLists::ConcurrentUse::BackgroundFinalize);
    lists->mergeFinalizedArenas(kind, finalizedSorted);
  }

  lists->concurrentUse(kind) = ArenaLists::ConcurrentUse::None;
}

// After finalizing arenas, merge the following to get the final state of an
// arena list:
//  - arenas allocated during marking
//  - arenas allocated during sweeping
//  - finalized arenas
void ArenaLists::mergeFinalizedArenas(AllocKind kind,
                                      SortedArenaList& finalizedArenas) {
#ifdef DEBUG
  // Updating arena lists off-thread requires taking the GC lock because the
  // main thread uses these when allocating.
  if (IsBackgroundFinalized(kind)) {
    runtimeFromAnyThread()->gc.assertCurrentThreadHasLockedGC();
  }
#endif

  ArenaList& arenas = arenaList(kind);

  ArenaList allocatedDuringCollection = std::move(arenas);
  arenas = finalizedArenas.toArenaList();
  arenas.insertListWithCursorAtEnd(allocatedDuringCollection);

  collectingArenaList(kind).clear();
}
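
// For illustration (hypothetical arenas): if finalization produced arenas
// [F1, F2] and the mutator allocated [A1, A2] into arenaList(kind) while
// collection was in progress, the merged list is [F1, F2, A1, A2]: the
// finalized arenas, with their freshly computed free spans, come first, and
// the arenas allocated during marking and sweeping are appended after them.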
void ArenaLists::queueForegroundThingsForSweep() {
  gcCompactPropMapArenasToUpdate =
      collectingArenaList(AllocKind::COMPACT_PROP_MAP).head();
  gcNormalPropMapArenasToUpdate =
      collectingArenaList(AllocKind::NORMAL_PROP_MAP).head();
}

void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
  if (zones.isEmpty()) {
    return;
  }

  JS::GCContext* gcx = TlsGCContext.get();
  MOZ_ASSERT(gcx->isFinalizing());

  // Sweep zones in order. The atoms zone must be finalized last as other
  // zones may have direct pointers into it.
  while (!zones.isEmpty()) {
    Zone* zone = zones.removeFront();
    MOZ_ASSERT(zone->isGCFinished());

    TimeStamp startTime = TimeStamp::Now();

    Arena* emptyArenas = zone->arenas.takeSweptEmptyArenas();

    // We must finalize thing kinds in the order specified by
    // BackgroundFinalizePhases.
    for (const auto& phase : BackgroundFinalizePhases) {
      for (auto kind : phase.kinds) {
        backgroundFinalize(gcx, zone, kind, &emptyArenas);
      }
    }

    // Release any arenas that are now empty.
    //
    // Empty arenas are only released after everything has been finalized so
    // that it's still possible to get a thing's zone after the thing has been
    // finalized. The HeapPtr destructor depends on this, and this allows
    // HeapPtrs between things of different alloc kind regardless of
    // finalization order.
    //
    // Periodically drop and reacquire the GC lock to avoid blocking the main
    // thread from allocating chunks.
    static const size_t LockReleasePeriod = 32;

    while (emptyArenas) {
      AutoLockGC lock(this);
      for (size_t i = 0; i < LockReleasePeriod && emptyArenas; i++) {
        Arena* arena = emptyArenas;
        emptyArenas = emptyArenas->next;
        releaseArena(arena, lock);
      }
    }

    // Record time spent sweeping this zone.
    TimeStamp endTime = TimeStamp::Now();
    zone->perZoneGCTime += endTime - startTime;
  }
}
void GCRuntime::assertBackgroundSweepingFinished() {
#ifdef DEBUG
  {
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
  }

  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    for (auto kind : AllAllocKinds()) {
      MOZ_ASSERT_IF(state() != State::Prepare && state() != State::Mark &&
                        state() != State::Sweep,
                    zone->arenas.collectingArenaList(kind).isEmpty());
      MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(kind));
    }
  }
#endif
}

void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList&& zones) {
  {
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(!requestSliceAfterBackgroundTask);
    backgroundSweepZones.ref().appendList(std::move(zones));
    if (useBackgroundThreads) {
      sweepTask.startOrRunIfIdle(lock);
    }
  }
  if (!useBackgroundThreads) {
    sweepTask.join();
    sweepTask.runFromMainThread();
  }
}

BackgroundSweepTask::BackgroundSweepTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::SWEEP, GCUse::Finalizing) {}

void BackgroundSweepTask::run(AutoLockHelperThreadState& lock) {
  gc->sweepFromBackgroundThread(lock);
}

void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState& lock) {
  do {
    ZoneList zones;
    zones.appendList(std::move(backgroundSweepZones.ref()));

    AutoUnlockHelperThreadState unlock(lock);
    sweepBackgroundThings(zones);

    // The main thread may call queueZonesAndStartBackgroundSweep() while this
    // is running so we must check there is no more work after releasing the
    // lock.
  } while (!backgroundSweepZones.ref().isEmpty());

  maybeRequestGCAfterBackgroundTask(lock);
}

void GCRuntime::waitBackgroundSweepEnd() {
  sweepTask.join();
  if (state() != State::Sweep) {
    assertBackgroundSweepingFinished();
  }
}

void GCRuntime::startBackgroundFree() {
  AutoLockHelperThreadState lock;
  freeTask.startOrRunIfIdle(lock);
}

BackgroundFreeTask::BackgroundFreeTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
  // This can occur outside GCs so doesn't have a stats phase.
}

void BackgroundFreeTask::run(AutoLockHelperThreadState& lock) {
  gc->freeFromBackgroundThread(lock);
}

void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState& lock) {
  do {
    LifoAlloc lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
    lifoBlocks.transferFrom(&lifoBlocksToFree.ref());

    Nursery::BufferSet buffers;
    std::swap(buffers, buffersToFreeAfterMinorGC.ref());

    AutoUnlockHelperThreadState unlock(lock);

    lifoBlocks.freeAll();

    JS::GCContext* gcx = TlsGCContext.get();
    for (Nursery::BufferSet::Range r = buffers.all(); !r.empty();
         r.popFront()) {
      // Malloc memory associated with nursery objects is not tracked as these
      // are assumed to be short lived.
      gcx->freeUntracked(r.front());
    }
  } while (!lifoBlocksToFree.ref().isEmpty() ||
           !buffersToFreeAfterMinorGC.ref().empty());
}

void GCRuntime::waitBackgroundFreeEnd() { freeTask.join(); }
template <class ZoneIterT>
IncrementalProgress GCRuntime::markWeakReferences(
    SliceBudget& incrementalBudget) {
  MOZ_ASSERT(!marker().isWeakMarking());

  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK_WEAK);

  auto unlimited = SliceBudget::unlimited();
  SliceBudget& budget =
      marker().incrementalWeakMapMarkingEnabled ? incrementalBudget : unlimited;

  // Ensure we don't return to the mutator while we're still in weak marking
  // mode.
  auto leaveOnExit =
      mozilla::MakeScopeExit([&] { marker().leaveWeakMarkingMode(); });

  if (marker().enterWeakMarkingMode()) {
    // If there was an 'enter-weak-marking-mode' token in the queue, then it
    // and everything after it will still be in the queue so we can process
    // them now.
    while (processTestMarkQueue() == QueueYielded) {
    }
    // Do not rely on the information about not-yet-marked weak keys that have
    // been collected by barriers. Clear out the gcEphemeronEdges entries and
    // rebuild the full table. Note that this is a cross-zone operation;
    // delegate zone entries will be populated by map zone traversals, so
    // everything needs to be cleared first, then populated.
    if (!marker().incrementalWeakMapMarkingEnabled) {
      for (ZoneIterT zone(this); !zone.done(); zone.next()) {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!zone->gcEphemeronEdges().clear()) {
          oomUnsafe.crash("clearing weak keys when entering weak marking mode");
        }
      }
    }

    for (ZoneIterT zone(this); !zone.done(); zone.next()) {
      if (zone->enterWeakMarkingMode(&marker(), budget) == NotFinished) {
        return NotFinished;
      }
    }
  }

  bool markedAny = true;
  while (markedAny) {
    if (!marker().markUntilBudgetExhausted(budget)) {
      MOZ_ASSERT(marker().incrementalWeakMapMarkingEnabled);
      return NotFinished;
    }

    markedAny = false;

    if (!marker().isWeakMarking()) {
      for (ZoneIterT zone(this); !zone.done(); zone.next()) {
        markedAny |= WeakMapBase::markZoneIteratively(zone, &marker());
      }
    }

    markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker());
  }

  assertNoMarkingWork();

  return Finished;
}
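
// The markedAny loop above is a fixpoint computation over ephemeron edges. A
// sketch of why repeated passes are needed (hypothetical weak maps): if
// map1.get(k1) == k2 and map2.get(k2) == v, then marking k1 lets one pass
// mark k2 (a newly live key for map2), and only the following pass can mark
// v. The loop exits once a whole pass over the zones marks nothing new.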
IncrementalProgress GCRuntime::markWeakReferencesInCurrentGroup(
    SliceBudget& budget) {
  return markWeakReferences<SweepGroupZonesIter>(budget);
}

template <class ZoneIterT>
IncrementalProgress GCRuntime::markGrayRoots(SliceBudget& budget,
                                             gcstats::PhaseKind phase) {
  MOZ_ASSERT(marker().markColor() == MarkColor::Gray);

  gcstats::AutoPhase ap(stats(), phase);

  AutoUpdateLiveCompartments updateLive(this);
  marker().setRootMarkingMode(true);
  auto guard =
      mozilla::MakeScopeExit([this]() { marker().setRootMarkingMode(false); });

  IncrementalProgress result =
      traceEmbeddingGrayRoots(marker().tracer(), budget);
  if (result == NotFinished) {
    return NotFinished;
  }

  Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
      marker().tracer(), Compartment::GrayEdges);

  return Finished;
}

IncrementalProgress GCRuntime::markAllWeakReferences() {
  SliceBudget budget = SliceBudget::unlimited();
  return markWeakReferences<GCZonesIter>(budget);
}

void GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase) {
  SliceBudget budget = SliceBudget::unlimited();
  markGrayRoots<GCZonesIter>(budget, phase);
  drainMarkStack();
}

void GCRuntime::dropStringWrappers() {
  /*
   * String "wrappers" are dropped on GC because their presence would require
   * us to sweep the wrappers in all compartments every time we sweep a
   * compartment group.
   */
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    zone->dropStringWrappersOnGC();
  }
}
/*
 * Group zones that must be swept at the same time.
 *
 * From the point of view of the mutator, groups of zones transition atomically
 * from marking to sweeping. If compartment A has an edge to an unmarked object
 * in compartment B, then we must not start sweeping A in a later slice than we
 * start sweeping B. That's because a write barrier in A could lead to the
 * unmarked object in B becoming marked. However, if we had already swept that
 * object, we would be in trouble.
 *
 * If we consider these dependencies as a graph, then all the compartments in
 * any strongly-connected component of this graph must start sweeping in the
 * same slice.
 *
 * Tarjan's algorithm is used to calculate the components.
 */
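
/*
 * For example (hypothetical zones): an edge from A to B means A must not
 * start sweeping in a later slice than B. Given edges A -> B, B -> A and
 * C -> A, zones {A, B} form a strongly-connected component and are placed in
 * a single sweep group, while C only needs to start sweeping no later than
 * that group does.
 */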
bool Compartment::findSweepGroupEdges() {
  Zone* source = zone();
  for (WrappedObjectCompartmentEnum e(this); !e.empty(); e.popFront()) {
    Compartment* targetComp = e.front();
    Zone* target = targetComp->zone();

    if (!target->isGCMarking() || source->hasSweepGroupEdgeTo(target)) {
      continue;
    }

    for (ObjectWrapperEnum e(this, targetComp); !e.empty(); e.popFront()) {
      JSObject* key = e.front().mutableKey();
      MOZ_ASSERT(key->zone() == target);

      // Add an edge to the wrapped object's zone to ensure that the wrapper
      // zone is not still being marked when we start sweeping the wrapped
      // zone. As an optimization, if the wrapped object is already marked
      // black there is no danger of later marking and we can skip this.
      if (key->isMarkedBlack()) {
        continue;
      }

      if (!source->addSweepGroupEdgeTo(target)) {
        return false;
      }

      // We don't need to consider any more wrappers for this target
      // compartment since we already added an edge.
      break;
    }
  }

  return true;
}

bool Zone::findSweepGroupEdges(Zone* atomsZone) {
  MOZ_ASSERT_IF(this != atomsZone, !isAtomsZone());

#ifdef DEBUG
  if (FinalizationObservers* observers = finalizationObservers()) {
    observers->checkTables();
  }
#endif

  // Any zone may have a pointer to an atom in the atoms zone, and these aren't
  // in the cross compartment map.
  if (atomsZone->wasGCStarted() && !addSweepGroupEdgeTo(atomsZone)) {
    return false;
  }

  for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
    if (!comp->findSweepGroupEdges()) {
      return false;
    }
  }

  return WeakMapBase::findSweepGroupEdgesForZone(this);
}
bool GCRuntime::addEdgesForMarkQueue() {
#ifdef DEBUG
  // For testing only.
  //
  // Add edges between all objects mentioned in the test mark queue, since
  // otherwise they will get marked in a different order than their sweep
  // groups. Note that this is only done at the beginning of an incremental
  // collection, so it is possible for objects to be added later that do not
  // follow the sweep group ordering. These objects will wait until their sweep
  // group comes up, or will be skipped if their sweep group is already past.
  JS::Zone* prevZone = nullptr;
  for (Value val : testMarkQueue) {
    if (!val.isObject()) {
      continue;
    }
    JSObject* obj = &val.toObject();
    JS::Zone* zone = obj->zone();
    if (!zone->isGCMarking()) {
      continue;
    }
    if (prevZone && prevZone != zone) {
      if (!prevZone->addSweepGroupEdgeTo(zone)) {
        return false;
      }
    }
    prevZone = zone;
  }
#endif
  return true;
}

bool GCRuntime::findSweepGroupEdges() {
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    if (!zone->findSweepGroupEdges(atomsZone())) {
      return false;
    }
  }

  if (!addEdgesForMarkQueue()) {
    return false;
  }

  return DebugAPI::findSweepGroupEdges(rt);
}

void GCRuntime::groupZonesForSweeping(JS::GCReason reason) {
#ifdef DEBUG
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
  }
#endif

  JSContext* cx = rt->mainContextFromOwnThread();
  ZoneComponentFinder finder(cx);
  if (!isIncremental || !findSweepGroupEdges()) {
    finder.useOneComponent();
  }

  // Use one component for two-slice zeal modes.
  if (useZeal && hasIncrementalTwoSliceZealMode()) {
    finder.useOneComponent();
  }

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    MOZ_ASSERT(zone->isGCMarking());
    finder.addNode(zone);
  }
  sweepGroups = finder.getResultsList();
  currentSweepGroup = sweepGroups;
  sweepGroupIndex = 1;

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->clearSweepGroupEdges();
  }

#ifdef DEBUG
  unsigned idx = sweepGroupIndex;
  for (Zone* head = currentSweepGroup; head; head = head->nextGroup()) {
    for (Zone* zone = head; zone; zone = zone->nextNodeInGroup()) {
      MOZ_ASSERT(zone->isGCMarking());
      zone->gcSweepGroupIndex = idx;
    }
    idx++;
  }

  MOZ_ASSERT_IF(!isIncremental, !currentSweepGroup->nextGroup());
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
  }
#endif
}

void GCRuntime::getNextSweepGroup() {
  currentSweepGroup = currentSweepGroup->nextGroup();
  ++sweepGroupIndex;
  if (!currentSweepGroup) {
    abortSweepAfterCurrentGroup = false;
    return;
  }

  MOZ_ASSERT_IF(abortSweepAfterCurrentGroup, !isIncremental);
  if (!isIncremental) {
    ZoneComponentFinder::mergeGroups(currentSweepGroup);
  }

  for (Zone* zone = currentSweepGroup; zone; zone = zone->nextNodeInGroup()) {
    MOZ_ASSERT(zone->gcState() == zone->initialMarkingState());
    MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
  }

  if (abortSweepAfterCurrentGroup) {
    markTask.join();

    // Abort collection of subsequent sweep groups.
    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      MOZ_ASSERT(!zone->gcNextGraphComponent);
      zone->changeGCState(zone->initialMarkingState(), Zone::NoGC);
      zone->arenas.unmarkPreMarkedFreeCells();
      zone->arenas.mergeArenasFromCollectingLists();
      zone->clearGCSliceThresholds();
    }

    for (SweepGroupCompartmentsIter comp(rt); !comp.done(); comp.next()) {
      resetGrayList(comp);
    }

    abortSweepAfterCurrentGroup = false;
    currentSweepGroup = nullptr;
  }
}
/*
 * Gray marking:
 *
 * At the end of collection, anything reachable from a gray root that has not
 * otherwise been marked black must be marked gray.
 *
 * This means that when marking things gray we must not allow marking to leave
 * the current compartment group, as that could result in things being marked
 * gray when they might subsequently be marked black. To achieve this, when we
 * find a cross compartment pointer we don't mark the referent but add it to a
 * singly-linked list of incoming gray pointers that is stored with each
 * compartment.
 *
 * The list head is stored in Compartment::gcIncomingGrayPointers and contains
 * cross compartment wrapper objects. The next pointer is stored in the second
 * extra slot of the cross compartment wrapper.
 *
 * The list is created during gray marking when one of the
 * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
 * current compartment group. This calls DelayCrossCompartmentGrayMarking to
 * push the referring object onto the list.
 *
 * The list is traversed and then unlinked in
 * GCRuntime::markIncomingGrayCrossCompartmentPointers.
 */
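
/*
 * A sketch of the resulting structure (hypothetical wrappers w1 and w2, both
 * referring into compartment C):
 *
 *   C->gcIncomingGrayPointers --> w2
 *   w2[GrayLinkReservedSlot]  --> w1
 *   w1[GrayLinkReservedSlot]  --> null
 *
 * Entries are pushed at the head, so the list holds wrappers in reverse order
 * of discovery.
 */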
static bool IsGrayListObject(JSObject* obj) {
  MOZ_ASSERT(obj);
  return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
}

/* static */
unsigned ProxyObject::grayLinkReservedSlot(JSObject* obj) {
  MOZ_ASSERT(IsGrayListObject(obj));
  return CrossCompartmentWrapperObject::GrayLinkReservedSlot;
}

#ifdef DEBUG
static void AssertNotOnGrayList(JSObject* obj) {
  MOZ_ASSERT_IF(
      IsGrayListObject(obj),
      GetProxyReservedSlot(obj, ProxyObject::grayLinkReservedSlot(obj))
          .isUndefined());
}
#endif

static void AssertNoWrappersInGrayList(JSRuntime* rt) {
#ifdef DEBUG
  for (CompartmentsIter c(rt); !c.done(); c.next()) {
    MOZ_ASSERT(!c->gcIncomingGrayPointers);
    for (Compartment::ObjectWrapperEnum e(c); !e.empty(); e.popFront()) {
      AssertNotOnGrayList(e.front().value().unbarrieredGet());
    }
  }
#endif
}

static JSObject* CrossCompartmentPointerReferent(JSObject* obj) {
  MOZ_ASSERT(IsGrayListObject(obj));
  return &obj->as<ProxyObject>().private_().toObject();
}

static JSObject* NextIncomingCrossCompartmentPointer(JSObject* prev,
                                                     bool unlink) {
  unsigned slot = ProxyObject::grayLinkReservedSlot(prev);
  JSObject* next = GetProxyReservedSlot(prev, slot).toObjectOrNull();
  MOZ_ASSERT_IF(next, IsGrayListObject(next));

  if (unlink) {
    SetProxyReservedSlot(prev, slot, UndefinedValue());
  }

  return next;
}

void js::gc::DelayCrossCompartmentGrayMarking(GCMarker* maybeMarker,
                                              JSObject* src) {
  MOZ_ASSERT_IF(!maybeMarker, !JS::RuntimeHeapIsBusy());
  MOZ_ASSERT(IsGrayListObject(src));
  MOZ_ASSERT(src->isMarkedGray());

  AutoTouchingGrayThings tgt;

  mozilla::Maybe<AutoLockGC> lock;
  if (maybeMarker && maybeMarker->isParallelMarking()) {
    // Synchronize access to JSCompartment::gcIncomingGrayPointers.
    //
    // TODO: Instead of building this list we could scan all incoming CCWs and
    // mark through gray ones when marking gray roots for a sweep group.
    lock.emplace(maybeMarker->runtime());
  }

  /* Called from MarkCrossCompartmentXXX functions. */
  unsigned slot = ProxyObject::grayLinkReservedSlot(src);
  JSObject* dest = CrossCompartmentPointerReferent(src);
  Compartment* comp = dest->compartment();

  if (GetProxyReservedSlot(src, slot).isUndefined()) {
    SetProxyReservedSlot(src, slot,
                         ObjectOrNullValue(comp->gcIncomingGrayPointers));
    comp->gcIncomingGrayPointers = src;
  } else {
    MOZ_ASSERT(GetProxyReservedSlot(src, slot).isObjectOrNull());
  }

#ifdef DEBUG
  /*
   * Assert that the object is in our list, also walking the list to check its
   * integrity.
   */
  JSObject* obj = comp->gcIncomingGrayPointers;
  bool found = false;
  while (obj) {
    if (obj == src) {
      found = true;
    }
    obj = NextIncomingCrossCompartmentPointer(obj, false);
  }
  MOZ_ASSERT(found);
#endif
}

void GCRuntime::markIncomingGrayCrossCompartmentPointers() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_INCOMING_GRAY);

  for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
    MOZ_ASSERT(c->zone()->isGCMarkingBlackAndGray());
    MOZ_ASSERT_IF(c->gcIncomingGrayPointers,
                  IsGrayListObject(c->gcIncomingGrayPointers));

    for (JSObject* src = c->gcIncomingGrayPointers; src;
         src = NextIncomingCrossCompartmentPointer(src, true)) {
      JSObject* dst = CrossCompartmentPointerReferent(src);
      MOZ_ASSERT(dst->compartment() == c);
      MOZ_ASSERT_IF(src->asTenured().isMarkedBlack(),
                    dst->asTenured().isMarkedBlack());

      if (src->asTenured().isMarkedGray()) {
        TraceManuallyBarrieredEdge(marker().tracer(), &dst,
                                   "cross-compartment gray pointer");
      }
    }

    c->gcIncomingGrayPointers = nullptr;
  }
}

static bool RemoveFromGrayList(JSObject* wrapper) {
  AutoTouchingGrayThings tgt;

  if (!IsGrayListObject(wrapper)) {
    return false;
  }

  unsigned slot = ProxyObject::grayLinkReservedSlot(wrapper);
  if (GetProxyReservedSlot(wrapper, slot).isUndefined()) {
    return false; /* Not on our list. */
  }

  JSObject* tail = GetProxyReservedSlot(wrapper, slot).toObjectOrNull();
  SetProxyReservedSlot(wrapper, slot, UndefinedValue());

  Compartment* comp = CrossCompartmentPointerReferent(wrapper)->compartment();
  JSObject* obj = comp->gcIncomingGrayPointers;
  if (obj == wrapper) {
    comp->gcIncomingGrayPointers = tail;
    return true;
  }

  while (obj) {
    unsigned slot = ProxyObject::grayLinkReservedSlot(obj);
    JSObject* next = GetProxyReservedSlot(obj, slot).toObjectOrNull();
    if (next == wrapper) {
      js::detail::SetProxyReservedSlotUnchecked(obj, slot,
                                                ObjectOrNullValue(tail));
      return true;
    }
    obj = next;
  }

  MOZ_CRASH("object not found in gray link list");
}
void GCRuntime::resetGrayList(Compartment* comp) {
  JSObject* src = comp->gcIncomingGrayPointers;
  while (src) {
    src = NextIncomingCrossCompartmentPointer(src, true);
  }
  comp->gcIncomingGrayPointers = nullptr;
}

#ifdef DEBUG
static bool HasIncomingCrossCompartmentPointers(JSRuntime* rt) {
  for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
    if (c->gcIncomingGrayPointers) {
      return true;
    }
  }

  return false;
}
#endif

void js::NotifyGCNukeWrapper(JSContext* cx, JSObject* wrapper) {
  MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper));

  /*
   * References to the target of the wrapper are being removed; we no longer
   * have to remember to mark it.
   */
  RemoveFromGrayList(wrapper);

  /*
   * Clean up WeakRef maps which might include this wrapper.
   */
  JSObject* target = UncheckedUnwrapWithoutExpose(wrapper);
  if (target->is<WeakRefObject>()) {
    WeakRefObject* weakRef = &target->as<WeakRefObject>();
    if (weakRef->target()) {
      cx->runtime()->gc.nukeWeakRefWrapper(wrapper, weakRef);
    }
  }

  /*
   * Clean up FinalizationRecord objects which might be the target of this
   * wrapper.
   */
  if (target->is<FinalizationRecordObject>()) {
    auto* record = &target->as<FinalizationRecordObject>();
    cx->runtime()->gc.nukeFinalizationRecordWrapper(wrapper, record);
  }
}

enum {
  JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
  JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
};

unsigned js::NotifyGCPreSwap(JSObject* a, JSObject* b) {
  /*
   * Two objects in the same compartment are about to have their contents
   * swapped. If either of them is in our gray pointer list, then we remove
   * it from the list, returning a bitset indicating what happened.
   */
  return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
         (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
}

void js::NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags) {
  /*
   * Two objects in the same compartment have had their contents swapped. If
   * either of them was in our gray pointer list, we re-add it again.
   */
  if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED) {
    DelayCrossCompartmentGrayMarking(nullptr, b);
  }
  if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED) {
    DelayCrossCompartmentGrayMarking(nullptr, a);
  }
}
static inline void MaybeCheckWeakMapMarking(GCRuntime* gc) {
#if defined(JS_GC_ZEAL) || defined(DEBUG)

  bool shouldCheck;
#  if defined(DEBUG)
  shouldCheck = true;
#  else
  shouldCheck = gc->hasZealMode(ZealMode::CheckWeakMapMarking);
#  endif

  if (shouldCheck) {
    for (SweepGroupZonesIter zone(gc); !zone.done(); zone.next()) {
      MOZ_RELEASE_ASSERT(WeakMapBase::checkMarkingForZone(zone));
    }
  }

#endif
}

IncrementalProgress GCRuntime::beginMarkingSweepGroup(JS::GCContext* gcx,
                                                      SliceBudget& budget) {
#ifdef DEBUG
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
  assertNoMarkingWork();
  for (auto& marker : markers) {
    MOZ_ASSERT(marker->markColor() == MarkColor::Black);
  }
  MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
#endif

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  // Change state of current group to MarkBlackAndGray to restrict gray marking
  // to this group. Note that there may be pointers to the atoms zone, and
  // these will be marked through, as they are not marked with
  // TraceCrossCompartmentEdge.
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    zone->changeGCState(zone->initialMarkingState(), Zone::MarkBlackAndGray);
  }

  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);

  // Mark incoming gray pointers from previously swept compartments.
  markIncomingGrayCrossCompartmentPointers();

  return Finished;
}

IncrementalProgress GCRuntime::markGrayRootsInCurrentGroup(
    JS::GCContext* gcx, SliceBudget& budget) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);

  return markGrayRoots<SweepGroupZonesIter>(budget,
                                            gcstats::PhaseKind::MARK_GRAY);
}

IncrementalProgress GCRuntime::markGray(JS::GCContext* gcx,
                                        SliceBudget& budget) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  if (markUntilBudgetExhausted(budget, AllowParallelMarking) == NotFinished) {
    return NotFinished;
  }

  return Finished;
}

IncrementalProgress GCRuntime::endMarkingSweepGroup(JS::GCContext* gcx,
                                                    SliceBudget& budget) {
#ifdef DEBUG
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
  assertNoMarkingWork();
  for (auto& marker : markers) {
    MOZ_ASSERT(marker->markColor() == MarkColor::Black);
  }
  MOZ_ASSERT(!HasIncomingCrossCompartmentPointers(rt));
#endif

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
    return NotFinished;
  }

  AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);

  // Mark transitively inside the current compartment group.
  if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
    return NotFinished;
  }

  MOZ_ASSERT(marker().isDrained());

  // We must not yield after this point before we start sweeping the group.
  safeToYield = false;

  MaybeCheckWeakMapMarking(this);

  return Finished;
}
// Causes the given WeakCache to be swept when run.
class ImmediateSweepWeakCacheTask : public GCParallelTask {
  Zone* zone;
  JS::detail::WeakCacheBase& cache;

 public:
  ImmediateSweepWeakCacheTask(GCRuntime* gc, Zone* zone,
                              JS::detail::WeakCacheBase& wc)
      : GCParallelTask(gc, gcstats::PhaseKind::SWEEP_WEAK_CACHES),
        zone(zone),
        cache(wc) {}

  ImmediateSweepWeakCacheTask(ImmediateSweepWeakCacheTask&& other) noexcept
      : GCParallelTask(std::move(other)),
        zone(other.zone),
        cache(other.cache) {}

  ImmediateSweepWeakCacheTask(const ImmediateSweepWeakCacheTask&) = delete;

  void run(AutoLockHelperThreadState& lock) override {
    AutoUnlockHelperThreadState unlock(lock);
    AutoSetThreadIsSweeping threadIsSweeping(zone);
    SweepingTracer trc(gc->rt);
    cache.traceWeak(&trc, &gc->storeBuffer());
  }
};
void GCRuntime::updateAtomsBitmap() {
  DenseBitmap marked;
  if (atomMarking.computeBitmapFromChunkMarkBits(rt, marked)) {
    for (GCZonesIter zone(this); !zone.done(); zone.next()) {
      atomMarking.refineZoneBitmapForCollectedZone(zone, marked);
    }
  } else {
    // Ignore OOM in computeBitmapFromChunkMarkBits. The
    // refineZoneBitmapForCollectedZone call can only remove atoms from the
    // zone bitmap, so it is conservative to just not call it.
  }

  atomMarking.markAtomsUsedByUncollectedZones(rt);

  // For convenience sweep these tables non-incrementally as part of bitmap
  // sweeping; they are likely to be much smaller than the main atoms table.
  SweepingTracer trc(rt);
  rt->symbolRegistry().traceWeak(&trc);
}

void GCRuntime::sweepCCWrappers() {
  SweepingTracer trc(rt);
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    zone->traceWeakCCWEdges(&trc);
  }
}

void GCRuntime::sweepRealmGlobals() {
  SweepingTracer trc(rt);
  for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(r->zone());
    r->traceWeakGlobalEdge(&trc);
  }
}

void GCRuntime::sweepMisc() {
  SweepingTracer trc(rt);
  for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(r->zone());
    r->traceWeakSavedStacks(&trc);
    r->traceWeakRegExps(&trc);
  }
  for (SweepGroupCompartmentsIter c(this); !c.done(); c.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(c->zone());
    c->traceWeakNativeIterators(&trc);
  }
}

void GCRuntime::sweepCompressionTasks() {
  JSRuntime* runtime = rt;

  // Attach finished compression tasks.
  AutoLockHelperThreadState lock;
  AttachFinishedCompressions(runtime, lock);
  SweepPendingCompressions(lock);
}

void GCRuntime::sweepWeakMaps() {
  SweepingTracer trc(rt);
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    /* No need to look up any more weakmap keys from this sweep group. */
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!zone->gcEphemeronEdges().clear()) {
      oomUnsafe.crash("clearing weak keys in beginSweepingSweepGroup()");
    }

    // Lock the storebuffer since this may access it when rehashing or resizing
    // the tables.
    AutoLockStoreBuffer lock(&storeBuffer());
    zone->sweepWeakMaps(&trc);
  }
}

void GCRuntime::sweepUniqueIds() {
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    AutoSetThreadIsSweeping threadIsSweeping(zone);
    zone->sweepUniqueIds();
  }
}

void JS::Zone::sweepUniqueIds() {
  SweepingTracer trc(runtimeFromAnyThread());
  uniqueIds().traceWeak(&trc);
}

/* static */
bool UniqueIdGCPolicy::traceWeak(JSTracer* trc, Cell** keyp, uint64_t* valuep) {
  // Since this is only ever used for sweeping, we can optimize it for that
  // case. (Compacting GC updates this table manually when it moves a cell.)
  MOZ_ASSERT(trc->kind() == JS::TracerKind::Sweeping);
  return (*keyp)->isMarkedAny();
}

void GCRuntime::sweepFinalizationObserversOnMainThread() {
  // This calls back into the browser which expects to be called from the main
  // thread.
  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
  gcstats::AutoPhase ap2(stats(),
                         gcstats::PhaseKind::SWEEP_FINALIZATION_OBSERVERS);
  SweepingTracer trc(rt);
  AutoLockStoreBuffer lock(&storeBuffer());
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    traceWeakFinalizationObserverEdges(&trc, zone);
  }
}
void GCRuntime::startTask(GCParallelTask& task,
                          AutoLockHelperThreadState& lock) {
  if (!CanUseExtraThreads()) {
    AutoUnlockHelperThreadState unlock(lock);
    task.runFromMainThread();
    stats().recordParallelPhase(task.phaseKind, task.duration());
    return;
  }

  task.startWithLockHeld(lock);
}

void GCRuntime::joinTask(GCParallelTask& task,
                         AutoLockHelperThreadState& lock) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::JOIN_PARALLEL_TASKS);
  task.joinWithLockHeld(lock);
}

void GCRuntime::sweepDebuggerOnMainThread(JS::GCContext* gcx) {
  SweepingTracer trc(rt);
  AutoLockStoreBuffer lock(&storeBuffer());

  // Detach unreachable debuggers and global objects from each other.
  // This can modify weakmaps and so must happen before weakmap sweeping.
  DebugAPI::sweepAll(gcx);

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);

  // Sweep debug environment information. This performs lookups in the Zone's
  // unique IDs table and so must not happen in parallel with sweeping that
  // table.
  {
    gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_MISC);
    for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
      r->traceWeakDebugEnvironmentEdges(&trc);
    }
  }
}

void GCRuntime::sweepJitDataOnMainThread(JS::GCContext* gcx) {
  SweepingTracer trc(rt);
  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);

    if (initialState != State::NotActive) {
      // Cancel any active or pending off thread compilations. We also did
      // this before marking (in DiscardJITCodeForGC) so this is a no-op
      // for non-incremental GCs.
      js::CancelOffThreadIonCompile(rt, JS::Zone::Sweep);
    }

    // Bug 1071218: the following method has not yet been refactored to
    // work on a single zone-group at once.

    // Sweep entries containing about-to-be-finalized JitCode and
    // update relocated TypeSet::Types inside the JitcodeGlobalTable.
    jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
  }

  if (initialState != State::NotActive) {
    gcstats::AutoPhase apdc(stats(), gcstats::PhaseKind::SWEEP_DISCARD_CODE);
    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      zone->discardJitCode(gcx);
    }
  }

  // JitZone/JitRealm must be swept *after* discarding JIT code, because
  // Zone::discardJitCode might access CacheIRStubInfos deleted here.
  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);

    for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
      r->traceWeakEdgesInJitRealm(&trc);
    }

    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      if (jit::JitZone* jitZone = zone->jitZone()) {
        jitZone->traceWeak(&trc, zone);
      }
    }
  }
}
using WeakCacheTaskVector =
    mozilla::Vector<ImmediateSweepWeakCacheTask, 0, SystemAllocPolicy>;

// Call a functor for all weak caches that need to be swept in the current
// sweep group.
template <typename Functor>
static inline bool IterateWeakCaches(JSRuntime* rt, Functor f) {
  for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
    for (JS::detail::WeakCacheBase* cache : zone->weakCaches()) {
      if (!f(cache, zone.get())) {
        return false;
      }
    }
  }

  for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
    if (!f(cache, nullptr)) {
      return false;
    }
  }

  return true;
}

static bool PrepareWeakCacheTasks(JSRuntime* rt,
                                  WeakCacheTaskVector* immediateTasks) {
  // Start incremental sweeping for caches that support it or add to a vector
  // of sweep tasks to run on a helper thread.

  MOZ_ASSERT(immediateTasks->empty());

  GCRuntime* gc = &rt->gc;
  bool ok =
      IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
        if (cache->empty()) {
          return true;
        }

        // Caches that support incremental sweeping will be swept later.
        if (zone && cache->setIncrementalBarrierTracer(&gc->sweepingTracer)) {
          return true;
        }

        return immediateTasks->emplaceBack(gc, zone, *cache);
      });

  if (!ok) {
    immediateTasks->clearAndFree();
  }

  return ok;
}

static void SweepAllWeakCachesOnMainThread(JSRuntime* rt) {
  // If we ran out of memory, do all the work on the main thread.
  gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::SWEEP_WEAK_CACHES);
  SweepingTracer trc(rt);
  IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
    if (cache->needsIncrementalBarrier()) {
      cache->setIncrementalBarrierTracer(nullptr);
    }
    cache->traceWeak(&trc, &rt->gc.storeBuffer());
    return true;
  });
}

void GCRuntime::sweepEmbeddingWeakPointers(JS::GCContext* gcx) {
  using namespace gcstats;

  AutoLockStoreBuffer lock(&storeBuffer());

  AutoPhase ap(stats(), PhaseKind::FINALIZE_START);
  callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_PREPARE);
  {
    AutoPhase ap2(stats(), PhaseKind::WEAK_ZONES_CALLBACK);
    callWeakPointerZonesCallbacks(&sweepingTracer);
  }
  {
    AutoPhase ap2(stats(), PhaseKind::WEAK_COMPARTMENT_CALLBACK);
    for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
      for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
        callWeakPointerCompartmentCallbacks(&sweepingTracer, comp);
      }
    }
  }
  callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_START);
}
IncrementalProgress GCRuntime::beginSweepingSweepGroup(JS::GCContext* gcx,
                                                       SliceBudget& budget) {
  /*
   * Begin sweeping the group of zones in currentSweepGroup, performing
   * actions that must be done before yielding to caller.
   */

  using namespace gcstats;

  AutoSCC scc(stats(), sweepGroupIndex);

  bool sweepingAtoms = false;
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    /* Set the GC state to sweeping. */
    zone->changeGCState(Zone::MarkBlackAndGray, Zone::Sweep);

    /* Purge the ArenaLists before sweeping. */
    zone->arenas.checkSweepStateNotInUse();
    zone->arenas.unmarkPreMarkedFreeCells();
    zone->arenas.clearFreeLists();

    if (zone->isAtomsZone()) {
      sweepingAtoms = true;
    }
  }

#ifdef DEBUG
  for (const auto* cell : cellsToAssertNotGray.ref()) {
    JS::AssertCellIsNotGray(cell);
  }
  cellsToAssertNotGray.ref().clearAndFree();
#endif

  // Updating the atom marking bitmaps. This marks atoms referenced by
  // uncollected zones so cannot be done in parallel with the other sweeping
  // work below.
  if (sweepingAtoms) {
    AutoPhase ap(stats(), PhaseKind::UPDATE_ATOMS_BITMAP);
    updateAtomsBitmap();
  }

#ifdef JS_GC_ZEAL
  validateIncrementalMarking();
#endif

  AutoSetThreadIsSweeping threadIsSweeping;

  // This must happen before sweeping realm globals.
  sweepDebuggerOnMainThread(gcx);

  // FinalizationRegistry sweeping touches weak maps and so must not run in
  // parallel with that. This triggers a read barrier and can add marking work
  // for zones that are still marking. Must happen before sweeping realm
  // globals.
  sweepFinalizationObserversOnMainThread();

  // This must happen before updating embedding weak pointers.
  sweepRealmGlobals();

  sweepEmbeddingWeakPointers(gcx);

  {
    AutoLockHelperThreadState lock;

    AutoPhase ap(stats(), PhaseKind::SWEEP_COMPARTMENTS);

    AutoRunParallelTask sweepCCWrappers(this, &GCRuntime::sweepCCWrappers,
                                        PhaseKind::SWEEP_CC_WRAPPER,
                                        GCUse::Sweeping, lock);
    AutoRunParallelTask sweepMisc(this, &GCRuntime::sweepMisc,
                                  PhaseKind::SWEEP_MISC, GCUse::Sweeping, lock);
    AutoRunParallelTask sweepCompTasks(this, &GCRuntime::sweepCompressionTasks,
                                       PhaseKind::SWEEP_COMPRESSION,
                                       GCUse::Sweeping, lock);
    AutoRunParallelTask sweepWeakMaps(this, &GCRuntime::sweepWeakMaps,
                                      PhaseKind::SWEEP_WEAKMAPS,
                                      GCUse::Sweeping, lock);
    AutoRunParallelTask sweepUniqueIds(this, &GCRuntime::sweepUniqueIds,
                                       PhaseKind::SWEEP_UNIQUEIDS,
                                       GCUse::Sweeping, lock);

    WeakCacheTaskVector sweepCacheTasks;
    bool canSweepWeakCachesOffThread =
        PrepareWeakCacheTasks(rt, &sweepCacheTasks);
    if (canSweepWeakCachesOffThread) {
      weakCachesToSweep.ref().emplace(currentSweepGroup);
      for (auto& task : sweepCacheTasks) {
        startTask(task, lock);
      }
    }

    {
      AutoUnlockHelperThreadState unlock(lock);
      sweepJitDataOnMainThread(gcx);

      if (!canSweepWeakCachesOffThread) {
        MOZ_ASSERT(sweepCacheTasks.empty());
        SweepAllWeakCachesOnMainThread(rt);
      }
    }

    for (auto& task : sweepCacheTasks) {
      joinTask(task, lock);
    }
  }

  if (sweepingAtoms) {
    startSweepingAtomsTable();
  }

  // Queue all GC things in all zones for sweeping, either on the foreground
  // or on the background thread.

  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    for (const auto& phase : BackgroundFinalizePhases) {
      initBackgroundSweep(zone, gcx, phase);
    }

    zone->arenas.queueForegroundThingsForSweep();
  }

  MOZ_ASSERT(!sweepZone);

  safeToYield = true;
  markOnBackgroundThreadDuringSweeping = CanUseExtraThreads();

  return Finished;
}
#ifdef JS_GC_ZEAL
bool GCRuntime::shouldYieldForZeal(ZealMode mode) {
  bool yield = useZeal && hasZealMode(mode);

  // Only yield on the first sweep slice for this mode.
  bool firstSweepSlice = initialState != State::Sweep;
  if (mode == ZealMode::IncrementalMultipleSlices && !firstSweepSlice) {
    yield = false;
  }

  return yield;
}
#endif

IncrementalProgress GCRuntime::endSweepingSweepGroup(JS::GCContext* gcx,
                                                     SliceBudget& budget) {
  // This is to prevent a race between markTask checking the zone state and
  // us changing it below.
  if (joinBackgroundMarkTask() == NotFinished) {
    return NotFinished;
  }

  assertNoMarkingWork();

  // Disable background marking during sweeping until we start sweeping the
  // next zone group.
  markOnBackgroundThreadDuringSweeping = false;

  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
    AutoLockStoreBuffer lock(&storeBuffer());
    callFinalizeCallbacks(gcx, JSFINALIZE_GROUP_END);
  }

  /* Free LIFO blocks on a background thread if possible. */
  startBackgroundFree();

  /* Update the GC state for zones we have swept. */
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    if (jit::JitZone* jitZone = zone->jitZone()) {
      // Clear out any small pools that we're hanging on to.
      jitZone->execAlloc().purge();
    }
    AutoLockGC lock(this);
    zone->changeGCState(Zone::Sweep, Zone::Finished);
    zone->arenas.unmarkPreMarkedFreeCells();
    zone->arenas.checkNoArenasToUpdate();
    zone->pretenuring.clearCellCountsInNewlyCreatedArenas();
  }

  /*
   * Start background thread to sweep zones if required, sweeping any atoms
   * zones last if present.
   */
  ZoneList zones;
  for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
    if (zone->isAtomsZone()) {
      zones.append(zone);
    } else {
      zones.prepend(zone);
    }
  }

  queueZonesAndStartBackgroundSweep(std::move(zones));

  return Finished;
}

IncrementalProgress GCRuntime::markDuringSweeping(JS::GCContext* gcx,
                                                  SliceBudget& budget) {
  MOZ_ASSERT(markTask.isIdle());

  if (markOnBackgroundThreadDuringSweeping) {
    if (!marker().isDrained() || hasDelayedMarking()) {
      AutoLockHelperThreadState lock;
      MOZ_ASSERT(markTask.isIdle(lock));
      markTask.setBudget(budget);
      markTask.startOrRunIfIdle(lock);
    }
    return Finished;  // This means don't yield to the mutator here.
  }

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
  return markUntilBudgetExhausted(budget, AllowParallelMarking);
}
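
// Note the asymmetry above: with background marking enabled, leftover mark
// work is handed off to markTask and the slice reports Finished so that
// sweeping can proceed concurrently with the marking; otherwise the marking
// runs here on the main thread and may genuinely return NotFinished.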
void GCRuntime::beginSweepPhase(JS::GCReason reason, AutoGCSession& session) {
  /*
   * Sweep phase.
   *
   * Finalize as we sweep, outside of lock but with RuntimeHeapIsBusy()
   * true so that any attempt to allocate a GC-thing from a finalizer will
   * fail, rather than nest badly and leave the unmarked newborn to be swept.
   */

  MOZ_ASSERT(!abortSweepAfterCurrentGroup);
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);

#ifdef DEBUG
  releaseHeldRelocatedArenas();
  verifyAllChunks();
#endif

#ifdef JS_GC_ZEAL
  computeNonIncrementalMarkingForValidation(session);
#endif

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);

  AssertNoWrappersInGrayList(rt);
  dropStringWrappers();

  groupZonesForSweeping(reason);

  sweepActions->assertFinished();
}
bool GCRuntime::foregroundFinalize(JS::GCContext* gcx, Zone* zone,
                                   AllocKind thingKind,
                                   SliceBudget& sliceBudget,
                                   SortedArenaList& sweepList) {
  ArenaLists& lists = zone->arenas;
  lists.checkNoArenasToUpdateForKind(thingKind);

  // Non-empty arenas are reused for new allocations as soon as the finalizers
  // for that allocation kind have run. Empty arenas are only released when
  // everything in the zone has been swept (see
  // GCRuntime::sweepBackgroundThings for more details).
  if (!FinalizeArenas(gcx, lists.collectingArenaList(thingKind), sweepList,
                      thingKind, sliceBudget)) {
    // Copy the current contents of sweepList so that ArenaIter can find them.
    lists.setIncrementalSweptArenas(thingKind, sweepList);
    return false;
  }

  sweepList.extractEmpty(&lists.savedEmptyArenas.ref());
  lists.mergeFinalizedArenas(thingKind, sweepList);
  lists.clearIncrementalSweptArenas();

  return true;
}
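
// For illustration: if the slice budget expires partway through a
// foreground-swept alloc kind, the arenas finalized so far remain in
// sweepList and are published via setIncrementalSweptArenas() so that heap
// iteration can still see them while the mutator runs. The next slice
// resumes FinalizeArenas() on the remaining collecting list, and on
// completion the swept arenas are merged back and the incremental state is
// cleared.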
1749 BackgroundMarkTask::BackgroundMarkTask(GCRuntime* gc)
1750 : GCParallelTask(gc, gcstats::PhaseKind::MARK, GCUse::Marking),
1751 budget(SliceBudget::unlimited()) {}
1753 void js::gc::BackgroundMarkTask::run(AutoLockHelperThreadState& lock) {
1754 AutoUnlockHelperThreadState unlock(lock);
1756 // Time reporting is handled separately for parallel tasks.
1757 gc->sweepMarkResult = gc->markUntilBudgetExhausted(
1758 this->budget, GCRuntime::SingleThreadedMarking, DontReportMarkTime);
1761 IncrementalProgress GCRuntime::joinBackgroundMarkTask() {
1762 AutoLockHelperThreadState lock;
1763 if (markTask.isIdle(lock)) {
1764 return Finished;
1767 joinTask(markTask, lock);
1769 IncrementalProgress result = sweepMarkResult;
1770 sweepMarkResult = Finished;
1771 return result;
1774 template <typename T>
1775 static void SweepThing(JS::GCContext* gcx, T* thing) {
1776 if (!TenuredThingIsMarkedAny(thing)) {
1777 thing->sweep(gcx);
1781 template <typename T>
1782 static bool SweepArenaList(JS::GCContext* gcx, Arena** arenasToSweep,
1783 SliceBudget& sliceBudget) {
1784 while (Arena* arena = *arenasToSweep) {
1785 MOZ_ASSERT(arena->zone->isGCSweeping());
1787 for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
1788 SweepThing(gcx, cell.as<T>());
1791 Arena* next = arena->next;
1792 MOZ_ASSERT_IF(next, next->zone == arena->zone);
1793 *arenasToSweep = next;
1795 AllocKind kind = MapTypeToAllocKind<T>::kind;
1796 sliceBudget.step(Arena::thingsPerArena(kind));
1797 if (sliceBudget.isOverBudget()) {
1798 return false;
1802 return true;
1805 void GCRuntime::startSweepingAtomsTable() {
1806 auto& maybeAtoms = maybeAtomsToSweep.ref();
1807 MOZ_ASSERT(maybeAtoms.isNothing());
1809 AtomsTable* atomsTable = rt->atomsForSweeping();
1810 if (!atomsTable) {
1811 return;
1814 // Create secondary tables to hold new atoms added while we're sweeping the
1815 // main tables incrementally.
1816 if (!atomsTable->startIncrementalSweep(maybeAtoms)) {
1817 SweepingTracer trc(rt);
1818 atomsTable->traceWeak(&trc);
IncrementalProgress GCRuntime::sweepAtomsTable(JS::GCContext* gcx,
                                               SliceBudget& budget) {
  if (!atomsZone()->isGCSweeping()) {
    return Finished;
  }

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_ATOMS_TABLE);

  auto& maybeAtoms = maybeAtomsToSweep.ref();
  if (!maybeAtoms) {
    return Finished;
  }

  if (!rt->atomsForSweeping()->sweepIncrementally(maybeAtoms.ref(), budget)) {
    return NotFinished;
  }

  maybeAtoms.reset();

  return Finished;
}

static size_t IncrementalSweepWeakCache(GCRuntime* gc,
                                        const WeakCacheToSweep& item) {
  AutoSetThreadIsSweeping threadIsSweeping(item.zone);

  JS::detail::WeakCacheBase* cache = item.cache;
  MOZ_ASSERT(cache->needsIncrementalBarrier());

  SweepingTracer trc(gc->rt);
  size_t steps = cache->traceWeak(&trc, &gc->storeBuffer());
  cache->setIncrementalBarrierTracer(nullptr);

  return steps;
}

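// Iterates over the weak caches in the current sweep group that still need
// sweeping, i.e. those that have an incremental barrier set, visiting each
// zone's caches in turn.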
WeakCacheSweepIterator::WeakCacheSweepIterator(JS::Zone* sweepGroup)
    : sweepZone(sweepGroup), sweepCache(sweepZone->weakCaches().getFirst()) {
  settle();
}

bool WeakCacheSweepIterator::done() const { return !sweepZone; }

WeakCacheToSweep WeakCacheSweepIterator::get() const {
  MOZ_ASSERT(!done());

  return {sweepCache, sweepZone};
}

void WeakCacheSweepIterator::next() {
  MOZ_ASSERT(!done());

  sweepCache = sweepCache->getNext();
  settle();
}

void WeakCacheSweepIterator::settle() {
  while (sweepZone) {
    while (sweepCache && !sweepCache->needsIncrementalBarrier()) {
      sweepCache = sweepCache->getNext();
    }

    if (sweepCache) {
      break;
    }

    sweepZone = sweepZone->nextNodeInGroup();
    if (sweepZone) {
      sweepCache = sweepZone->weakCaches().getFirst();
    }
  }

  MOZ_ASSERT((!sweepZone && !sweepCache) ||
             (sweepCache && sweepCache->needsIncrementalBarrier()));
}

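// Sweep the weak caches of all zones in the current sweep group, farming the
// work out to helper threads where possible.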
IncrementalProgress GCRuntime::sweepWeakCaches(JS::GCContext* gcx,
                                               SliceBudget& budget) {
  if (weakCachesToSweep.ref().isNothing()) {
    return Finished;
  }

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);

  WeakCacheSweepIterator& work = weakCachesToSweep.ref().ref();

  AutoLockHelperThreadState lock;

  {
    AutoRunParallelWork runWork(this, IncrementalSweepWeakCache,
                                gcstats::PhaseKind::SWEEP_WEAK_CACHES,
                                GCUse::Sweeping, work, budget, lock);
    AutoUnlockHelperThreadState unlock(lock);
  }

  if (work.done()) {
    weakCachesToSweep.ref().reset();
    return Finished;
  }

  return NotFinished;
}

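// Foreground-finalize the current alloc kind in the current sweep zone. The
// |sweepZone| and |sweepAllocKind| state is maintained by the enclosing
// ForEach actions set up in initSweepActions().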
IncrementalProgress GCRuntime::finalizeAllocKind(JS::GCContext* gcx,
                                                 SliceBudget& budget) {
  MOZ_ASSERT(sweepZone->isGCSweeping());

  // Set the number of things per arena for this AllocKind.
  size_t thingsPerArena = Arena::thingsPerArena(sweepAllocKind);
  auto& sweepList = incrementalSweepList.ref();
  sweepList.setThingsPerArena(thingsPerArena);

  AutoSetThreadIsFinalizing threadIsFinalizing(gcx);

  if (!foregroundFinalize(gcx, sweepZone, sweepAllocKind, budget, sweepList)) {
    return NotFinished;
  }

  // Reset the slots of the sweep list that we used.
  sweepList.reset(thingsPerArena);

  return Finished;
}

IncrementalProgress GCRuntime::sweepPropMapTree(JS::GCContext* gcx,
                                                SliceBudget& budget) {
  // Remove dead SharedPropMaps from the tree. This happens incrementally on
  // the main thread. PropMaps are finalized later on a background thread.

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_PROP_MAP);

  ArenaLists& al = sweepZone->arenas;

  if (!SweepArenaList<CompactPropMap>(
          gcx, &al.gcCompactPropMapArenasToUpdate.ref(), budget)) {
    return NotFinished;
  }
  if (!SweepArenaList<NormalPropMap>(
          gcx, &al.gcNormalPropMapArenasToUpdate.ref(), budget)) {
    return NotFinished;
  }

  return Finished;
}

// An iterator for a standard container that provides an STL-like
// begin()/end() interface. This iterator provides a done()/get()/next() style
// interface.
template <typename Container>
class ContainerIter {
  using Iter = decltype(std::declval<const Container>().begin());
  using Elem = decltype(*std::declval<Iter>());

  Iter iter;
  const Iter end;

 public:
  explicit ContainerIter(const Container& container)
      : iter(container.begin()), end(container.end()) {}

  bool done() const { return iter == end; }

  Elem get() const { return *iter; }

  void next() {
    MOZ_ASSERT(!done());
    ++iter;
  }
};

// IncrementalIter is a template class that makes a normal iterator into one
// that can be used to perform incremental work by using external state that
// persists between instantiations. The state is only initialised on the first
// use and subsequent uses carry on from the previous state.
template <typename Iter>
struct IncrementalIter {
  using State = mozilla::Maybe<Iter>;
  using Elem = decltype(std::declval<Iter>().get());

 private:
  State& maybeIter;

 public:
  template <typename... Args>
  explicit IncrementalIter(State& maybeIter, Args&&... args)
      : maybeIter(maybeIter) {
    if (maybeIter.isNothing()) {
      maybeIter.emplace(std::forward<Args>(args)...);
    }
  }

  ~IncrementalIter() {
    if (done()) {
      maybeIter.reset();
    }
  }

  bool done() const { return maybeIter.ref().done(); }

  Elem get() const { return maybeIter.ref().get(); }

  void next() { maybeIter.ref().next(); }
};

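// For example (a hypothetical sketch, not code used verbatim in this file):
// iterating a vector over several slices, keeping the position in external
// state:
//
//   using IntIter = IncrementalIter<ContainerIter<Vector<int>>>;
//   IntIter::State state;  // lives across slices
//
//   // Each slice does:
//   for (IntIter iter(state, vec); !iter.done(); iter.next()) {
//     if (process(iter.get()) == NotFinished) {
//       return NotFinished;  // resume with the same element next slice
//     }
//   }
//   return Finished;  // state was reset by ~IncrementalIter
//
// Here |vec| and |process| are hypothetical. This is the pattern used by
// SweepActionSequence and SweepActionForEach below to resume part-way
// through their lists.
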
// Iterate through the sweep groups created by
// GCRuntime::groupZonesForSweeping().
class js::gc::SweepGroupsIter {
  GCRuntime* gc;

 public:
  explicit SweepGroupsIter(JSRuntime* rt) : gc(&rt->gc) {
    MOZ_ASSERT(gc->currentSweepGroup);
  }

  bool done() const { return !gc->currentSweepGroup; }

  Zone* get() const { return gc->currentSweepGroup; }

  void next() {
    MOZ_ASSERT(!done());
    gc->getNextSweepGroup();
  }
};

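// The sweep phase is driven by a tree of SweepAction objects, built once by
// GCRuntime::initSweepActions() below. Leaf actions call a GCRuntime method
// or yield for a zeal mode; Sequence and ForEach combinators compose them and
// record their progress in persistent iterator state so that an unfinished
// action is re-run on the next slice.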
namespace sweepaction {

// Implementation of the SweepAction interface that calls a method on
// GCRuntime.
class SweepActionCall final : public SweepAction {
  using Method = IncrementalProgress (GCRuntime::*)(JS::GCContext* gcx,
                                                    SliceBudget& budget);

  Method method;

 public:
  explicit SweepActionCall(Method m) : method(m) {}
  IncrementalProgress run(Args& args) override {
    return (args.gc->*method)(args.gcx, args.budget);
  }
  void assertFinished() const override {}
};

// Implementation of the SweepAction interface that yields in a specified zeal
// mode.
class SweepActionMaybeYield final : public SweepAction {
#ifdef JS_GC_ZEAL
  ZealMode mode;
  bool isYielding;
#endif

 public:
  explicit SweepActionMaybeYield(ZealMode mode)
#ifdef JS_GC_ZEAL
      : mode(mode),
        isYielding(false)
#endif
  {
  }

  IncrementalProgress run(Args& args) override {
#ifdef JS_GC_ZEAL
    if (!isYielding && args.gc->shouldYieldForZeal(mode)) {
      isYielding = true;
      return NotFinished;
    }

    isYielding = false;
#endif
    return Finished;
  }

  void assertFinished() const override { MOZ_ASSERT(!isYielding); }

  // These actions should be skipped if GC zeal is not configured.
#ifndef JS_GC_ZEAL
  bool shouldSkip() override { return true; }
#endif
};

// Implementation of the SweepAction interface that calls a list of actions in
// sequence.
class SweepActionSequence final : public SweepAction {
  using ActionVector = Vector<UniquePtr<SweepAction>, 0, SystemAllocPolicy>;
  using Iter = IncrementalIter<ContainerIter<ActionVector>>;

  ActionVector actions;
  typename Iter::State iterState;

 public:
  bool init(UniquePtr<SweepAction>* acts, size_t count) {
    for (size_t i = 0; i < count; i++) {
      auto& action = acts[i];
      if (!action) {
        return false;
      }
      if (action->shouldSkip()) {
        continue;
      }
      if (!actions.emplaceBack(std::move(action))) {
        return false;
      }
    }
    return true;
  }

  IncrementalProgress run(Args& args) override {
    for (Iter iter(iterState, actions); !iter.done(); iter.next()) {
      if (iter.get()->run(args) == NotFinished) {
        return NotFinished;
      }
    }
    return Finished;
  }

  void assertFinished() const override {
    MOZ_ASSERT(iterState.isNothing());
    for (const auto& action : actions) {
      action->assertFinished();
    }
  }
};

template <typename Iter, typename Init>
class SweepActionForEach final : public SweepAction {
  using Elem = decltype(std::declval<Iter>().get());
  using IncrIter = IncrementalIter<Iter>;

  Init iterInit;
  Elem* elemOut;
  UniquePtr<SweepAction> action;
  typename IncrIter::State iterState;

 public:
  SweepActionForEach(const Init& init, Elem* maybeElemOut,
                     UniquePtr<SweepAction> action)
      : iterInit(init), elemOut(maybeElemOut), action(std::move(action)) {}

  IncrementalProgress run(Args& args) override {
    MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
    auto clearElem = mozilla::MakeScopeExit([&] { setElem(Elem()); });
    for (IncrIter iter(iterState, iterInit); !iter.done(); iter.next()) {
      setElem(iter.get());
      if (action->run(args) == NotFinished) {
        return NotFinished;
      }
    }
    return Finished;
  }

  void assertFinished() const override {
    MOZ_ASSERT(iterState.isNothing());
    MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
    action->assertFinished();
  }

 private:
  void setElem(const Elem& value) {
    if (elemOut) {
      *elemOut = value;
    }
  }
};

static UniquePtr<SweepAction> Call(IncrementalProgress (GCRuntime::*method)(
    JS::GCContext* gcx, SliceBudget& budget)) {
  return MakeUnique<SweepActionCall>(method);
}

static UniquePtr<SweepAction> MaybeYield(ZealMode zealMode) {
  return MakeUnique<SweepActionMaybeYield>(zealMode);
}

template <typename... Rest>
static UniquePtr<SweepAction> Sequence(UniquePtr<SweepAction> first,
                                       Rest... rest) {
  UniquePtr<SweepAction> actions[] = {std::move(first), std::move(rest)...};
  auto seq = MakeUnique<SweepActionSequence>();
  if (!seq || !seq->init(actions, std::size(actions))) {
    return nullptr;
  }

  return UniquePtr<SweepAction>(std::move(seq));
}

static UniquePtr<SweepAction> RepeatForSweepGroup(
    JSRuntime* rt, UniquePtr<SweepAction> action) {
  if (!action) {
    return nullptr;
  }

  using Action = SweepActionForEach<SweepGroupsIter, JSRuntime*>;
  return js::MakeUnique<Action>(rt, nullptr, std::move(action));
}

static UniquePtr<SweepAction> ForEachZoneInSweepGroup(
    JSRuntime* rt, Zone** zoneOut, UniquePtr<SweepAction> action) {
  if (!action) {
    return nullptr;
  }

  using Action = SweepActionForEach<SweepGroupZonesIter, JSRuntime*>;
  return js::MakeUnique<Action>(rt, zoneOut, std::move(action));
}

static UniquePtr<SweepAction> ForEachAllocKind(AllocKinds kinds,
                                               AllocKind* kindOut,
                                               UniquePtr<SweepAction> action) {
  if (!action) {
    return nullptr;
  }

  using Action = SweepActionForEach<ContainerIter<AllocKinds>, AllocKinds>;
  return js::MakeUnique<Action>(kinds, kindOut, std::move(action));
}

}  // namespace sweepaction

bool GCRuntime::initSweepActions() {
  using namespace sweepaction;
  using sweepaction::Call;

  sweepActions.ref() = RepeatForSweepGroup(
      rt,
      Sequence(
          Call(&GCRuntime::beginMarkingSweepGroup),
          Call(&GCRuntime::markGrayRootsInCurrentGroup),
          MaybeYield(ZealMode::YieldWhileGrayMarking),
          Call(&GCRuntime::markGray), Call(&GCRuntime::endMarkingSweepGroup),
          Call(&GCRuntime::beginSweepingSweepGroup),
          MaybeYield(ZealMode::IncrementalMultipleSlices),
          MaybeYield(ZealMode::YieldBeforeSweepingAtoms),
          Call(&GCRuntime::sweepAtomsTable),
          MaybeYield(ZealMode::YieldBeforeSweepingCaches),
          Call(&GCRuntime::sweepWeakCaches),
          ForEachZoneInSweepGroup(
              rt, &sweepZone.ref(),
              Sequence(MaybeYield(ZealMode::YieldBeforeSweepingObjects),
                       ForEachAllocKind(ForegroundObjectFinalizePhase.kinds,
                                        &sweepAllocKind.ref(),
                                        Call(&GCRuntime::finalizeAllocKind)),
                       MaybeYield(ZealMode::YieldBeforeSweepingNonObjects),
                       ForEachAllocKind(ForegroundNonObjectFinalizePhase.kinds,
                                        &sweepAllocKind.ref(),
                                        Call(&GCRuntime::finalizeAllocKind)),
                       MaybeYield(ZealMode::YieldBeforeSweepingPropMapTrees),
                       Call(&GCRuntime::sweepPropMapTree))),
          Call(&GCRuntime::endSweepingSweepGroup)));

  return sweepActions != nullptr;
}

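// Run the sweep action tree until it finishes or the slice budget is
// exhausted. Each slice re-enters sweepActions->run(); the incremental
// iterator state stored in the actions makes it carry on from wherever the
// previous slice stopped.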
IncrementalProgress GCRuntime::performSweepActions(SliceBudget& budget) {
  AutoMajorGCProfilerEntry s(this);
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);

  JS::GCContext* gcx = rt->gcContext();
  AutoSetThreadIsSweeping threadIsSweeping(gcx);
  AutoPoisonFreedJitCode pjc(gcx);

  // Don't trigger pre-barriers when finalizing.
  AutoDisableBarriers disableBarriers(this);

  // Drain the mark stack, possibly in a parallel task if we're in a part of
  // sweeping that allows it.
  //
  // The exception is the first sweep slice: we must not yield to the mutator
  // until we've started sweeping a sweep group, but in that case the mark
  // stack must be empty already.

#ifdef DEBUG
  MOZ_ASSERT(initialState <= State::Sweep);
  if (initialState != State::Sweep) {
    assertNoMarkingWork();
  }
#endif

  if (initialState == State::Sweep &&
      markDuringSweeping(gcx, budget) == NotFinished) {
    return NotFinished;
  }

  // Then continue running sweep actions.

  SweepAction::Args args{this, gcx, budget};
  IncrementalProgress sweepProgress = sweepActions->run(args);
  IncrementalProgress markProgress = joinBackgroundMarkTask();

  if (sweepProgress == Finished && markProgress == Finished) {
    return Finished;
  }

  MOZ_ASSERT(isIncremental);
  return NotFinished;
}

bool GCRuntime::allCCVisibleZonesWereCollected() {
  // Calculate whether the gray marking state is now valid.
  //
  // The gray bits change from invalid to valid if we finished a full GC from
  // the point of view of the cycle collector. We ignore the following:
  //
  //  - Helper thread zones, as these are not reachable from the main heap.
  //  - The atoms zone, since strings and symbols are never marked gray.
  //  - Empty zones.
  //
  // These exceptions ensure that when the CC requests a full GC the gray mark
  // state ends up valid even if we don't collect all of the zones.

  for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
    if (!zone->isCollecting() && !zone->arenas.arenaListsAreEmpty()) {
      return false;
    }
  }

  return true;
}

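// Finish up the sweep phase once all sweep actions have completed: sweep
// shared script data, run finalize callbacks, and update the gray bits state.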
void GCRuntime::endSweepPhase(bool destroyingRuntime) {
  MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);

  sweepActions->assertFinished();

  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);

  MOZ_ASSERT_IF(destroyingRuntime, !useBackgroundThreads);

  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY);

    // Sweep shared script bytecode now that all zones have been swept and
    // finalizers for BaseScripts have released their references.
    SweepScriptData(rt);
  }

  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
    AutoLockStoreBuffer lock(&storeBuffer());
    callFinalizeCallbacks(rt->gcContext(), JSFINALIZE_COLLECTION_END);

    if (allCCVisibleZonesWereCollected()) {
      grayBitsValid = true;
    }
  }

  if (isIncremental) {
    findDeadCompartments();
  }

#ifdef JS_GC_ZEAL
  finishMarkingValidation();
#endif

#ifdef DEBUG
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    for (auto i : AllAllocKinds()) {
      MOZ_ASSERT_IF(!IsBackgroundFinalized(i) || !useBackgroundThreads,
                    zone->arenas.collectingArenaList(i).isEmpty());
    }
  }
#endif

  AssertNoWrappersInGrayList(rt);
}