/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
 * This code implements a mark-and-sweep garbage collector. The mark phase is
 * incremental. Most sweeping is done on a background thread. A GC is divided
 * into slices as follows:
 *
 * Slice 1: Roots pushed onto the mark stack. The mark stack is processed by
 * popping an element, marking it, and pushing its children.
 *   ... JS code runs ...
 * Slice 2: More mark stack processing.
 *   ... JS code runs ...
 * Slice n-1: More mark stack processing.
 *   ... JS code runs ...
 * Slice n: Mark stack is completely drained. Some sweeping is done.
 *   ... JS code runs, remaining sweeping done on background thread ...
 *
 * When background sweeping finishes the GC is complete.
 *
 * Incremental GC requires close collaboration with the mutator (i.e., JS code):
 *
 * 1. During an incremental GC, if a memory location (except a root) is written
 * to, then the value it previously held must be marked. Write barriers ensure
 * this.
 * 2. Any object that is allocated during incremental GC must start out marked.
 * 3. Roots are special memory locations that don't need write
 * barriers. However, they must be marked in the first slice. Roots are things
 * like the C stack and the VM stack, since it would be too expensive to put
 * barriers on them.
 */
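/*
 * Illustrative sketch of requirement 1 (not from the original source): under
 * incremental marking, a mutator write such as
 *
 *   obj->field = newValue;
 *
 * must behave as if it were preceded by a pre-write barrier that marks the
 * value being overwritten, conceptually:
 *
 *   if (zone->needsBarrier())
 *       Mark(obj->field);   // old value stays reachable for this GC cycle
 *   obj->field = newValue;
 *
 * The names above are schematic; the real barriers live in the Heap* wrapper
 * types (see gc/Barrier.h).
 */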
#include "jsgcinlines.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Move.h"
#include "mozilla/Util.h"

#include <string.h>     /* for memset used when DEBUG */

#include "jscompartment.h"
#include "jswatchpoint.h"
#include "jsweakmap.h"

#include "TraceLogging.h"

#include "gc/FindSCCs.h"
#include "gc/GCInternals.h"
#include "gc/Marking.h"
#include "gc/Memory.h"
#include "jit/BaselineJIT.h"
#include "jit/IonCode.h"
#include "vm/Debugger.h"
#include "vm/ForkJoin.h"
#include "vm/ProxyObject.h"
#include "vm/String.h"
#include "vm/WrapperObject.h"

#include "jsobjinlines.h"
#include "jsscriptinlines.h"

#include "vm/Stack-inl.h"
#include "vm/String-inl.h"
using namespace js;
using namespace js::gc;

using mozilla::ArrayEnd;
using mozilla::DebugOnly;
/* Perform a Full GC every 20 seconds if MaybeGC is called */
static const uint64_t GC_IDLE_FULL_SPAN = 20 * 1000 * 1000;

/* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
static const int IGC_MARK_SLICE_MULTIPLIER = 2;

#if defined(ANDROID) || defined(MOZ_B2G)
static const int MAX_EMPTY_CHUNK_COUNT = 2;
#else
static const int MAX_EMPTY_CHUNK_COUNT = 30;
#endif
/* This array should be const, but that doesn't link right under GCC. */
const AllocKind gc::slotsToThingKind[] = {
    /*  0 */ FINALIZE_OBJECT0,  FINALIZE_OBJECT2,  FINALIZE_OBJECT2,  FINALIZE_OBJECT4,
    /*  4 */ FINALIZE_OBJECT4,  FINALIZE_OBJECT8,  FINALIZE_OBJECT8,  FINALIZE_OBJECT8,
    /*  8 */ FINALIZE_OBJECT8,  FINALIZE_OBJECT12, FINALIZE_OBJECT12, FINALIZE_OBJECT12,
    /* 12 */ FINALIZE_OBJECT12, FINALIZE_OBJECT16, FINALIZE_OBJECT16, FINALIZE_OBJECT16,
    /* 16 */ FINALIZE_OBJECT16
};

JS_STATIC_ASSERT(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT);
const uint32_t Arena::ThingSizes[] = {
    sizeof(JSObject),           /* FINALIZE_OBJECT0             */
    sizeof(JSObject),           /* FINALIZE_OBJECT0_BACKGROUND  */
    sizeof(JSObject_Slots2),    /* FINALIZE_OBJECT2             */
    sizeof(JSObject_Slots2),    /* FINALIZE_OBJECT2_BACKGROUND  */
    sizeof(JSObject_Slots4),    /* FINALIZE_OBJECT4             */
    sizeof(JSObject_Slots4),    /* FINALIZE_OBJECT4_BACKGROUND  */
    sizeof(JSObject_Slots8),    /* FINALIZE_OBJECT8             */
    sizeof(JSObject_Slots8),    /* FINALIZE_OBJECT8_BACKGROUND  */
    sizeof(JSObject_Slots12),   /* FINALIZE_OBJECT12            */
    sizeof(JSObject_Slots12),   /* FINALIZE_OBJECT12_BACKGROUND */
    sizeof(JSObject_Slots16),   /* FINALIZE_OBJECT16            */
    sizeof(JSObject_Slots16),   /* FINALIZE_OBJECT16_BACKGROUND */
    sizeof(JSScript),           /* FINALIZE_SCRIPT              */
    sizeof(LazyScript),         /* FINALIZE_LAZY_SCRIPT         */
    sizeof(Shape),              /* FINALIZE_SHAPE               */
    sizeof(BaseShape),          /* FINALIZE_BASE_SHAPE          */
    sizeof(types::TypeObject),  /* FINALIZE_TYPE_OBJECT         */
    sizeof(JSShortString),      /* FINALIZE_SHORT_STRING        */
    sizeof(JSString),           /* FINALIZE_STRING              */
    sizeof(JSExternalString),   /* FINALIZE_EXTERNAL_STRING     */
    sizeof(jit::IonCode),       /* FINALIZE_IONCODE             */
};
#define OFFSET(type) uint32_t(sizeof(ArenaHeader) + (ArenaSize - sizeof(ArenaHeader)) % sizeof(type))
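/*
 * Worked example (illustrative; the concrete numbers are hypothetical): with
 * ArenaSize == 4096 and a 32-byte ArenaHeader, OFFSET for a 48-byte thing is
 *
 *     32 + (4096 - 32) % 48  ==  32 + 32  ==  64,
 *
 * so the (4096 - 64) / 48 == 84 things fill the arena exactly, i.e. the
 * remainder that cannot hold a whole thing is wasted up front rather than at
 * the end of the arena.
 */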
const uint32_t Arena::FirstThingOffsets[] = {
    OFFSET(JSObject),           /* FINALIZE_OBJECT0             */
    OFFSET(JSObject),           /* FINALIZE_OBJECT0_BACKGROUND  */
    OFFSET(JSObject_Slots2),    /* FINALIZE_OBJECT2             */
    OFFSET(JSObject_Slots2),    /* FINALIZE_OBJECT2_BACKGROUND  */
    OFFSET(JSObject_Slots4),    /* FINALIZE_OBJECT4             */
    OFFSET(JSObject_Slots4),    /* FINALIZE_OBJECT4_BACKGROUND  */
    OFFSET(JSObject_Slots8),    /* FINALIZE_OBJECT8             */
    OFFSET(JSObject_Slots8),    /* FINALIZE_OBJECT8_BACKGROUND  */
    OFFSET(JSObject_Slots12),   /* FINALIZE_OBJECT12            */
    OFFSET(JSObject_Slots12),   /* FINALIZE_OBJECT12_BACKGROUND */
    OFFSET(JSObject_Slots16),   /* FINALIZE_OBJECT16            */
    OFFSET(JSObject_Slots16),   /* FINALIZE_OBJECT16_BACKGROUND */
    OFFSET(JSScript),           /* FINALIZE_SCRIPT              */
    OFFSET(LazyScript),         /* FINALIZE_LAZY_SCRIPT         */
    OFFSET(Shape),              /* FINALIZE_SHAPE               */
    OFFSET(BaseShape),          /* FINALIZE_BASE_SHAPE          */
    OFFSET(types::TypeObject),  /* FINALIZE_TYPE_OBJECT         */
    OFFSET(JSShortString),      /* FINALIZE_SHORT_STRING        */
    OFFSET(JSString),           /* FINALIZE_STRING              */
    OFFSET(JSExternalString),   /* FINALIZE_EXTERNAL_STRING     */
    OFFSET(jit::IonCode),       /* FINALIZE_IONCODE             */
};

#undef OFFSET
/*
 * Finalization order for incrementally swept things.
 */

static const AllocKind FinalizePhaseStrings[] = {
    FINALIZE_EXTERNAL_STRING
};

static const AllocKind FinalizePhaseScripts[] = {
    FINALIZE_SCRIPT,
    FINALIZE_LAZY_SCRIPT
};

static const AllocKind FinalizePhaseIonCode[] = {
    FINALIZE_IONCODE
};

static const AllocKind * const FinalizePhases[] = {
    FinalizePhaseStrings,
    FinalizePhaseScripts,
    FinalizePhaseIonCode
};
static const int FinalizePhaseCount = sizeof(FinalizePhases) / sizeof(AllocKind*);

static const int FinalizePhaseLength[] = {
    sizeof(FinalizePhaseStrings) / sizeof(AllocKind),
    sizeof(FinalizePhaseScripts) / sizeof(AllocKind),
    sizeof(FinalizePhaseIonCode) / sizeof(AllocKind)
};

static const gcstats::Phase FinalizePhaseStatsPhase[] = {
    gcstats::PHASE_SWEEP_STRING,
    gcstats::PHASE_SWEEP_SCRIPT,
    gcstats::PHASE_SWEEP_IONCODE
};
/*
 * Finalization order for things swept in the background.
 */

static const AllocKind BackgroundPhaseObjects[] = {
    FINALIZE_OBJECT0_BACKGROUND,
    FINALIZE_OBJECT2_BACKGROUND,
    FINALIZE_OBJECT4_BACKGROUND,
    FINALIZE_OBJECT8_BACKGROUND,
    FINALIZE_OBJECT12_BACKGROUND,
    FINALIZE_OBJECT16_BACKGROUND
};

static const AllocKind BackgroundPhaseStrings[] = {
    FINALIZE_SHORT_STRING,
    FINALIZE_STRING
};

static const AllocKind BackgroundPhaseShapes[] = {
    FINALIZE_SHAPE,
    FINALIZE_BASE_SHAPE,
    FINALIZE_TYPE_OBJECT
};

static const AllocKind * const BackgroundPhases[] = {
    BackgroundPhaseObjects,
    BackgroundPhaseStrings,
    BackgroundPhaseShapes
};
static const int BackgroundPhaseCount = sizeof(BackgroundPhases) / sizeof(AllocKind*);

static const int BackgroundPhaseLength[] = {
    sizeof(BackgroundPhaseObjects) / sizeof(AllocKind),
    sizeof(BackgroundPhaseStrings) / sizeof(AllocKind),
    sizeof(BackgroundPhaseShapes) / sizeof(AllocKind)
};
void
ArenaHeader::checkSynchronizedWithFreeList() const
{
#ifdef DEBUG
    /*
     * Do not allow to access the free list when its real head is still stored
     * in FreeLists and is not synchronized with this one.
     */
    JS_ASSERT(allocated());

    /*
     * We can be called from the background finalization thread when the free
     * list in the zone can mutate at any moment. We cannot do any
     * checks in this case.
     */
    if (IsBackgroundFinalized(getAllocKind()) &&
        zone->runtimeFromAnyThread()->gcHelperThread.onBackgroundThread())
    {
        return;
    }

    FreeSpan firstSpan = FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
    if (firstSpan.isEmpty())
        return;
    const FreeSpan *list = zone->allocator.arenas.getFreeList(getAllocKind());
    if (list->isEmpty() || firstSpan.arenaAddress() != list->arenaAddress())
        return;

    /*
     * Here this arena has free things, FreeList::lists[thingKind] is not
     * empty and also points to this arena. Thus they must be the same.
     */
    JS_ASSERT(firstSpan.isSameNonEmptySpan(list));
#endif
}
void
Arena::staticAsserts()
{
    JS_STATIC_ASSERT(sizeof(Arena) == ArenaSize);
    JS_STATIC_ASSERT(JS_ARRAY_LENGTH(ThingSizes) == FINALIZE_LIMIT);
    JS_STATIC_ASSERT(JS_ARRAY_LENGTH(FirstThingOffsets) == FINALIZE_LIMIT);
}
template<typename T>
inline bool
Arena::finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize)
{
    /* Enforce requirements on size of T. */
    JS_ASSERT(thingSize % CellSize == 0);
    JS_ASSERT(thingSize <= 255);

    JS_ASSERT(aheader.allocated());
    JS_ASSERT(thingKind == aheader.getAllocKind());
    JS_ASSERT(thingSize == aheader.getThingSize());
    JS_ASSERT(!aheader.hasDelayedMarking);
    JS_ASSERT(!aheader.markOverflow);
    JS_ASSERT(!aheader.allocatedDuringIncremental);

    uintptr_t thing = thingsStart(thingKind);
    uintptr_t lastByte = thingsEnd() - 1;

    FreeSpan nextFree(aheader.getFirstFreeSpan());
    nextFree.checkSpan();

    FreeSpan newListHead;
    FreeSpan *newListTail = &newListHead;
    uintptr_t newFreeSpanStart = 0;
    bool allClear = true;
    DebugOnly<size_t> nmarked = 0;
    for (;; thing += thingSize) {
        JS_ASSERT(thing <= lastByte + 1);
        if (thing == nextFree.first) {
            /* The thing is already free: skip over the whole span. */
            JS_ASSERT(nextFree.last <= lastByte);
            if (nextFree.last == lastByte)
                break;
            JS_ASSERT(Arena::isAligned(nextFree.last, thingSize));
            if (!newFreeSpanStart)
                newFreeSpanStart = thing;
            thing = nextFree.last;
            nextFree = *nextFree.nextSpan();
            nextFree.checkSpan();
        } else {
            T *t = reinterpret_cast<T *>(thing);
            if (t->isMarked()) {
                /* A marked thing survives: close any pending free span. */
                allClear = false;
                nmarked++;
                if (newFreeSpanStart) {
                    JS_ASSERT(thing >= thingsStart(thingKind) + thingSize);
                    newListTail->first = newFreeSpanStart;
                    newListTail->last = thing - thingSize;
                    newListTail = newListTail->nextSpanUnchecked(thingSize);
                    newFreeSpanStart = 0;
                }
            } else {
                /* An unmarked thing is dead: finalize it and extend the span. */
                if (!newFreeSpanStart)
                    newFreeSpanStart = thing;
                t->finalize(fop);
                JS_POISON(t, JS_FREE_PATTERN, thingSize);
            }
        }
    }

    if (allClear) {
        JS_ASSERT(newListTail == &newListHead);
        JS_ASSERT(newFreeSpanStart == thingsStart(thingKind));
        return true;
    }

    newListTail->first = newFreeSpanStart ? newFreeSpanStart : nextFree.first;
    JS_ASSERT(Arena::isAligned(newListTail->first, thingSize));
    newListTail->last = lastByte;

#ifdef DEBUG
    size_t nfree = 0;
    for (const FreeSpan *span = &newListHead; span != newListTail; span = span->nextSpan()) {
        span->checkSpan();
        JS_ASSERT(Arena::isAligned(span->first, thingSize));
        JS_ASSERT(Arena::isAligned(span->last, thingSize));
        nfree += (span->last - span->first) / thingSize + 1;
        JS_ASSERT(nfree + nmarked <= thingsPerArena(thingSize));
    }
    nfree += (newListTail->last + 1 - newListTail->first) / thingSize;
    JS_ASSERT(nfree + nmarked == thingsPerArena(thingSize));
#endif
    aheader.setFirstFreeSpan(&newListHead);

    return false;
}
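/*
 * Illustrative example (hypothetical numbers, not from the original source):
 * suppose an arena holds 64-byte things starting at offset 64, and after
 * marking only the things at offsets 192 and 448 are alive. The loop above
 * rebuilds the free-span list as
 *
 *     [64, 128] -> [256, 384] -> [512, lastByte]
 *
 * i.e. one span per maximal run of dead things, with each span's terminal
 * word inside the arena linking to the next span. An arena with no marked
 * things instead returns true so the caller can release the whole arena.
 */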
/*
 * Insert an arena into the list in appropriate position and update the cursor
 * to ensure that any arena before the cursor is full.
 */
void
ArenaList::insert(ArenaHeader *a)
{
    JS_ASSERT(a);
    JS_ASSERT_IF(!head, cursor == &head);
    a->next = *cursor;
    *cursor = a;
    if (!a->hasFreeThings())
        cursor = &a->next;
}
template<typename T>
static inline bool
FinalizeTypedArenas(FreeOp *fop,
                    ArenaHeader **src,
                    ArenaList &dest,
                    AllocKind thingKind,
                    SliceBudget &budget)
{
    /*
     * Finalize arenas from src list, releasing empty arenas and inserting the
     * others into dest in an appropriate position.
     */

    size_t thingSize = Arena::thingSize(thingKind);

    while (ArenaHeader *aheader = *src) {
        *src = aheader->next;
        bool allClear = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
        if (allClear)
            aheader->chunk()->releaseArena(aheader);
        else
            dest.insert(aheader);
        budget.step(Arena::thingsPerArena(thingSize));
        if (budget.isOverBudget())
            return false;
    }

    return true;
}
/*
 * Finalize the list. On return al->cursor points to the first non-empty arena
 * after the al->head.
 */
static bool
FinalizeArenas(FreeOp *fop,
               ArenaHeader **src,
               ArenaList &dest,
               AllocKind thingKind,
               SliceBudget &budget)
{
    switch (thingKind) {
      case FINALIZE_OBJECT0:
      case FINALIZE_OBJECT0_BACKGROUND:
      case FINALIZE_OBJECT2:
      case FINALIZE_OBJECT2_BACKGROUND:
      case FINALIZE_OBJECT4:
      case FINALIZE_OBJECT4_BACKGROUND:
      case FINALIZE_OBJECT8:
      case FINALIZE_OBJECT8_BACKGROUND:
      case FINALIZE_OBJECT12:
      case FINALIZE_OBJECT12_BACKGROUND:
      case FINALIZE_OBJECT16:
      case FINALIZE_OBJECT16_BACKGROUND:
        return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget);
      case FINALIZE_SCRIPT:
        return FinalizeTypedArenas<JSScript>(fop, src, dest, thingKind, budget);
      case FINALIZE_LAZY_SCRIPT:
        return FinalizeTypedArenas<LazyScript>(fop, src, dest, thingKind, budget);
      case FINALIZE_SHAPE:
        return FinalizeTypedArenas<Shape>(fop, src, dest, thingKind, budget);
      case FINALIZE_BASE_SHAPE:
        return FinalizeTypedArenas<BaseShape>(fop, src, dest, thingKind, budget);
      case FINALIZE_TYPE_OBJECT:
        return FinalizeTypedArenas<types::TypeObject>(fop, src, dest, thingKind, budget);
      case FINALIZE_STRING:
        return FinalizeTypedArenas<JSString>(fop, src, dest, thingKind, budget);
      case FINALIZE_SHORT_STRING:
        return FinalizeTypedArenas<JSShortString>(fop, src, dest, thingKind, budget);
      case FINALIZE_EXTERNAL_STRING:
        return FinalizeTypedArenas<JSExternalString>(fop, src, dest, thingKind, budget);
      case FINALIZE_IONCODE:
      {
        // IonCode finalization may release references on an executable
        // allocator that is accessed when triggering interrupts.
        JSRuntime::AutoLockForOperationCallback lock(fop->runtime());
        return FinalizeTypedArenas<jit::IonCode>(fop, src, dest, thingKind, budget);
      }
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid alloc kind");
    }
}
static inline Chunk *
AllocChunk(JSRuntime *rt)
{
    return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
}

static inline void
FreeChunk(JSRuntime *rt, Chunk *p)
{
    UnmapPages(rt, static_cast<void *>(p), ChunkSize);
}
inline bool
ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
{
    /*
     * To minimize memory waste we do not want to run the background chunk
     * allocation if we have empty chunks or when the runtime needs just a few
     * chunks.
     */
    return rt->gcHelperThread.canBackgroundAllocate() &&
           emptyCount == 0 &&
           rt->gcChunkSet.count() >= 4;
}
/* Must be called with the GC lock taken. */
inline Chunk *
ChunkPool::get(JSRuntime *rt)
{
    JS_ASSERT(this == &rt->gcChunkPool);

    Chunk *chunk = emptyChunkListHead;
    if (chunk) {
        JS_ASSERT(emptyCount);
        emptyChunkListHead = chunk->info.next;
        --emptyCount;
    } else {
        JS_ASSERT(!emptyCount);
        chunk = Chunk::allocate(rt);
        if (!chunk)
            return nullptr;
        JS_ASSERT(chunk->info.numArenasFreeCommitted == ArenasPerChunk);
        rt->gcNumArenasFreeCommitted += ArenasPerChunk;
    }
    JS_ASSERT(chunk->unused());
    JS_ASSERT(!rt->gcChunkSet.has(chunk));

    if (wantBackgroundAllocation(rt))
        rt->gcHelperThread.startBackgroundAllocationIfIdle();

    return chunk;
}
/* Must be called either during the GC or with the GC lock taken. */
inline void
ChunkPool::put(Chunk *chunk)
{
    chunk->info.age = 0;
    chunk->info.next = emptyChunkListHead;
    emptyChunkListHead = chunk;
    emptyCount++;
}
/* Must be called either during the GC or with the GC lock taken. */
Chunk *
ChunkPool::expire(JSRuntime *rt, bool releaseAll)
{
    JS_ASSERT(this == &rt->gcChunkPool);

    /*
     * Return old empty chunks to the system while preserving the order of
     * other chunks in the list. This way, if the GC runs several times
     * without emptying the list, the older chunks will stay at the tail
     * and are more likely to reach the max age.
     */
    Chunk *freeList = nullptr;
    int freeChunkCount = 0;
    for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
        JS_ASSERT(emptyCount);
        Chunk *chunk = *chunkp;
        JS_ASSERT(chunk->unused());
        JS_ASSERT(!rt->gcChunkSet.has(chunk));
        JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
        if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE ||
            freeChunkCount++ > MAX_EMPTY_CHUNK_COUNT)
        {
            *chunkp = chunk->info.next;
            --emptyCount;
            chunk->prepareToBeFreed(rt);
            chunk->info.next = freeList;
            freeList = chunk;
        } else {
            /* Keep the chunk but increase its age. */
            ++chunk->info.age;
            chunkp = &chunk->info.next;
        }
    }
    JS_ASSERT_IF(releaseAll, !emptyCount);
    return freeList;
}
static void
FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
{
    while (Chunk *chunk = chunkListHead) {
        JS_ASSERT(!chunk->info.numArenasFreeCommitted);
        chunkListHead = chunk->info.next;
        FreeChunk(rt, chunk);
    }
}

void
ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
{
    FreeChunkList(rt, expire(rt, releaseAll));
}
/* static */ Chunk *
Chunk::allocate(JSRuntime *rt)
{
    Chunk *chunk = AllocChunk(rt);

#ifdef JSGC_ROOT_ANALYSIS
    // Our poison pointers are not guaranteed to be invalid on 64-bit
    // architectures, and often are valid. We can't just reserve the full
    // poison range, because it might already have been taken up by something
    // else (shared library, previous allocation). So we'll just loop and
    // discard poison pointers until we get something valid.
    //
    // This leaks all of these poisoned pointers. It would be better if they
    // were marked as uncommitted, but it's a little complicated to avoid
    // clobbering pre-existing unrelated mappings.
    while (IsPoisonedPtr(chunk))
        chunk = AllocChunk(rt);
#endif

    if (!chunk)
        return nullptr;
    chunk->init(rt);
    rt->gcStats.count(gcstats::STAT_NEW_CHUNK);
    return chunk;
}
/* Must be called with the GC lock taken. */
/* static */ inline void
Chunk::release(JSRuntime *rt, Chunk *chunk)
{
    JS_ASSERT(chunk);
    chunk->prepareToBeFreed(rt);
    FreeChunk(rt, chunk);
}
inline void
Chunk::prepareToBeFreed(JSRuntime *rt)
{
    JS_ASSERT(rt->gcNumArenasFreeCommitted >= info.numArenasFreeCommitted);
    rt->gcNumArenasFreeCommitted -= info.numArenasFreeCommitted;
    rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);

#ifdef DEBUG
    /*
     * Let FreeChunkList detect a missing prepareToBeFreed call before it
     * frees the chunk.
     */
    info.numArenasFreeCommitted = 0;
#endif
}
void
Chunk::init(JSRuntime *rt)
{
    JS_POISON(this, JS_FREE_PATTERN, ChunkSize);

    /*
     * We clear the bitmap to guard against xpc_IsGrayGCThing being called on
     * uninitialized data, which would happen before the first GC cycle.
     */
    bitmap.clear();

    /* Initialize the arena tracking bitmap. */
    decommittedArenas.clear(false);

    /* Initialize the chunk info. */
    info.freeArenasHead = &arenas[0].aheader;
    info.lastDecommittedArenaOffset = 0;
    info.numArenasFree = ArenasPerChunk;
    info.numArenasFreeCommitted = ArenasPerChunk;
    info.age = 0;
    info.runtime = rt;

    /* Initialize the arena header state. */
    for (unsigned i = 0; i < ArenasPerChunk; i++) {
        arenas[i].aheader.setAsNotAllocated();
        arenas[i].aheader.next = (i + 1 < ArenasPerChunk)
                                 ? &arenas[i + 1].aheader
                                 : nullptr;
    }

    /* The rest of info fields are initialized in PickChunk. */
}
static inline Chunk **
GetAvailableChunkList(Zone *zone)
{
    JSRuntime *rt = zone->runtimeFromAnyThread();
    return zone->isSystem
           ? &rt->gcSystemAvailableChunkListHead
           : &rt->gcUserAvailableChunkListHead;
}

inline void
Chunk::addToAvailableList(Zone *zone)
{
    insertToAvailableList(GetAvailableChunkList(zone));
}
inline void
Chunk::insertToAvailableList(Chunk **insertPoint)
{
    JS_ASSERT(hasAvailableArenas());
    JS_ASSERT(!info.prevp);
    JS_ASSERT(!info.next);
    info.prevp = insertPoint;
    Chunk *insertBefore = *insertPoint;
    if (insertBefore) {
        JS_ASSERT(insertBefore->info.prevp == insertPoint);
        insertBefore->info.prevp = &info.next;
    }
    info.next = insertBefore;
    *insertPoint = this;
}
inline void
Chunk::removeFromAvailableList()
{
    JS_ASSERT(info.prevp);
    *info.prevp = info.next;
    if (info.next) {
        JS_ASSERT(info.next->info.prevp == &info.next);
        info.next->info.prevp = info.prevp;
        info.next = nullptr;
    }
    info.prevp = nullptr;
}
/*
 * Search for and return the next decommitted Arena. Our goal is to keep
 * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
 * it to the most recently freed arena when we free, and forcing it to
 * the last alloc + 1 when we allocate.
 */
uint32_t
Chunk::findDecommittedArenaOffset()
{
    /* Note: lastFreeArenaOffset can be past the end of the list. */
    for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++)
        if (decommittedArenas.get(i))
            return i;
    for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++)
        if (decommittedArenas.get(i))
            return i;
    MOZ_ASSUME_UNREACHABLE("No decommitted arenas found.");
}
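/*
 * Example (schematic): if lastDecommittedArenaOffset is 5 and arenas 2 and 7
 * are decommitted, the first loop finds and returns 7; if only arena 2 were
 * decommitted, the first loop would run off the end and the second
 * (wrap-around) loop would return 2.
 */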
ArenaHeader *
Chunk::fetchNextDecommittedArena()
{
    JS_ASSERT(info.numArenasFreeCommitted == 0);
    JS_ASSERT(info.numArenasFree > 0);

    unsigned offset = findDecommittedArenaOffset();
    info.lastDecommittedArenaOffset = offset + 1;
    --info.numArenasFree;
    decommittedArenas.unset(offset);

    Arena *arena = &arenas[offset];
    MarkPagesInUse(info.runtime, arena, ArenaSize);
    arena->aheader.setAsNotAllocated();

    return &arena->aheader;
}
inline ArenaHeader *
Chunk::fetchNextFreeArena(JSRuntime *rt)
{
    JS_ASSERT(info.numArenasFreeCommitted > 0);
    JS_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
    JS_ASSERT(info.numArenasFreeCommitted <= rt->gcNumArenasFreeCommitted);

    ArenaHeader *aheader = info.freeArenasHead;
    info.freeArenasHead = aheader->next;
    --info.numArenasFreeCommitted;
    --info.numArenasFree;
    --rt->gcNumArenasFreeCommitted;

    return aheader;
}
ArenaHeader *
Chunk::allocateArena(Zone *zone, AllocKind thingKind)
{
    JS_ASSERT(hasAvailableArenas());

    JSRuntime *rt = zone->runtimeFromAnyThread();
    if (!rt->isHeapMinorCollecting() && rt->gcBytes >= rt->gcMaxBytes)
        return nullptr;

    ArenaHeader *aheader = JS_LIKELY(info.numArenasFreeCommitted > 0)
                           ? fetchNextFreeArena(rt)
                           : fetchNextDecommittedArena();
    aheader->init(zone, thingKind);
    if (JS_UNLIKELY(!hasAvailableArenas()))
        removeFromAvailableList();

    rt->gcBytes += ArenaSize;
    zone->gcBytes += ArenaSize;
    if (zone->gcBytes >= zone->gcTriggerBytes)
        TriggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);

    return aheader;
}
inline void
Chunk::addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader)
{
    JS_ASSERT(!aheader->allocated());
    aheader->next = info.freeArenasHead;
    info.freeArenasHead = aheader;
    ++info.numArenasFreeCommitted;
    ++info.numArenasFree;
    ++rt->gcNumArenasFreeCommitted;
}
void
Chunk::releaseArena(ArenaHeader *aheader)
{
    JS_ASSERT(aheader->allocated());
    JS_ASSERT(!aheader->hasDelayedMarking);
    Zone *zone = aheader->zone;
    JSRuntime *rt = zone->runtimeFromAnyThread();
    AutoLockGC maybeLock;
    if (rt->gcHelperThread.sweeping())
        maybeLock.lock(rt);

    JS_ASSERT(rt->gcBytes >= ArenaSize);
    JS_ASSERT(zone->gcBytes >= ArenaSize);
    if (rt->gcHelperThread.sweeping())
        zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
    rt->gcBytes -= ArenaSize;
    zone->gcBytes -= ArenaSize;

    aheader->setAsNotAllocated();
    addArenaToFreeList(rt, aheader);

    if (info.numArenasFree == 1) {
        JS_ASSERT(!info.prevp);
        JS_ASSERT(!info.next);
        addToAvailableList(zone);
    } else if (!unused()) {
        JS_ASSERT(info.prevp);
    } else {
        rt->gcChunkSet.remove(this);
        removeFromAvailableList();
        rt->gcChunkPool.put(this);
    }
}
/* The caller must hold the GC lock. */
static Chunk *
PickChunk(Zone *zone)
{
    JSRuntime *rt = zone->runtimeFromAnyThread();
    Chunk **listHeadp = GetAvailableChunkList(zone);
    Chunk *chunk = *listHeadp;
    if (chunk)
        return chunk;

    chunk = rt->gcChunkPool.get(rt);
    if (!chunk)
        return nullptr;

    rt->gcChunkAllocationSinceLastGC = true;

    /*
     * FIXME bug 583732 - chunk is newly allocated and cannot be present in
     * the table so using ordinary lookupForAdd is suboptimal here.
     */
    GCChunkSet::AddPtr p = rt->gcChunkSet.lookupForAdd(chunk);
    JS_ASSERT(!p);
    if (!rt->gcChunkSet.add(p, chunk)) {
        Chunk::release(rt, chunk);
        return nullptr;
    }

    chunk->info.prevp = nullptr;
    chunk->info.next = nullptr;
    chunk->addToAvailableList(zone);

    return chunk;
}
#ifdef JS_GC_ZEAL
void
js::SetGCZeal(JSRuntime *rt, uint8_t zeal, uint32_t frequency)
{
    if (zeal == 0) {
        if (rt->gcVerifyPreData)
            VerifyBarriers(rt, PreBarrierVerifier);
        if (rt->gcVerifyPostData)
            VerifyBarriers(rt, PostBarrierVerifier);
    }

    bool schedule = zeal >= js::gc::ZealAllocValue;
    rt->gcZeal_ = zeal;
    rt->gcZealFrequency = frequency;
    rt->gcNextScheduled = schedule ? frequency : 0;

#ifdef JSGC_GENERATIONAL
    if (zeal == ZealGenerationalGCValue)
        rt->gcNursery.enterZealMode();
#endif
}
void
js::InitGCZeal(JSRuntime *rt)
{
    const char *env = getenv("JS_GC_ZEAL");
    if (!env)
        return;

    int zeal = -1;
    int frequency = JS_DEFAULT_ZEAL_FREQ;
    if (strcmp(env, "help") != 0) {
        zeal = atoi(env);
        const char *p = strchr(env, ',');
        if (p)
            frequency = atoi(p + 1);
    }

    if (zeal < 0 || zeal > ZealLimit || frequency < 0) {
        fprintf(stderr,
                "Format: JS_GC_ZEAL=N[,F]\n"
                "N indicates \"zealousness\":\n"
                "  0: no additional GCs\n"
                "  1: additional GCs at common danger points\n"
                "  2: GC every F allocations (default: 100)\n"
                "  3: GC when the window paints (browser only)\n"
                "  4: Verify pre write barriers between instructions\n"
                "  5: Verify pre write barriers between paints\n"
                "  6: Verify stack rooting\n"
                "  7: Collect the nursery every N nursery allocations\n"
                "  8: Incremental GC in two slices: 1) mark roots 2) finish collection\n"
                "  9: Incremental GC in two slices: 1) mark all 2) new marking and finish\n"
                " 10: Incremental GC in multiple slices\n"
                " 11: Verify post write barriers between instructions\n"
                " 12: Verify post write barriers between paints\n"
                " 13: Purge analysis state every F allocations (default: 100)\n");
        return;
    }

    SetGCZeal(rt, zeal, frequency);
}
#endif
/* Lifetime for type sets attached to scripts containing observed types. */
static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;

bool
js_InitGC(JSRuntime *rt, uint32_t maxbytes)
{
    InitMemorySubsystem(rt);

    if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
        return false;

    if (!rt->gcRootsHash.init(256))
        return false;

#ifdef JS_THREADSAFE
    rt->gcLock = PR_NewLock();
    if (!rt->gcLock)
        return false;
#endif
    if (!rt->gcHelperThread.init())
        return false;

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = maxbytes;
    rt->setGCMaxMallocBytes(maxbytes);

#ifndef JS_MORE_DETERMINISTIC
    rt->gcJitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
#endif

#ifdef JSGC_GENERATIONAL
    if (!rt->gcNursery.init())
        return false;

    if (!rt->gcStoreBuffer.enable())
        return false;
#endif

    return true;
}
static void
RecordNativeStackTopForGC(JSRuntime *rt)
{
    ConservativeGCData *cgcd = &rt->conservativeGC;

#ifdef JS_THREADSAFE
    /* Record the stack top here only if we are called from a request. */
    if (!rt->requestDepth)
        return;
#endif

    cgcd->recordStackTop();
}
void
js_FinishGC(JSRuntime *rt)
{
    /*
     * Wait until the background finalization stops and the helper thread
     * shuts down before we forcefully release any remaining GC memory.
     */
    rt->gcHelperThread.finish();

#ifdef JS_GC_ZEAL
    /* Free memory associated with GC verification. */
    FinishVerifier(rt);
#endif

    /* Delete all remaining zones. */
    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
            js_delete(comp.get());
        js_delete(zone.get());
    }

    rt->zones.clear();

    rt->gcSystemAvailableChunkListHead = nullptr;
    rt->gcUserAvailableChunkListHead = nullptr;
    for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
        Chunk::release(rt, r.front());
    rt->gcChunkSet.clear();

    rt->gcChunkPool.expireAndFree(rt, true);

    rt->gcRootsHash.clear();
}
template <typename T> struct BarrierOwner {};
template <typename T> struct BarrierOwner<T *> { typedef T result; };
template <> struct BarrierOwner<Value> { typedef HeapValue result; };

template <typename T>
static bool
AddRoot(JSRuntime *rt, T *rp, const char *name, JSGCRootType rootType)
{
    /*
     * Sometimes Firefox will hold weak references to objects and then convert
     * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
     * or ModifyBusyCount in workers). We need a read barrier to cover these
     * cases.
     */
    if (rt->gcIncrementalState != NO_INCREMENTAL)
        BarrierOwner<T>::result::writeBarrierPre(*rp);

    return rt->gcRootsHash.put((void *)rp, RootInfo(name, rootType));
}

template <typename T>
static bool
AddRoot(JSContext *cx, T *rp, const char *name, JSGCRootType rootType)
{
    bool ok = AddRoot(cx->runtime(), rp, name, rootType);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

bool
js::AddValueRoot(JSContext *cx, Value *vp, const char *name)
{
    return AddRoot(cx, vp, name, JS_GC_ROOT_VALUE_PTR);
}

bool
js::AddValueRootRT(JSRuntime *rt, js::Value *vp, const char *name)
{
    return AddRoot(rt, vp, name, JS_GC_ROOT_VALUE_PTR);
}

bool
js::AddStringRoot(JSContext *cx, JSString **rp, const char *name)
{
    return AddRoot(cx, rp, name, JS_GC_ROOT_STRING_PTR);
}

bool
js::AddObjectRoot(JSContext *cx, JSObject **rp, const char *name)
{
    return AddRoot(cx, rp, name, JS_GC_ROOT_OBJECT_PTR);
}

bool
js::AddObjectRoot(JSRuntime *rt, JSObject **rp, const char *name)
{
    return AddRoot(rt, rp, name, JS_GC_ROOT_OBJECT_PTR);
}

bool
js::AddScriptRoot(JSContext *cx, JSScript **rp, const char *name)
{
    return AddRoot(cx, rp, name, JS_GC_ROOT_SCRIPT_PTR);
}

extern JS_FRIEND_API(bool)
js_AddObjectRoot(JSRuntime *rt, JSObject **objp)
{
    return AddRoot(rt, objp, nullptr, JS_GC_ROOT_OBJECT_PTR);
}

extern JS_FRIEND_API(void)
js_RemoveObjectRoot(JSRuntime *rt, JSObject **objp)
{
    js_RemoveRoot(rt, objp);
}

void
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    rt->gcRootsHash.remove(rp);
    rt->gcPoke = true;
}

typedef RootedValueMap::Range RootRange;
typedef RootedValueMap::Entry RootEntry;
typedef RootedValueMap::Enum RootEnum;
static size_t
ComputeTriggerBytes(Zone *zone, size_t lastBytes, size_t maxBytes, JSGCInvocationKind gckind)
{
    size_t base = gckind == GC_SHRINK
                  ? lastBytes
                  : Max(lastBytes, zone->runtimeFromMainThread()->gcAllocationThreshold);
    double trigger = double(base) * zone->gcHeapGrowthFactor;
    return size_t(Min(double(maxBytes), trigger));
}
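/*
 * Worked example (hypothetical numbers): with lastBytes == 10 MB, a growth
 * factor of 1.5 and gcMaxBytes well above that, the next trigger is
 * 10 MB * 1.5 == 15 MB. A GC_SHRINK collection uses lastBytes directly as
 * the base, while other kinds never let the base drop below
 * gcAllocationThreshold.
 */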
void
Zone::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
{
    /*
     * The heap growth factor depends on the heap size after a GC and the GC frequency.
     * For low frequency GCs (more than 1sec between GCs) we let the heap grow to 150%.
     * For high frequency GCs we let the heap grow depending on the heap size:
     *   lastBytes < highFrequencyLowLimit: 300%
     *   lastBytes > highFrequencyHighLimit: 150%
     *   otherwise: linear interpolation between 150% and 300% based on lastBytes
     */
    JSRuntime *rt = runtimeFromMainThread();

    if (!rt->gcDynamicHeapGrowth) {
        gcHeapGrowthFactor = 3.0;
    } else if (lastBytes < 1 * 1024 * 1024) {
        gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
    } else {
        JS_ASSERT(rt->gcHighFrequencyHighLimitBytes > rt->gcHighFrequencyLowLimitBytes);
        uint64_t now = PRMJ_Now();
        if (rt->gcLastGCTime && rt->gcLastGCTime + rt->gcHighFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > now) {
            if (lastBytes <= rt->gcHighFrequencyLowLimitBytes) {
                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMax;
            } else if (lastBytes >= rt->gcHighFrequencyHighLimitBytes) {
                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMin;
            } else {
                double k = (rt->gcHighFrequencyHeapGrowthMin - rt->gcHighFrequencyHeapGrowthMax)
                           / (double)(rt->gcHighFrequencyHighLimitBytes - rt->gcHighFrequencyLowLimitBytes);
                gcHeapGrowthFactor = (k * (lastBytes - rt->gcHighFrequencyLowLimitBytes)
                                      + rt->gcHighFrequencyHeapGrowthMax);
                JS_ASSERT(gcHeapGrowthFactor <= rt->gcHighFrequencyHeapGrowthMax
                          && gcHeapGrowthFactor >= rt->gcHighFrequencyHeapGrowthMin);
            }
            rt->gcHighFrequencyGC = true;
        } else {
            gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
            rt->gcHighFrequencyGC = false;
        }
    }

    gcTriggerBytes = ComputeTriggerBytes(this, lastBytes, rt->gcMaxBytes, gckind);
}
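/*
 * Worked example of the interpolation (hypothetical limits): with
 * highFrequencyLowLimit == 100 MB, highFrequencyHighLimit == 500 MB,
 * growthMax == 3.0 and growthMin == 1.5, a zone with lastBytes == 300 MB in
 * high-frequency mode gets
 *
 *     k      = (1.5 - 3.0) / (500 - 100)   ==  -0.00375 per MB
 *     factor = k * (300 - 100) + 3.0       ==   2.25,
 *
 * i.e. a heap halfway between the two limits yields a factor halfway between
 * the two growth rates.
 */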
void
Zone::reduceGCTriggerBytes(size_t amount)
{
    JS_ASSERT(amount > 0);
    JS_ASSERT(gcTriggerBytes >= amount);
    if (gcTriggerBytes - amount < runtimeFromAnyThread()->gcAllocationThreshold * gcHeapGrowthFactor)
        return;
    gcTriggerBytes -= amount;
}
Allocator::Allocator(Zone *zone)
  : zone_(zone)
{}
void
ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
{
    for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
        FreeSpan *headSpan = &freeLists[i];
        if (!headSpan->isEmpty()) {
            ArenaHeader *aheader = headSpan->arenaHeader();
            aheader->allocatedDuringIncremental = true;
            rt->gcMarker.delayMarkingArena(aheader);
        }
    }
}
static inline void
PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
{
    arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
    runtime->gcArenasAllocatedDuringSweep = arena;
}
inline void *
ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind)
{
    /*
     * This function can be called from parallel threads all of which
     * are associated with the same compartment. In that case, each
     * thread will have a distinct ArenaLists. Therefore, whenever we
     * fall through to PickChunk() we must be sure that we are holding
     * a lock.
     */

    Chunk *chunk = nullptr;

    ArenaList *al = &arenaLists[thingKind];
    AutoLockGC maybeLock;

#ifdef JS_THREADSAFE
    volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
    if (*bfs != BFS_DONE) {
        /*
         * We cannot search the arena list for free things while the
         * background finalization runs and can modify head or cursor at any
         * moment. So we always allocate a new arena in that case.
         */
        maybeLock.lock(zone->runtimeFromAnyThread());
        if (*bfs == BFS_RUN) {
            JS_ASSERT(!*al->cursor);
            chunk = PickChunk(zone);
            if (!chunk) {
                /*
                 * Let the caller wait for the background allocation to
                 * finish and restart the allocation attempt.
                 */
                return nullptr;
            }
        } else if (*bfs == BFS_JUST_FINISHED) {
            /* See comments before BackgroundFinalizeState definition. */
            *bfs = BFS_DONE;
        } else {
            JS_ASSERT(*bfs == BFS_DONE);
        }
    }
#endif /* JS_THREADSAFE */

    if (!chunk) {
        if (ArenaHeader *aheader = *al->cursor) {
            JS_ASSERT(aheader->hasFreeThings());

            /*
             * The empty arenas are returned to the chunk and should not be
             * present on the list.
             */
            JS_ASSERT(!aheader->isEmpty());
            al->cursor = &aheader->next;

            /*
             * Move the free span stored in the arena to the free list and
             * allocate from it.
             */
            freeLists[thingKind] = aheader->getFirstFreeSpan();
            aheader->setAsFullyUsed();
            if (JS_UNLIKELY(zone->wasGCStarted())) {
                if (zone->needsBarrier()) {
                    aheader->allocatedDuringIncremental = true;
                    zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
                } else if (zone->isGCSweeping()) {
                    PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
                }
            }
            return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
        }

        /* Make sure we hold the GC lock before we call PickChunk. */
        if (!maybeLock.locked())
            maybeLock.lock(zone->runtimeFromAnyThread());
        chunk = PickChunk(zone);
        if (!chunk)
            return nullptr;
    }

    /*
     * While we still hold the GC lock get an arena from some chunk, mark it
     * as full as its single free span is moved to the free lists, and insert
     * it to the list as a fully allocated arena.
     *
     * We add the arena before the head, not after the tail pointed by the
     * cursor, so after the GC the most recently added arena will be used first
     * for allocations improving cache locality.
     */
    JS_ASSERT(!*al->cursor);
    ArenaHeader *aheader = chunk->allocateArena(zone, thingKind);
    if (!aheader)
        return nullptr;

    if (JS_UNLIKELY(zone->wasGCStarted())) {
        if (zone->needsBarrier()) {
            aheader->allocatedDuringIncremental = true;
            zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
        } else if (zone->isGCSweeping()) {
            PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
        }
    }
    aheader->next = al->head;
    if (!al->head) {
        JS_ASSERT(al->cursor == &al->head);
        al->cursor = &aheader->next;
    }
    al->head = aheader;

    /* See comments before allocateFromNewArena about this assert. */
    JS_ASSERT(!aheader->hasFreeThings());
    uintptr_t arenaAddr = aheader->arenaAddress();
    return freeLists[thingKind].allocateFromNewArena(arenaAddr,
                                                     Arena::firstThingOffset(thingKind),
                                                     Arena::thingSize(thingKind));
}
void *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind)
{
    return allocateFromArenaInline(zone, thingKind);
}
void
ArenaLists::finalizeNow(FreeOp *fop, AllocKind thingKind)
{
    JS_ASSERT(!IsBackgroundFinalized(thingKind));
    JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);

    ArenaHeader *arenas = arenaLists[thingKind].head;
    arenaLists[thingKind].clear();

    SliceBudget budget;
    FinalizeArenas(fop, &arenas, arenaLists[thingKind], thingKind, budget);
    JS_ASSERT(!arenas);
}
void
ArenaLists::queueForForegroundSweep(FreeOp *fop, AllocKind thingKind)
{
    JS_ASSERT(!IsBackgroundFinalized(thingKind));
    JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
    JS_ASSERT(!arenaListsToSweep[thingKind]);

    arenaListsToSweep[thingKind] = arenaLists[thingKind].head;
    arenaLists[thingKind].clear();
}
inline void
ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind)
{
    JS_ASSERT(IsBackgroundFinalized(thingKind));

#ifdef JS_THREADSAFE
    JS_ASSERT(!fop->runtime()->gcHelperThread.sweeping());
#endif

    ArenaList *al = &arenaLists[thingKind];
    if (!al->head) {
        JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
        JS_ASSERT(al->cursor == &al->head);
        return;
    }

    /*
     * The state can be done, or just-finished if we have not allocated any GC
     * things from the arena list after the previous background finalization.
     */
    JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE ||
              backgroundFinalizeState[thingKind] == BFS_JUST_FINISHED);

    arenaListsToSweep[thingKind] = al->head;
    al->clear();
    backgroundFinalizeState[thingKind] = BFS_RUN;
}
/*static*/ void
ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead, bool onBackgroundThread)
{
    JS_ASSERT(listHead);
    AllocKind thingKind = listHead->getAllocKind();
    Zone *zone = listHead->zone;

    ArenaList finalized;
    SliceBudget budget;
    FinalizeArenas(fop, &listHead, finalized, thingKind, budget);
    JS_ASSERT(!listHead);

    /*
     * After we finish the finalization al->cursor must point to the end of
     * the head list as we emptied the list before the background finalization
     * and the allocation adds new arenas before the cursor.
     */
    ArenaLists *lists = &zone->allocator.arenas;
    ArenaList *al = &lists->arenaLists[thingKind];

    AutoLockGC lock(fop->runtime());
    JS_ASSERT(lists->backgroundFinalizeState[thingKind] == BFS_RUN);
    JS_ASSERT(!*al->cursor);

    if (finalized.head) {
        *al->cursor = finalized.head;
        if (finalized.cursor != &finalized.head)
            al->cursor = finalized.cursor;
    }

    /*
     * We must set the state to BFS_JUST_FINISHED if we are running on the
     * background thread and we have touched the arena list, even if we add to
     * the list only fully allocated arenas without any free things. It ensures
     * that the allocation thread takes the GC lock and all writes to the free
     * list elements are propagated. As we always take the GC lock when
     * allocating new arenas from the chunks we can set the state to BFS_DONE if
     * we have released all finalized arenas back to their chunks.
     */
    if (onBackgroundThread && finalized.head)
        lists->backgroundFinalizeState[thingKind] = BFS_JUST_FINISHED;
    else
        lists->backgroundFinalizeState[thingKind] = BFS_DONE;

    lists->arenaListsToSweep[thingKind] = nullptr;
}
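/*
 * Summary of the backgroundFinalizeState transitions implied by the code
 * above (a reading aid, not from the original source):
 *
 *   BFS_DONE          --queueForBackgroundSweep-->         BFS_RUN
 *   BFS_RUN           --backgroundFinalize, arenas kept--> BFS_JUST_FINISHED
 *                       (only on the background thread)
 *   BFS_RUN           --backgroundFinalize, otherwise-->   BFS_DONE
 *   BFS_JUST_FINISHED --allocateFromArenaInline-->         BFS_DONE
 *                       (under the GC lock)
 */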
void
ArenaLists::queueObjectsForSweep(FreeOp *fop)
{
    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_OBJECT);

    finalizeNow(fop, FINALIZE_OBJECT0);
    finalizeNow(fop, FINALIZE_OBJECT2);
    finalizeNow(fop, FINALIZE_OBJECT4);
    finalizeNow(fop, FINALIZE_OBJECT8);
    finalizeNow(fop, FINALIZE_OBJECT12);
    finalizeNow(fop, FINALIZE_OBJECT16);

    queueForBackgroundSweep(fop, FINALIZE_OBJECT0_BACKGROUND);
    queueForBackgroundSweep(fop, FINALIZE_OBJECT2_BACKGROUND);
    queueForBackgroundSweep(fop, FINALIZE_OBJECT4_BACKGROUND);
    queueForBackgroundSweep(fop, FINALIZE_OBJECT8_BACKGROUND);
    queueForBackgroundSweep(fop, FINALIZE_OBJECT12_BACKGROUND);
    queueForBackgroundSweep(fop, FINALIZE_OBJECT16_BACKGROUND);
}
void
ArenaLists::queueStringsForSweep(FreeOp *fop)
{
    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_STRING);

    queueForBackgroundSweep(fop, FINALIZE_SHORT_STRING);
    queueForBackgroundSweep(fop, FINALIZE_STRING);

    queueForForegroundSweep(fop, FINALIZE_EXTERNAL_STRING);
}
void
ArenaLists::queueScriptsForSweep(FreeOp *fop)
{
    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SCRIPT);
    queueForForegroundSweep(fop, FINALIZE_SCRIPT);
    queueForForegroundSweep(fop, FINALIZE_LAZY_SCRIPT);
}
void
ArenaLists::queueIonCodeForSweep(FreeOp *fop)
{
    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_IONCODE);
    queueForForegroundSweep(fop, FINALIZE_IONCODE);
}
void
ArenaLists::queueShapesForSweep(FreeOp *fop)
{
    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SHAPE);

    queueForBackgroundSweep(fop, FINALIZE_SHAPE);
    queueForBackgroundSweep(fop, FINALIZE_BASE_SHAPE);
    queueForBackgroundSweep(fop, FINALIZE_TYPE_OBJECT);
}
static void *
RunLastDitchGC(JSContext *cx, JS::Zone *zone, AllocKind thingKind)
{
    /*
     * In parallel sections, we do not attempt to refill the free list
     * and hence do not encounter last ditch GC.
     */
    JS_ASSERT(!InParallelSection());

    PrepareZoneForGC(zone);

    JSRuntime *rt = cx->runtime();

    /* The last ditch GC preserves all atoms. */
    AutoKeepAtoms keepAtoms(cx->perThreadData);
    GC(rt, GC_NORMAL, JS::gcreason::LAST_DITCH);

    /*
     * The JSGC_END callback can legitimately allocate new GC
     * things and populate the free list. If that happens, just
     * return that list head.
     */
    size_t thingSize = Arena::thingSize(thingKind);
    if (void *thing = zone->allocator.arenas.allocateFromFreeList(thingKind, thingSize))
        return thing;

    return nullptr;
}
template <AllowGC allowGC>
/* static */ void *
ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind)
{
    JS_ASSERT(cx->allocator()->arenas.freeLists[thingKind].isEmpty());
    JS_ASSERT(!cx->isHeapBusy());

    Zone *zone = cx->allocator()->zone_;

    bool runGC = cx->allowGC() && allowGC &&
                 cx->asJSContext()->runtime()->gcIncrementalState != NO_INCREMENTAL &&
                 zone->gcBytes > zone->gcTriggerBytes;

    for (;;) {
        if (JS_UNLIKELY(runGC)) {
            if (void *thing = RunLastDitchGC(cx->asJSContext(), zone, thingKind))
                return thing;
        }

        /*
         * allocateFromArena may fail while the background finalization still
         * runs. If we aren't in a fork join, we want to wait for it to finish
         * and restart. However, checking for that is racy as the background
         * finalization could free some things after allocateFromArena decided
         * to fail but at this point it may have already stopped. To avoid
         * this race we always try to allocate twice.
         *
         * If we're in a fork join, we simply try it once and return whatever
         * value we get.
         */
        for (bool secondAttempt = false; ; secondAttempt = true) {
            void *thing = cx->allocator()->arenas.allocateFromArenaInline(zone, thingKind);
            if (JS_LIKELY(!!thing) || !cx->isJSContext())
                return thing;
            if (secondAttempt)
                break;

            cx->asJSContext()->runtime()->gcHelperThread.waitBackgroundSweepEnd();
        }

        if (!cx->allowGC() || !allowGC)
            return nullptr;

        /*
         * We failed to allocate. Run the GC if we haven't done it already.
         * Otherwise report OOM.
         */
        if (runGC)
            break;
        runGC = true;
    }

    JS_ASSERT(allowGC);
    js_ReportOutOfMemory(cx);
    return nullptr;
}

template void *
ArenaLists::refillFreeList<NoGC>(ThreadSafeContext *cx, AllocKind thingKind);

template void *
ArenaLists::refillFreeList<CanGC>(ThreadSafeContext *cx, AllocKind thingKind);
JS_FRIEND_API(JSGCTraceKind)
js_GetGCThingTraceKind(void *thing)
{
    return GetGCThingTraceKind(thing);
}
void
js::InitTracer(JSTracer *trc, JSRuntime *rt, JSTraceCallback callback)
{
    trc->runtime = rt;
    trc->callback = callback;
    trc->debugPrinter = nullptr;
    trc->debugPrintArg = nullptr;
    trc->debugPrintIndex = size_t(-1);
    trc->eagerlyTraceWeakMaps = TraceWeakMapValues;
#ifdef JS_GC_ZEAL
    trc->realLocation = nullptr;
#endif
}
/* static */ int64_t
SliceBudget::TimeBudget(int64_t millis)
{
    return millis * PRMJ_USEC_PER_MSEC;
}

/* static */ int64_t
SliceBudget::WorkBudget(int64_t work)
{
    /* For work = 0 not to mean Unlimited, we subtract 1. */
    return -work - 1;
}

SliceBudget::SliceBudget()
  : deadline(INT64_MAX),
    counter(INTPTR_MAX)
{
}

SliceBudget::SliceBudget(int64_t budget)
{
    if (budget == Unlimited) {
        deadline = INT64_MAX;
        counter = INTPTR_MAX;
    } else if (budget > 0) {
        deadline = PRMJ_Now() + budget;
        counter = CounterReset;
    } else {
        deadline = 0;
        counter = -budget - 1;
    }
}

bool
SliceBudget::checkOverBudget()
{
    bool over = PRMJ_Now() > deadline;
    if (!over)
        counter = CounterReset;
    return over;
}
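/*
 * Usage sketch (schematic caller, not from this file): a time-bounded slice
 * loops over work units, charging the budget and bailing out once the
 * deadline check fires:
 *
 *   SliceBudget budget(SliceBudget::TimeBudget(10));  // ~10 ms slice
 *   while (haveWork()) {
 *       doOneUnitOfWork();
 *       budget.step();
 *       if (budget.isOverBudget())
 *           break;      // resume in the next slice
 *   }
 *
 * step() only decrements a counter; the more expensive PRMJ_Now() call in
 * checkOverBudget() runs once per CounterReset steps.
 */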
GCMarker::GCMarker(JSRuntime *rt)
  : stack(size_t(-1)),
    color(BLACK),
    started(false),
    unmarkedArenaStackTop(nullptr),
    markLaterArenas(0),
    grayFailed(false)
{
    InitTracer(this, rt, nullptr);
}

bool
GCMarker::init()
{
    return stack.init(MARK_STACK_LENGTH);
}

void
GCMarker::start()
{
    JS_ASSERT(!started);
    started = true;
    color = BLACK;

    JS_ASSERT(!unmarkedArenaStackTop);
    JS_ASSERT(markLaterArenas == 0);

    /*
     * The GC is recomputing the liveness of WeakMap entries, so we delay
     * visiting entries.
     */
    eagerlyTraceWeakMaps = DoNotTraceWeakMaps;
}

void
GCMarker::stop()
{
    JS_ASSERT(isDrained());

    JS_ASSERT(started);
    started = false;

    JS_ASSERT(!unmarkedArenaStackTop);
    JS_ASSERT(markLaterArenas == 0);

    /* Free non-ballast stack memory. */
    stack.reset();

    resetBufferedGrayRoots();
    grayFailed = false;
}

void
GCMarker::reset()
{
    color = BLACK;

    stack.reset();
    JS_ASSERT(isMarkStackEmpty());

    while (unmarkedArenaStackTop) {
        ArenaHeader *aheader = unmarkedArenaStackTop;
        JS_ASSERT(aheader->hasDelayedMarking);
        JS_ASSERT(markLaterArenas);
        unmarkedArenaStackTop = aheader->getNextDelayedMarking();
        aheader->unsetDelayedMarking();
        aheader->markOverflow = 0;
        aheader->allocatedDuringIncremental = 0;
        markLaterArenas--;
    }
    JS_ASSERT(isDrained());
    JS_ASSERT(!markLaterArenas);
}
/*
 * When the native stack is low, the GC does not call JS_TraceChildren to mark
 * the reachable "children" of the thing. Rather the thing is put aside and
 * JS_TraceChildren is called later with more space on the C stack.
 *
 * To implement such delayed marking of the children with minimal overhead for
 * the normal case of sufficient native stack, the code adds a field per
 * arena. The field markingDelay->link links all arenas with delayed things
 * into a stack list with the pointer to stack top in
 * GCMarker::unmarkedArenaStackTop. delayMarkingChildren adds
 * arenas to the stack as necessary while markDelayedChildren pops the arenas
 * from the stack until it empties.
 */
inline void
GCMarker::delayMarkingArena(ArenaHeader *aheader)
{
    if (aheader->hasDelayedMarking) {
        /* Arena already scheduled to be marked later */
        return;
    }
    aheader->setNextDelayedMarking(unmarkedArenaStackTop);
    unmarkedArenaStackTop = aheader;
    markLaterArenas++;
}
void
GCMarker::delayMarkingChildren(const void *thing)
{
    const Cell *cell = reinterpret_cast<const Cell *>(thing);
    cell->arenaHeader()->markOverflow = 1;
    delayMarkingArena(cell->arenaHeader());
}
void
GCMarker::markDelayedChildren(ArenaHeader *aheader)
{
    if (aheader->markOverflow) {
        bool always = aheader->allocatedDuringIncremental;
        aheader->markOverflow = 0;

        for (CellIterUnderGC i(aheader); !i.done(); i.next()) {
            Cell *t = i.getCell();
            if (always || t->isMarked()) {
                t->markIfUnmarked();
                JS_TraceChildren(this, t, MapAllocToTraceKind(aheader->getAllocKind()));
            }
        }
    } else {
        JS_ASSERT(aheader->allocatedDuringIncremental);
        PushArena(this, aheader);
    }
    aheader->allocatedDuringIncremental = 0;
    /*
     * Note that during an incremental GC we may still be allocating into
     * aheader. However, prepareForIncrementalGC sets the
     * allocatedDuringIncremental flag if we continue marking.
     */
}
bool
GCMarker::markDelayedChildren(SliceBudget &budget)
{
    gcstats::Phase phase = runtime->gcIncrementalState == MARK
                         ? gcstats::PHASE_MARK_DELAYED
                         : gcstats::PHASE_SWEEP_MARK_DELAYED;
    gcstats::AutoPhase ap(runtime->gcStats, phase);

    JS_ASSERT(unmarkedArenaStackTop);
    do {
        /*
         * If marking gets delayed at the same arena again, we must repeat
         * marking of its things. For that we pop arena from the stack and
         * clear its hasDelayedMarking flag before we begin the marking.
         */
        ArenaHeader *aheader = unmarkedArenaStackTop;
        JS_ASSERT(aheader->hasDelayedMarking);
        JS_ASSERT(markLaterArenas);
        unmarkedArenaStackTop = aheader->getNextDelayedMarking();
        aheader->unsetDelayedMarking();
        markLaterArenas--;
        markDelayedChildren(aheader);

        budget.step(150);
        if (budget.isOverBudget())
            return false;
    } while (unmarkedArenaStackTop);
    JS_ASSERT(!markLaterArenas);

    return true;
}
#ifdef DEBUG
void
GCMarker::checkZone(void *p)
{
    JS_ASSERT(started);
    DebugOnly<Cell *> cell = static_cast<Cell *>(p);
    JS_ASSERT_IF(cell->isTenured(), cell->tenuredZone()->isCollecting());
}
#endif
bool
GCMarker::hasBufferedGrayRoots() const
{
    return !grayFailed;
}

void
GCMarker::startBufferingGrayRoots()
{
    JS_ASSERT(!grayFailed);
    for (GCZonesIter zone(runtime); !zone.done(); zone.next())
        JS_ASSERT(zone->gcGrayRoots.empty());

    JS_ASSERT(!callback);
    callback = GrayCallback;
    JS_ASSERT(IS_GC_MARKING_TRACER(this));
}
void
GCMarker::endBufferingGrayRoots()
{
    JS_ASSERT(callback == GrayCallback);
    callback = nullptr;
    JS_ASSERT(IS_GC_MARKING_TRACER(this));
}
void
GCMarker::resetBufferedGrayRoots()
{
    for (GCZonesIter zone(runtime); !zone.done(); zone.next())
        zone->gcGrayRoots.clearAndFree();
}
void
GCMarker::markBufferedGrayRoots(JS::Zone *zone)
{
    JS_ASSERT(!grayFailed);
    JS_ASSERT(zone->isGCMarkingGray());

    for (GrayRoot *elem = zone->gcGrayRoots.begin(); elem != zone->gcGrayRoots.end(); elem++) {
#ifdef DEBUG
        debugPrinter = elem->debugPrinter;
        debugPrintArg = elem->debugPrintArg;
        debugPrintIndex = elem->debugPrintIndex;
#endif
        void *tmp = elem->thing;
        JS_SET_TRACING_LOCATION(this, (void *)&elem->thing);
        MarkKind(this, &tmp, elem->kind);
        JS_ASSERT(tmp == elem->thing);
    }
}
void
GCMarker::appendGrayRoot(void *thing, JSGCTraceKind kind)
{
    JS_ASSERT(started);

    if (grayFailed)
        return;

    GrayRoot root(thing, kind);
#ifdef DEBUG
    root.debugPrinter = debugPrinter;
    root.debugPrintArg = debugPrintArg;
    root.debugPrintIndex = debugPrintIndex;
#endif

    Zone *zone = static_cast<Cell *>(thing)->tenuredZone();
    if (zone->isCollecting()) {
        zone->maybeAlive = true;
        if (!zone->gcGrayRoots.append(root)) {
            grayFailed = true;
            resetBufferedGrayRoots();
        }
    }
}
void
GCMarker::GrayCallback(JSTracer *trc, void **thingp, JSGCTraceKind kind)
{
    GCMarker *gcmarker = static_cast<GCMarker *>(trc);
    gcmarker->appendGrayRoot(*thingp, kind);
}
size_t
GCMarker::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
{
    size_t size = stack.sizeOfExcludingThis(mallocSizeOf);
    for (ZonesIter zone(runtime); !zone.done(); zone.next())
        size += zone->gcGrayRoots.sizeOfExcludingThis(mallocSizeOf);
    return size;
}
void
js::SetMarkStackLimit(JSRuntime *rt, size_t limit)
{
    JS_ASSERT(!rt->isHeapBusy());
    rt->gcMarker.setSizeLimit(limit);
}
void
js::MarkCompartmentActive(StackFrame *fp)
{
    fp->script()->compartment()->zone()->active = true;
}
static void
TriggerOperationCallback(JSRuntime *rt, JS::gcreason::Reason reason)
{
    if (rt->gcIsNeeded)
        return;

    rt->gcIsNeeded = true;
    rt->gcTriggerReason = reason;
    rt->triggerOperationCallback(JSRuntime::TriggerCallbackMainThread);
}
void
js::TriggerGC(JSRuntime *rt, JS::gcreason::Reason reason)
{
    /* Wait till end of parallel section to trigger GC. */
    if (InParallelSection()) {
        ForkJoinSlice::Current()->requestGC(reason);
        return;
    }

    /* Don't trigger GCs when allocating under the operation callback lock. */
    if (rt->currentThreadOwnsOperationCallbackLock())
        return;

    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));

    if (rt->isHeapBusy())
        return;

    JS::PrepareForFullGC(rt);
    TriggerOperationCallback(rt, reason);
}
void
js::TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
{
    /*
     * If parallel threads are running, wait till they
     * are stopped to trigger GC.
     */
    if (InParallelSection()) {
        ForkJoinSlice::Current()->requestZoneGC(zone, reason);
        return;
    }

    /* Zones in use by a thread with an exclusive context can't be collected. */
    if (zone->usedByExclusiveThread)
        return;

    JSRuntime *rt = zone->runtimeFromMainThread();

    /* Don't trigger GCs when allocating under the operation callback lock. */
    if (rt->currentThreadOwnsOperationCallbackLock())
        return;

    if (rt->isHeapBusy())
        return;

    if (rt->gcZeal() == ZealAllocValue) {
        TriggerGC(rt, reason);
        return;
    }

    if (rt->isAtomsZone(zone)) {
        /* We can't do a zone GC of the atoms compartment. */
        TriggerGC(rt, reason);
        return;
    }

    PrepareZoneForGC(zone);
    TriggerOperationCallback(rt, reason);
}
void
js::MaybeGC(JSContext *cx)
{
    JSRuntime *rt = cx->runtime();
    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));

    if (rt->gcZeal() == ZealAllocValue || rt->gcZeal() == ZealPokeValue) {
        JS::PrepareForFullGC(rt);
        GC(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
        return;
    }

    if (rt->gcIsNeeded) {
        GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
        return;
    }

    double factor = rt->gcHighFrequencyGC ? 0.85 : 0.9;
    Zone *zone = cx->zone();
    if (zone->gcBytes > 1024 * 1024 &&
        zone->gcBytes >= factor * zone->gcTriggerBytes &&
        rt->gcIncrementalState == NO_INCREMENTAL &&
        !rt->gcHelperThread.sweeping())
    {
        PrepareZoneForGC(zone);
        GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
        return;
    }

#ifndef JS_MORE_DETERMINISTIC
    /*
     * Access to the counters and, on 32 bit, setting gcNextFullGCTime below
     * is not atomic and a race condition could trigger or suppress the GC. We
     * tolerate this.
     */
    int64_t now = PRMJ_Now();
    if (rt->gcNextFullGCTime && rt->gcNextFullGCTime <= now) {
        if (rt->gcChunkAllocationSinceLastGC ||
            rt->gcNumArenasFreeCommitted > rt->gcDecommitThreshold)
        {
            JS::PrepareForFullGC(rt);
            GCSlice(rt, GC_SHRINK, JS::gcreason::MAYBEGC);
        } else {
            rt->gcNextFullGCTime = now + GC_IDLE_FULL_SPAN;
        }
    }
#endif
}
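/*
 * Example of the heuristic above (hypothetical numbers): with gcTriggerBytes
 * == 10 MB, a zone becomes eligible for a MAYBEGC slice once gcBytes reaches
 * 9 MB (factor 0.9), or already at 8.5 MB (factor 0.85) when the runtime is
 * in high-frequency-GC mode, i.e. the busier the heap, the earlier we start
 * collecting before the hard ALLOC_TRIGGER threshold is hit.
 */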
static void
DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
{
    Chunk *chunk = *availableListHeadp;
    if (!chunk)
        return;

    /*
     * Decommit is expensive so we avoid holding the GC lock while calling it.
     *
     * We decommit from the tail of the list to minimize interference with the
     * main thread that may start to allocate things at this point.
     *
     * The arena that is being decommitted outside the GC lock must not be
     * available for allocations either via the free list or via the
     * decommittedArenas bitmap. For that we just fetch the arena from the
     * free list before the decommit, pretending that it was allocated. If this
     * arena is also the single free arena in the chunk, then we must remove
     * the chunk from the available list before we release the lock so that the
     * allocation thread does not see chunks with no free arenas on the
     * available list.
     *
     * After we retake the lock, we mark the arena as free and decommitted if
     * the decommit was successful. We must also add the chunk back to the
     * available list if we removed it previously or when the main thread
     * has allocated all remaining free arenas in the chunk.
     *
     * We also must make sure that the aheader is not accessed again after we
     * decommit the arena.
     */
    JS_ASSERT(chunk->info.prevp == availableListHeadp);
    while (Chunk *next = chunk->info.next) {
        JS_ASSERT(next->info.prevp == &chunk->info.next);
        chunk = next;
    }

    for (;;) {
        while (chunk->info.numArenasFreeCommitted != 0) {
            ArenaHeader *aheader = chunk->fetchNextFreeArena(rt);

            Chunk **savedPrevp = chunk->info.prevp;
            if (!chunk->hasAvailableArenas())
                chunk->removeFromAvailableList();

            size_t arenaIndex = Chunk::arenaIndex(aheader->arenaAddress());
            bool ok;
            {
                /*
                 * If the main thread waits for the decommit to finish, skip
                 * the potentially expensive unlock/lock pair on the contested
                 * lock.
                 */
                Maybe<AutoUnlockGC> maybeUnlock;
                if (!rt->isHeapBusy())
                    maybeUnlock.construct(rt);
                ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize);
            }

            if (ok) {
                ++chunk->info.numArenasFree;
                chunk->decommittedArenas.set(arenaIndex);
            } else {
                chunk->addArenaToFreeList(rt, aheader);
            }
            JS_ASSERT(chunk->hasAvailableArenas());
            JS_ASSERT(!chunk->unused());
            if (chunk->info.numArenasFree == 1) {
                /*
                 * Put the chunk back to the available list either at the
                 * point where it was before to preserve the available list
                 * that we enumerate, or, when the allocation thread has fully
                 * used all the previous chunks, at the beginning of the
                 * available list.
                 */
                Chunk **insertPoint = savedPrevp;
                if (savedPrevp != availableListHeadp) {
                    Chunk *prev = Chunk::fromPointerToNext(savedPrevp);
                    if (!prev->hasAvailableArenas())
                        insertPoint = availableListHeadp;
                }
                chunk->insertToAvailableList(insertPoint);
            } else {
                JS_ASSERT(chunk->info.prevp);
            }

            if (rt->gcChunkAllocationSinceLastGC || !ok) {
                /*
                 * The allocator thread has started to get new chunks. We
                 * should stop to avoid decommitting arenas in just allocated
                 * chunks.
                 */
                return;
            }
        }

        /*
         * chunk->info.prevp becomes null when the allocator thread consumed
         * all chunks from the available list.
         */
        JS_ASSERT_IF(chunk->info.prevp, *chunk->info.prevp == chunk);
        if (chunk->info.prevp == availableListHeadp || !chunk->info.prevp)
            break;

        /*
         * prevp exists and is not the list head. It must point to the next
         * field of the previous chunk.
         */
        chunk = chunk->getPrevious();
    }
}
static void
DecommitArenas(JSRuntime *rt)
{
    DecommitArenasFromAvailableList(rt, &rt->gcSystemAvailableChunkListHead);
    DecommitArenasFromAvailableList(rt, &rt->gcUserAvailableChunkListHead);
}
/* Must be called with the GC lock taken. */
static void
ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
{
    if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) {
        AutoUnlockGC unlock(rt);
        FreeChunkList(rt, toFree);
    }

    if (shouldShrink)
        DecommitArenas(rt);
}
static void
SweepBackgroundThings(JSRuntime *rt, bool onBackgroundThread)
{
    /*
     * We must finalize in the correct order, see comments in
     * finalizeObjects.
     */
    FreeOp fop(rt, false);
    for (int phase = 0 ; phase < BackgroundPhaseCount ; ++phase) {
        for (Zone *zone = rt->gcSweepingZones; zone; zone = zone->gcNextGraphNode) {
            for (int index = 0 ; index < BackgroundPhaseLength[phase] ; ++index) {
                AllocKind kind = BackgroundPhases[phase][index];
                ArenaHeader *arenas = zone->allocator.arenas.arenaListsToSweep[kind];
                if (arenas)
                    ArenaLists::backgroundFinalize(&fop, arenas, onBackgroundThread);
            }
        }
    }

    rt->gcSweepingZones = nullptr;
}
#ifdef JS_THREADSAFE
static void
AssertBackgroundSweepingFinished(JSRuntime *rt)
{
    JS_ASSERT(!rt->gcSweepingZones);
    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        for (unsigned i = 0; i < FINALIZE_LIMIT; ++i) {
            JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
            JS_ASSERT(zone->allocator.arenas.doneBackgroundFinalize(AllocKind(i)));
        }
    }
}
static unsigned
GetCPUCount()
{
    static unsigned ncpus = 0;
    if (ncpus == 0) {
# ifdef XP_WIN
        SYSTEM_INFO sysinfo;
        GetSystemInfo(&sysinfo);
        ncpus = unsigned(sysinfo.dwNumberOfProcessors);
# else
        long n = sysconf(_SC_NPROCESSORS_ONLN);
        ncpus = (n > 0) ? unsigned(n) : 1;
# endif
    }
    return ncpus;
}
#endif /* JS_THREADSAFE */
bool
GCHelperThread::init()
{
    if (!rt->useHelperThreads()) {
        backgroundAllocation = false;
        return true;
    }

#ifdef JS_THREADSAFE
    if (!(wakeup = PR_NewCondVar(rt->gcLock)))
        return false;
    if (!(done = PR_NewCondVar(rt->gcLock)))
        return false;

    thread = PR_CreateThread(PR_USER_THREAD, threadMain, this, PR_PRIORITY_NORMAL,
                             PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
    if (!thread)
        return false;

    backgroundAllocation = (GetCPUCount() >= 2);
#endif /* JS_THREADSAFE */
    return true;
}
void
GCHelperThread::finish()
{
    if (!rt->useHelperThreads()) {
        JS_ASSERT(state == IDLE);
        return;
    }

#ifdef JS_THREADSAFE
    PRThread *join = nullptr;
    {
        AutoLockGC lock(rt);
        if (thread && state != SHUTDOWN) {
            /*
             * We cannot be in the ALLOCATING or CANCEL_ALLOCATION states as
             * the allocations should have been stopped during the last GC.
             */
            JS_ASSERT(state == IDLE || state == SWEEPING);
            state = SHUTDOWN;
            PR_NotifyCondVar(wakeup);
            join = thread;
        }
    }
    if (join) {
        /* PR_DestroyThread is not necessary. */
        PR_JoinThread(join);
    }
    if (wakeup)
        PR_DestroyCondVar(wakeup);
    if (done)
        PR_DestroyCondVar(done);
#endif /* JS_THREADSAFE */
}
#ifdef JS_THREADSAFE
#ifdef MOZ_NUWA_PROCESS
extern "C" {
MFBT_API bool IsNuwaProcess();
MFBT_API void NuwaMarkCurrentThread(void (*recreate)(void *), void *arg);
}
#endif

/* static */
void
GCHelperThread::threadMain(void *arg)
{
    PR_SetCurrentThreadName("JS GC Helper");

#ifdef MOZ_NUWA_PROCESS
    if (IsNuwaProcess && IsNuwaProcess()) {
        JS_ASSERT(NuwaMarkCurrentThread != nullptr);
        NuwaMarkCurrentThread(nullptr, nullptr);
    }
#endif

    static_cast<GCHelperThread *>(arg)->threadLoop();
}
void
GCHelperThread::threadLoop()
{
    AutoLockGC lock(rt);

#if JS_TRACE_LOGGING
    TraceLogging *logger = TraceLogging::getLogger(TraceLogging::GC_BACKGROUND);
#endif

    /*
     * Even on the first iteration the state can be SHUTDOWN or SWEEPING if
     * the stop request or the GC and the corresponding startBackgroundSweep call
     * happen before this thread has a chance to run.
     */
    for (;;) {
        switch (state) {
          case SHUTDOWN:
            return;
          case IDLE:
            PR_WaitCondVar(wakeup, PR_INTERVAL_NO_TIMEOUT);
            break;
          case SWEEPING:
#if JS_TRACE_LOGGING
            logger->log(TraceLogging::GC_SWEEPING_START);
#endif
            doSweep();
            if (state == SWEEPING)
                state = IDLE;
            PR_NotifyAllCondVar(done);
#if JS_TRACE_LOGGING
            logger->log(TraceLogging::GC_SWEEPING_STOP);
#endif
            break;
          case ALLOCATING:
#if JS_TRACE_LOGGING
            logger->log(TraceLogging::GC_ALLOCATING_START);
#endif
            do {
                Chunk *chunk;
                {
                    AutoUnlockGC unlock(rt);
                    chunk = Chunk::allocate(rt);
                }

                /* OOM stops the background allocation. */
                if (!chunk) {
#if JS_TRACE_LOGGING
                    logger->log(TraceLogging::GC_ALLOCATING_STOP);
#endif
                    break;
                }
                JS_ASSERT(chunk->info.numArenasFreeCommitted == ArenasPerChunk);
                rt->gcNumArenasFreeCommitted += ArenasPerChunk;
                rt->gcChunkPool.put(chunk);
            } while (state == ALLOCATING && rt->gcChunkPool.wantBackgroundAllocation(rt));
            if (state == ALLOCATING)
                state = IDLE;
#if JS_TRACE_LOGGING
            logger->log(TraceLogging::GC_ALLOCATING_STOP);
#endif
            break;
          case CANCEL_ALLOCATION:
            state = IDLE;
            PR_NotifyAllCondVar(done);
            break;
        }
    }
}
#endif /* JS_THREADSAFE */
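/*
 * Approximate state machine for the helper thread loop above. Transitions
 * marked (M) are performed by the main thread, (H) by the helper itself:
 *
 *   IDLE --(M: startBackgroundSweep/startBackgroundShrink)--> SWEEPING --(H)--> IDLE
 *   IDLE --(M: startBackgroundAllocationIfIdle)--> ALLOCATING --(H)--> IDLE
 *   ALLOCATING --(M: waitBackgroundSweepOrAllocEnd)--> CANCEL_ALLOCATION --(H)--> IDLE
 *   any state --(M: finish)--> SHUTDOWN
 */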
void
GCHelperThread::startBackgroundSweep(bool shouldShrink)
{
    JS_ASSERT(rt->useHelperThreads());

#ifdef JS_THREADSAFE
    AutoLockGC lock(rt);
    JS_ASSERT(state == IDLE);
    JS_ASSERT(!sweepFlag);
    sweepFlag = true;
    shrinkFlag = shouldShrink;
    state = SWEEPING;
    PR_NotifyCondVar(wakeup);
#endif /* JS_THREADSAFE */
}
/* Must be called with the GC lock taken. */
void
GCHelperThread::startBackgroundShrink()
{
    JS_ASSERT(rt->useHelperThreads());

#ifdef JS_THREADSAFE
    switch (state) {
      case IDLE:
        JS_ASSERT(!sweepFlag);
        shrinkFlag = true;
        state = SWEEPING;
        PR_NotifyCondVar(wakeup);
        break;
      case SWEEPING:
        shrinkFlag = true;
        break;
      case ALLOCATING:
      case CANCEL_ALLOCATION:
        /*
         * If we have started background allocation there is nothing to
         * shrink.
         */
        break;
      case SHUTDOWN:
        MOZ_ASSUME_UNREACHABLE("No shrink on shutdown");
    }
#endif /* JS_THREADSAFE */
}
void
GCHelperThread::waitBackgroundSweepEnd()
{
    if (!rt->useHelperThreads()) {
        JS_ASSERT(state == IDLE);
        return;
    }

#ifdef JS_THREADSAFE
    AutoLockGC lock(rt);
    while (state == SWEEPING)
        PR_WaitCondVar(done, PR_INTERVAL_NO_TIMEOUT);
    if (rt->gcIncrementalState == NO_INCREMENTAL)
        AssertBackgroundSweepingFinished(rt);
#endif /* JS_THREADSAFE */
}
void
GCHelperThread::waitBackgroundSweepOrAllocEnd()
{
    if (!rt->useHelperThreads()) {
        JS_ASSERT(state == IDLE);
        return;
    }

#ifdef JS_THREADSAFE
    AutoLockGC lock(rt);
    if (state == ALLOCATING)
        state = CANCEL_ALLOCATION;
    while (state == SWEEPING || state == CANCEL_ALLOCATION)
        PR_WaitCondVar(done, PR_INTERVAL_NO_TIMEOUT);
    if (rt->gcIncrementalState == NO_INCREMENTAL)
        AssertBackgroundSweepingFinished(rt);
#endif /* JS_THREADSAFE */
}
/* Must be called with the GC lock taken. */
void
GCHelperThread::startBackgroundAllocationIfIdle()
{
    JS_ASSERT(rt->useHelperThreads());

#ifdef JS_THREADSAFE
    if (state == IDLE) {
        state = ALLOCATING;
        PR_NotifyCondVar(wakeup);
    }
#endif /* JS_THREADSAFE */
}
void
GCHelperThread::replenishAndFreeLater(void *ptr)
{
    JS_ASSERT(freeCursor == freeCursorEnd);
    do {
        if (freeCursor && !freeVector.append(freeCursorEnd - FREE_ARRAY_LENGTH))
            break;
        freeCursor = (void **) js_malloc(FREE_ARRAY_SIZE);
        if (!freeCursor) {
            freeCursorEnd = nullptr;
            break;
        }
        freeCursorEnd = freeCursor + FREE_ARRAY_LENGTH;
        *freeCursor++ = ptr;
        return;
    } while (false);
    js_free(ptr);
}
#ifdef JS_THREADSAFE
/* Must be called with the GC lock taken. */
void
GCHelperThread::doSweep()
{
    if (sweepFlag) {
        sweepFlag = false;
        AutoUnlockGC unlock(rt);

        SweepBackgroundThings(rt, true);

        if (freeCursor) {
            void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
            freeElementsAndArray(array, freeCursor);
            freeCursor = freeCursorEnd = nullptr;
        } else {
            JS_ASSERT(!freeCursorEnd);
        }
        for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
            void **array = *iter;
            freeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
        }
        freeVector.resize(0);

        rt->freeLifoAlloc.freeAll();
    }

    bool shrinking = shrinkFlag;
    ExpireChunksAndArenas(rt, shrinking);

    /*
     * The main thread may have called ShrinkGCBuffers while
     * ExpireChunksAndArenas(rt, false) was running, so we recheck the flag
     * afterwards.
     */
    if (!shrinking && shrinkFlag) {
        shrinkFlag = false;
        ExpireChunksAndArenas(rt, true);
    }
}
#endif /* JS_THREADSAFE */
bool
GCHelperThread::onBackgroundThread()
{
#ifdef JS_THREADSAFE
    return PR_GetCurrentThread() == getThread();
#else
    return false;
#endif
}
static bool
ReleaseObservedTypes(JSRuntime *rt)
{
    bool releaseTypes = rt->gcZeal() != 0;

#ifndef JS_MORE_DETERMINISTIC
    int64_t now = PRMJ_Now();
    if (now >= rt->gcJitReleaseTime)
        releaseTypes = true;
    if (releaseTypes)
        rt->gcJitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
#endif

    return releaseTypes;
}
/*
 * It's simpler if we preserve the invariant that every zone has at least one
 * compartment. If we know we're deleting the entire zone, then
 * SweepCompartments is allowed to delete all compartments. In this case,
 * |keepAtleastOne| is false. If some objects remain in the zone so that it
 * cannot be deleted, then we set |keepAtleastOne| to true, which prohibits
 * SweepCompartments from deleting every compartment. Instead, it preserves an
 * arbitrary compartment in the zone.
 */
static void
SweepCompartments(FreeOp *fop, Zone *zone, bool keepAtleastOne, bool lastGC)
{
    JSRuntime *rt = zone->runtimeFromMainThread();
    JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;

    JSCompartment **read = zone->compartments.begin();
    JSCompartment **end = zone->compartments.end();
    JSCompartment **write = read;
    bool foundOne = false;
    while (read < end) {
        JSCompartment *comp = *read++;
        JS_ASSERT(!rt->isAtomsCompartment(comp));

        /*
         * Don't delete the last compartment if all the ones before it were
         * deleted and keepAtleastOne is true.
         */
        bool dontDelete = read == end && !foundOne && keepAtleastOne;
        if ((!comp->marked && !dontDelete) || lastGC) {
            if (callback)
                callback(fop, comp);
            if (comp->principals)
                JS_DropPrincipals(rt, comp->principals);
            js_delete(comp);
        } else {
            *write++ = comp;
            foundOne = true;
        }
    }
    zone->compartments.resize(write - zone->compartments.begin());
    JS_ASSERT_IF(keepAtleastOne, !zone->compartments.empty());
}
static void
SweepZones(FreeOp *fop, bool lastGC)
{
    JSRuntime *rt = fop->runtime();

    /* Skip the atomsCompartment zone. */
    Zone **read = rt->zones.begin() + 1;
    Zone **end = rt->zones.end();
    Zone **write = read;
    JS_ASSERT(rt->zones.length() >= 1);
    JS_ASSERT(rt->isAtomsZone(rt->zones[0]));

    while (read < end) {
        Zone *zone = *read++;

        if (!zone->hold && zone->wasGCStarted()) {
            if (zone->allocator.arenas.arenaListsAreEmpty() || lastGC) {
                zone->allocator.arenas.checkEmptyFreeLists();
                SweepCompartments(fop, zone, false, lastGC);
                JS_ASSERT(zone->compartments.empty());
                fop->delete_(zone);
                continue;
            }
            SweepCompartments(fop, zone, true, lastGC);
        }
        *write++ = zone;
    }
    rt->zones.resize(write - rt->zones.begin());
}
static void
PurgeRuntime(JSRuntime *rt)
{
    for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
        comp->purge();

    rt->freeLifoAlloc.transferUnusedFrom(&rt->tempLifoAlloc);
    rt->interpreterStack().purge(rt);

    rt->gsnCache.purge();
    rt->newObjectCache.purge();
    rt->nativeIterCache.purge();
    rt->sourceDataCache.purge();
    rt->evalCache.clear();

    bool activeCompilations = false;
    for (ThreadDataIter iter(rt); !iter.done(); iter.next())
        activeCompilations |= iter->activeCompilations;
    if (!activeCompilations)
        rt->parseMapPool().purgeAll();
}
static bool
ShouldPreserveJITCode(JSCompartment *comp, int64_t currentTime)
{
    JSRuntime *rt = comp->runtimeFromMainThread();
    if (rt->gcShouldCleanUpEverything || !comp->zone()->types.inferenceEnabled)
        return false;

    if (rt->alwaysPreserveCode)
        return true;
    if (comp->lastAnimationTime + PRMJ_USEC_PER_SEC >= currentTime &&
        comp->lastCodeRelease + (PRMJ_USEC_PER_SEC * 300) >= currentTime)
    {
        return true;
    }

    comp->lastCodeRelease = currentTime;
    return false;
}
#ifdef DEBUG
struct CompartmentCheckTracer : public JSTracer
{
    Cell *src;
    JSGCTraceKind srcKind;
    Zone *zone;
    JSCompartment *compartment;
};
static bool
InCrossCompartmentMap(JSObject *src, Cell *dst, JSGCTraceKind dstKind)
{
    JSCompartment *srccomp = src->compartment();

    if (dstKind == JSTRACE_OBJECT) {
        Value key = ObjectValue(*static_cast<JSObject *>(dst));
        if (WrapperMap::Ptr p = srccomp->lookupWrapper(key)) {
            if (*p->value.unsafeGet() == ObjectValue(*src))
                return true;
        }
    }

    /*
     * If the cross-compartment edge is caused by the debugger, then we don't
     * know the right hashtable key, so we have to iterate.
     */
    for (JSCompartment::WrapperEnum e(srccomp); !e.empty(); e.popFront()) {
        if (e.front().key.wrapped == dst && ToMarkable(e.front().value) == src)
            return true;
    }

    return false;
}
static void
CheckCompartment(CompartmentCheckTracer *trc, JSCompartment *thingCompartment,
                 Cell *thing, JSGCTraceKind kind)
{
    JS_ASSERT(thingCompartment == trc->compartment ||
              trc->runtime->isAtomsCompartment(thingCompartment) ||
              (trc->srcKind == JSTRACE_OBJECT &&
               InCrossCompartmentMap((JSObject *)trc->src, thing, kind)));
}
static JSCompartment *
CompartmentOfCell(Cell *thing, JSGCTraceKind kind)
{
    if (kind == JSTRACE_OBJECT)
        return static_cast<JSObject *>(thing)->compartment();
    else if (kind == JSTRACE_SHAPE)
        return static_cast<Shape *>(thing)->compartment();
    else if (kind == JSTRACE_BASE_SHAPE)
        return static_cast<BaseShape *>(thing)->compartment();
    else if (kind == JSTRACE_SCRIPT)
        return static_cast<JSScript *>(thing)->compartment();
    else
        return nullptr;
}
static void
CheckCompartmentCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind kind)
{
    CompartmentCheckTracer *trc = static_cast<CompartmentCheckTracer *>(trcArg);
    Cell *thing = (Cell *)*thingp;

    JSCompartment *comp = CompartmentOfCell(thing, kind);
    if (comp && trc->compartment) {
        CheckCompartment(trc, comp, thing, kind);
    } else {
        JS_ASSERT(thing->tenuredZone() == trc->zone ||
                  trc->runtime->isAtomsZone(thing->tenuredZone()));
    }
}
static void
CheckForCompartmentMismatches(JSRuntime *rt)
{
    if (rt->gcDisableStrictProxyCheckingCount)
        return;

    CompartmentCheckTracer trc;
    JS_TracerInit(&trc, rt, CheckCompartmentCallback);

    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        trc.zone = zone;
        for (size_t thingKind = 0; thingKind < FINALIZE_LAST; thingKind++) {
            for (CellIterUnderGC i(zone, AllocKind(thingKind)); !i.done(); i.next()) {
                trc.src = i.getCell();
                trc.srcKind = MapAllocToTraceKind(AllocKind(thingKind));
                trc.compartment = CompartmentOfCell(trc.src, trc.srcKind);
                JS_TraceChildren(&trc, trc.src, trc.srcKind);
            }
        }
    }
}
#endif /* DEBUG */
static bool
BeginMarkPhase(JSRuntime *rt)
{
    int64_t currentTime = PRMJ_Now();

#ifdef DEBUG
    if (rt->gcFullCompartmentChecks)
        CheckForCompartmentMismatches(rt);
#endif

    rt->gcIsFull = true;
    bool any = false;
    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        /* Assert that zone state is as we expect */
        JS_ASSERT(!zone->isCollecting());
        JS_ASSERT(!zone->compartments.empty());
        for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
            JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);

        /* Set up which zones will be collected. */
        if (zone->isGCScheduled()) {
            if (!rt->isAtomsZone(zone)) {
                any = true;
                zone->setGCState(Zone::Mark);
            }
        } else {
            rt->gcIsFull = false;
        }

        zone->scheduledForDestruction = false;
        zone->maybeAlive = zone->hold;
        zone->setPreservingCode(false);
    }

    for (CompartmentsIter c(rt); !c.done(); c.next()) {
        JS_ASSERT(!c->gcLiveArrayBuffers);
        c->marked = false;
        if (ShouldPreserveJITCode(c, currentTime))
            c->zone()->setPreservingCode(true);
    }

    /*
     * Atoms are not in the cross-compartment map. So if there are any
     * zones that are not being collected, we are not allowed to collect
     * atoms. Otherwise, the non-collected zones could contain pointers
     * to atoms that we would miss.
     */
    Zone *atomsZone = rt->atomsCompartment()->zone();

    bool keepAtoms = false;
    for (ThreadDataIter iter(rt); !iter.done(); iter.next())
        keepAtoms |= iter->gcKeepAtoms;

    /*
     * We don't scan the stacks of exclusive threads, so we need to avoid
     * collecting their objects in another way. The only GC thing pointers they
     * have are to their exclusive compartment (which is not collected) or to
     * the atoms compartment. Therefore, we avoid collecting the atoms
     * compartment when exclusive threads are running.
     */
    keepAtoms |= rt->exclusiveThreadsPresent();

    if (atomsZone->isGCScheduled() && rt->gcIsFull && !keepAtoms) {
        JS_ASSERT(!atomsZone->isCollecting());
        atomsZone->setGCState(Zone::Mark);
        any = true;
    }

    /* Check that at least one zone is scheduled for collection. */
    if (!any)
        return false;

    /*
     * At the end of each incremental slice, we call prepareForIncrementalGC,
     * which marks objects in all arenas that we're currently allocating
     * into. This can cause leaks if unreachable objects are in these
     * arenas. This purge call ensures that we only mark arenas that have had
     * allocations after the incremental GC started.
     */
    if (rt->gcIsIncremental) {
        for (GCZonesIter zone(rt); !zone.done(); zone.next())
            zone->allocator.arenas.purge();
    }

    rt->gcMarker.start();
    JS_ASSERT(!rt->gcMarker.callback);
    JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gcMarker));

    /* For non-incremental GC the following sweep discards the jit code. */
    if (rt->gcIsIncremental) {
        for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK_DISCARD_CODE);
            zone->discardJitCode(rt->defaultFreeOp());
        }
    }

    GCMarker *gcmarker = &rt->gcMarker;

    rt->gcStartNumber = rt->gcNumber;

    /*
     * We must purge the runtime at the beginning of an incremental GC. The
     * danger if we purge later is that the snapshot invariant of incremental
     * GC will be broken, as follows. If some object is reachable only through
     * some cache (say the dtoaCache) then it will not be part of the snapshot.
     * If we purge after root marking, then the mutator could obtain a pointer
     * to the object and start using it. This object might never be marked, so
     * a GC hazard would exist.
     */
    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_PURGE);
        PurgeRuntime(rt);
    }

    /*
     * Mark phase.
     */
    gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_MARK);
    gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_MARK_ROOTS);

    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
        /* Unmark everything in the zones being collected. */
        zone->allocator.arenas.unmarkAll();
    }

    for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
        /* Reset weak map list for the compartments being collected. */
        WeakMapBase::resetCompartmentWeakMapList(c);
    }

    MarkRuntime(gcmarker);
    BufferGrayRoots(gcmarker);

    /*
     * This code ensures that if a zone is "dead", then it will be
     * collected in this GC. A zone is considered dead if its maybeAlive
     * flag is false. The maybeAlive flag is set if:
     * (1) the zone has incoming cross-compartment edges, or
     * (2) an object in the zone was marked during root marking, either
     * as a black root or a gray root.
     * If maybeAlive is false, then we set the scheduledForDestruction flag.
     * At any time later in the GC, if we try to mark an object whose
     * zone is scheduled for destruction, we will assert.
     * NOTE: Due to bug 811587, we only assert if gcManipulatingDeadCompartments
     * is true (e.g., if we're doing a brain transplant).
     *
     * The purpose of this check is to ensure that a zone that we would
     * normally destroy is not resurrected by a read barrier or an
     * allocation. This might happen during a function like JS_TransplantObject,
     * which iterates over all compartments, live or dead, and operates on their
     * objects. See bug 803376 for details on this problem. To avoid the
     * problem, we are very careful to avoid allocation and read barriers during
     * JS_TransplantObject and the like. The code here ensures that we don't
     * regress.
     *
     * Note that there are certain cases where allocations or read barriers in
     * dead zones are difficult to avoid. We detect such cases (via the
     * gcObjectsMarkedInDeadCompartment counter) and redo any ongoing GCs after
     * the JS_TransplantObject function has finished. This ensures that the dead
     * zones will be cleaned up. See AutoMarkInDeadZone and
     * AutoMaybeTouchDeadZones for details.
     */

    /* Set the maybeAlive flag based on cross-compartment edges. */
    for (CompartmentsIter c(rt); !c.done(); c.next()) {
        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
            Cell *dst = e.front().key.wrapped;
            dst->tenuredZone()->maybeAlive = true;
        }
    }

    /*
     * For black roots, code in gc/Marking.cpp will already have set maybeAlive
     * during MarkRuntime.
     */

    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
        if (!zone->maybeAlive)
            zone->scheduledForDestruction = true;
    }
    rt->gcFoundBlackGrayEdges = false;

    return true;
}
template <class CompartmentIterT>
static void
MarkWeakReferences(JSRuntime *rt, gcstats::Phase phase)
{
    GCMarker *gcmarker = &rt->gcMarker;
    JS_ASSERT(gcmarker->isDrained());

    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
    gcstats::AutoPhase ap1(rt->gcStats, phase);

    for (;;) {
        bool markedAny = false;
        for (CompartmentIterT c(rt); !c.done(); c.next()) {
            markedAny |= WatchpointMap::markCompartmentIteratively(c, gcmarker);
            markedAny |= WeakMapBase::markCompartmentIteratively(c, gcmarker);
        }
        markedAny |= Debugger::markAllIteratively(gcmarker);

        if (!markedAny)
            break;

        SliceBudget budget;
        gcmarker->drainMarkStack(budget);
    }
    JS_ASSERT(gcmarker->isDrained());
}
static void
MarkWeakReferencesInCurrentGroup(JSRuntime *rt, gcstats::Phase phase)
{
    MarkWeakReferences<GCCompartmentGroupIter>(rt, phase);
}
template <class ZoneIterT, class CompartmentIterT>
static void
MarkGrayReferences(JSRuntime *rt)
{
    GCMarker *gcmarker = &rt->gcMarker;

    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
        gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_SWEEP_MARK_GRAY);
        gcmarker->setMarkColorGray();
        if (gcmarker->hasBufferedGrayRoots()) {
            for (ZoneIterT zone(rt); !zone.done(); zone.next())
                gcmarker->markBufferedGrayRoots(zone);
        } else {
            JS_ASSERT(!rt->gcIsIncremental);
            if (JSTraceDataOp op = rt->gcGrayRootTracer.op)
                (*op)(gcmarker, rt->gcGrayRootTracer.data);
        }

        SliceBudget budget;
        gcmarker->drainMarkStack(budget);
    }

    MarkWeakReferences<CompartmentIterT>(rt, gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);

    JS_ASSERT(gcmarker->isDrained());

    gcmarker->setMarkColorBlack();
}
static void
MarkGrayReferencesInCurrentGroup(JSRuntime *rt)
{
    MarkGrayReferences<GCZoneGroupIter, GCCompartmentGroupIter>(rt);
}
#ifdef DEBUG
static void
MarkAllWeakReferences(JSRuntime *rt, gcstats::Phase phase)
{
    MarkWeakReferences<GCCompartmentsIter>(rt, phase);
}

static void
MarkAllGrayReferences(JSRuntime *rt)
{
    MarkGrayReferences<GCZonesIter, GCCompartmentsIter>(rt);
}
class js::gc::MarkingValidator
{
  public:
    MarkingValidator(JSRuntime *rt);
    ~MarkingValidator();
    void nonIncrementalMark();
    void validate();

  private:
    JSRuntime *runtime;
    bool initialized;

    typedef HashMap<Chunk *, ChunkBitmap *, GCChunkHasher, SystemAllocPolicy> BitmapMap;
    BitmapMap map;
};

js::gc::MarkingValidator::MarkingValidator(JSRuntime *rt)
  : runtime(rt),
    initialized(false)
{}

js::gc::MarkingValidator::~MarkingValidator()
{
    if (!map.initialized())
        return;

    for (BitmapMap::Range r(map.all()); !r.empty(); r.popFront())
        js_delete(r.front().value);
}
void
js::gc::MarkingValidator::nonIncrementalMark()
{
    /*
     * Perform a non-incremental mark for all collecting zones and record
     * the results for later comparison.
     *
     * Currently this does not validate gray marking.
     */

    if (!map.init())
        return;

    GCMarker *gcmarker = &runtime->gcMarker;

    /* Save existing mark bits. */
    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
        ChunkBitmap *bitmap = &r.front()->bitmap;
        ChunkBitmap *entry = js_new<ChunkBitmap>();
        if (!entry)
            return;

        memcpy((void *)entry->bitmap, (void *)bitmap->bitmap, sizeof(bitmap->bitmap));
        if (!map.putNew(r.front(), entry))
            return;
    }

    /*
     * Save the lists of live weakmaps and array buffers for the compartments we
     * are collecting.
     */
    WeakMapVector weakmaps;
    ArrayBufferVector arrayBuffers;
    for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
        if (!WeakMapBase::saveCompartmentWeakMapList(c, weakmaps) ||
            !ArrayBufferObject::saveArrayBufferList(c, arrayBuffers))
        {
            return;
        }
    }

    /*
     * After this point, the function should run to completion, so we shouldn't
     * do anything fallible.
     */
    initialized = true;

    /*
     * Reset the lists of live weakmaps and array buffers for the compartments we
     * are collecting.
     */
    for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
        WeakMapBase::resetCompartmentWeakMapList(c);
        ArrayBufferObject::resetArrayBufferList(c);
    }

    /* Re-do all the marking, but non-incrementally. */
    js::gc::State state = runtime->gcIncrementalState;
    runtime->gcIncrementalState = MARK_ROOTS;

    JS_ASSERT(gcmarker->isDrained());
    gcmarker->reset();

    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront())
        r.front()->bitmap.clear();

    {
        gcstats::AutoPhase ap1(runtime->gcStats, gcstats::PHASE_MARK);
        gcstats::AutoPhase ap2(runtime->gcStats, gcstats::PHASE_MARK_ROOTS);
        MarkRuntime(gcmarker, true);
    }

    SliceBudget budget;
    runtime->gcIncrementalState = MARK;
    runtime->gcMarker.drainMarkStack(budget);

    {
        gcstats::AutoPhase ap(runtime->gcStats, gcstats::PHASE_SWEEP);
        MarkAllWeakReferences(runtime, gcstats::PHASE_SWEEP_MARK_WEAK);

        /* Update zone state for gray marking. */
        for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
            JS_ASSERT(zone->isGCMarkingBlack());
            zone->setGCState(Zone::MarkGray);
        }

        MarkAllGrayReferences(runtime);

        /* Restore zone state. */
        for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
            JS_ASSERT(zone->isGCMarkingGray());
            zone->setGCState(Zone::Mark);
        }
    }

    /* Take a copy of the non-incremental mark state and restore the original. */
    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
        Chunk *chunk = r.front();
        ChunkBitmap *bitmap = &chunk->bitmap;
        ChunkBitmap *entry = map.lookup(chunk)->value;
        Swap(*entry, *bitmap);
    }

    /* Restore the weak map and array buffer lists. */
    for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
        WeakMapBase::resetCompartmentWeakMapList(c);
        ArrayBufferObject::resetArrayBufferList(c);
    }
    WeakMapBase::restoreCompartmentWeakMapLists(weakmaps);
    ArrayBufferObject::restoreArrayBufferLists(arrayBuffers);

    runtime->gcIncrementalState = state;
}
void
js::gc::MarkingValidator::validate()
{
    /*
     * Validates the incremental marking for a single compartment by comparing
     * the mark bits to those previously recorded for a non-incremental mark.
     */

    if (!initialized)
        return;

    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
        Chunk *chunk = r.front();
        BitmapMap::Ptr ptr = map.lookup(chunk);
        if (!ptr)
            continue;  /* Allocated after we did the non-incremental mark. */

        ChunkBitmap *bitmap = ptr->value;
        ChunkBitmap *incBitmap = &chunk->bitmap;

        for (size_t i = 0; i < ArenasPerChunk; i++) {
            if (chunk->decommittedArenas.get(i))
                continue;
            Arena *arena = &chunk->arenas[i];
            if (!arena->aheader.allocated())
                continue;
            if (!arena->aheader.zone->isGCSweeping())
                continue;
            if (arena->aheader.allocatedDuringIncremental)
                continue;

            AllocKind kind = arena->aheader.getAllocKind();
            uintptr_t thing = arena->thingsStart(kind);
            uintptr_t end = arena->thingsEnd();
            while (thing < end) {
                Cell *cell = (Cell *)thing;

                /*
                 * If a non-incremental GC wouldn't have collected a cell, then
                 * an incremental GC won't collect it.
                 */
                JS_ASSERT_IF(bitmap->isMarked(cell, BLACK), incBitmap->isMarked(cell, BLACK));

                /*
                 * If the cycle collector isn't allowed to collect an object
                 * after a non-incremental GC has run, then it isn't allowed to
                 * collect it after an incremental GC.
                 */
                JS_ASSERT_IF(!bitmap->isMarked(cell, GRAY), !incBitmap->isMarked(cell, GRAY));

                thing += Arena::thingSize(kind);
            }
        }
    }
}
#endif /* DEBUG */
static void
ComputeNonIncrementalMarkingForValidation(JSRuntime *rt)
{
#ifdef DEBUG
    JS_ASSERT(!rt->gcMarkingValidator);
    if (rt->gcIsIncremental && rt->gcValidate)
        rt->gcMarkingValidator = js_new<MarkingValidator>(rt);
    if (rt->gcMarkingValidator)
        rt->gcMarkingValidator->nonIncrementalMark();
#endif
}
static void
ValidateIncrementalMarking(JSRuntime *rt)
{
#ifdef DEBUG
    if (rt->gcMarkingValidator)
        rt->gcMarkingValidator->validate();
#endif
}
static void
FinishMarkingValidation(JSRuntime *rt)
{
#ifdef DEBUG
    js_delete(rt->gcMarkingValidator);
    rt->gcMarkingValidator = nullptr;
#endif
}
static void
AssertNeedsBarrierFlagsConsistent(JSRuntime *rt)
{
#ifdef DEBUG
    bool anyNeedsBarrier = false;
    for (ZonesIter zone(rt); !zone.done(); zone.next())
        anyNeedsBarrier |= zone->needsBarrier();
    JS_ASSERT(rt->needsBarrier() == anyNeedsBarrier);
#endif
}
static void
DropStringWrappers(JSRuntime *rt)
{
    /*
     * String "wrappers" are dropped on GC because their presence would require
     * us to sweep the wrappers in all compartments every time we sweep a
     * compartment group.
     */
    for (CompartmentsIter c(rt); !c.done(); c.next()) {
        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
            if (e.front().key.kind == CrossCompartmentKey::StringWrapper)
                e.removeFront();
        }
    }
}
/*
 * Group zones that must be swept at the same time.
 *
 * If compartment A has an edge to an unmarked object in compartment B, then we
 * must not sweep A in a later slice than we sweep B. That's because a write
 * barrier in A could lead to the unmarked object in B becoming marked.
 * However, if we had already swept that object, we would be in trouble.
 *
 * If we consider these dependencies as a graph, then all the compartments in
 * any strongly-connected component of this graph must be swept in the same
 * slice.
 *
 * Tarjan's algorithm is used to calculate the components.
 */
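/*
 * For example, if zone A holds a wrapper to an unmarked object in zone B and
 * B holds one back into A, the A->B and B->A edges form a cycle; Tarjan's
 * algorithm then places A and B in the same strongly-connected component, so
 * they are swept in the same slice.
 */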
void
JSCompartment::findOutgoingEdges(ComponentFinder<JS::Zone> &finder)
{
    for (js::WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
        CrossCompartmentKey::Kind kind = e.front().key.kind;
        JS_ASSERT(kind != CrossCompartmentKey::StringWrapper);
        Cell *other = e.front().key.wrapped;
        if (kind == CrossCompartmentKey::ObjectWrapper) {
            /*
             * Add an edge to the wrapped object's compartment if the wrapped
             * object is not marked black, to indicate that the wrapper
             * compartment must not be swept after the wrapped compartment.
             */
            if (!other->isMarked(BLACK) || other->isMarked(GRAY)) {
                JS::Zone *w = other->tenuredZone();
                if (w->isGCMarking())
                    finder.addEdgeTo(w);
            }
        } else {
            JS_ASSERT(kind == CrossCompartmentKey::DebuggerScript ||
                      kind == CrossCompartmentKey::DebuggerSource ||
                      kind == CrossCompartmentKey::DebuggerObject ||
                      kind == CrossCompartmentKey::DebuggerEnvironment);
            /*
             * Add an edge for debugger object wrappers, to ensure (in conjunction
             * with the call to Debugger::findCompartmentEdges below) that debugger
             * and debuggee objects are always swept in the same group.
             */
            JS::Zone *w = other->tenuredZone();
            if (w->isGCMarking())
                finder.addEdgeTo(w);
        }
    }

    Debugger::findCompartmentEdges(zone(), finder);
}
void
Zone::findOutgoingEdges(ComponentFinder<JS::Zone> &finder)
{
    /*
     * Any compartment may have a pointer to an atom in the atoms
     * compartment, and these aren't in the cross compartment map.
     */
    JSRuntime *rt = runtimeFromMainThread();
    if (rt->atomsCompartment()->zone()->isGCMarking())
        finder.addEdgeTo(rt->atomsCompartment()->zone());

    for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
        comp->findOutgoingEdges(finder);
}
static void
FindZoneGroups(JSRuntime *rt)
{
    ComponentFinder<Zone> finder(rt->mainThread.nativeStackLimit[StackForSystemCode]);
    if (!rt->gcIsIncremental)
        finder.useOneComponent();

    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
        JS_ASSERT(zone->isGCMarking());
        finder.addNode(zone);
    }
    rt->gcZoneGroups = finder.getResultsList();
    rt->gcCurrentZoneGroup = rt->gcZoneGroups;
    rt->gcZoneGroupIndex = 0;
}
static void
ResetGrayList(JSCompartment *comp);
static void
GetNextZoneGroup(JSRuntime *rt)
{
    rt->gcCurrentZoneGroup = rt->gcCurrentZoneGroup->nextGroup();
    ++rt->gcZoneGroupIndex;
    if (!rt->gcCurrentZoneGroup) {
        rt->gcAbortSweepAfterCurrentGroup = false;
        return;
    }

    if (!rt->gcIsIncremental)
        ComponentFinder<Zone>::mergeGroups(rt->gcCurrentZoneGroup);

    if (rt->gcAbortSweepAfterCurrentGroup) {
        JS_ASSERT(!rt->gcIsIncremental);
        for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
            JS_ASSERT(!zone->gcNextGraphComponent);
            JS_ASSERT(zone->isGCMarking());
            zone->setNeedsBarrier(false, Zone::UpdateIon);
            zone->setGCState(Zone::NoGC);
            zone->gcGrayRoots.clearAndFree();
        }
        rt->setNeedsBarrier(false);
        AssertNeedsBarrierFlagsConsistent(rt);

        for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next()) {
            ArrayBufferObject::resetArrayBufferList(comp);
            ResetGrayList(comp);
        }

        rt->gcAbortSweepAfterCurrentGroup = false;
        rt->gcCurrentZoneGroup = nullptr;
    }
}
/*
 * At the end of collection, anything reachable from a gray root that has not
 * otherwise been marked black must be marked gray.
 *
 * This means that when marking things gray we must not allow marking to leave
 * the current compartment group, as that could result in things being marked
 * gray when they might subsequently be marked black. To achieve this, when we
 * find a cross compartment pointer we don't mark the referent but add it to a
 * singly-linked list of incoming gray pointers that is stored with each
 * compartment.
 *
 * The list head is stored in JSCompartment::gcIncomingGrayPointers and contains
 * cross compartment wrapper objects. The next pointer is stored in the second
 * extra slot of the cross compartment wrapper.
 *
 * The list is created during gray marking when one of the
 * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
 * current compartment group. This calls DelayCrossCompartmentGrayMarking to
 * push the referring object onto the list.
 *
 * The list is traversed and then unlinked in
 * MarkIncomingCrossCompartmentPointers.
 */
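/*
 * A sketch of the resulting list, threaded through the wrappers' gray-link
 * slots:
 *
 *   comp->gcIncomingGrayPointers --> [wrapper 1] --> [wrapper 2] --> null
 *
 * Each wrapper lives in some other compartment of the current group; its
 * private_() referent is the object in |comp| that may still need to be
 * marked gray.
 */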
static bool
IsGrayListObject(JSObject *obj)
{
    JS_ASSERT(obj);
    return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
}
/* static */ unsigned
ProxyObject::grayLinkSlot(JSObject *obj)
{
    JS_ASSERT(IsGrayListObject(obj));
    return ProxyObject::EXTRA_SLOT + 1;
}
#ifdef DEBUG
static void
AssertNotOnGrayList(JSObject *obj)
{
    JS_ASSERT_IF(IsGrayListObject(obj),
                 obj->getReservedSlot(ProxyObject::grayLinkSlot(obj)).isUndefined());
}
#endif
static JSObject *
CrossCompartmentPointerReferent(JSObject *obj)
{
    JS_ASSERT(IsGrayListObject(obj));
    return &obj->as<ProxyObject>().private_().toObject();
}
static JSObject *
NextIncomingCrossCompartmentPointer(JSObject *prev, bool unlink)
{
    unsigned slot = ProxyObject::grayLinkSlot(prev);
    JSObject *next = prev->getReservedSlot(slot).toObjectOrNull();
    JS_ASSERT_IF(next, IsGrayListObject(next));

    if (unlink)
        prev->setSlot(slot, UndefinedValue());

    return next;
}
void
js::DelayCrossCompartmentGrayMarking(JSObject *src)
{
    JS_ASSERT(IsGrayListObject(src));

    /* Called from MarkCrossCompartmentXXX functions. */
    unsigned slot = ProxyObject::grayLinkSlot(src);
    JSObject *dest = CrossCompartmentPointerReferent(src);
    JSCompartment *comp = dest->compartment();

    if (src->getReservedSlot(slot).isUndefined()) {
        src->setCrossCompartmentSlot(slot, ObjectOrNullValue(comp->gcIncomingGrayPointers));
        comp->gcIncomingGrayPointers = src;
    } else {
        JS_ASSERT(src->getReservedSlot(slot).isObjectOrNull());
    }

#ifdef DEBUG
    /*
     * Assert that the object is in our list, also walking the list to check its
     * integrity.
     */
    JSObject *obj = comp->gcIncomingGrayPointers;
    bool found = false;
    while (obj) {
        if (obj == src)
            found = true;
        obj = NextIncomingCrossCompartmentPointer(obj, false);
    }
    JS_ASSERT(found);
#endif
}
static void
MarkIncomingCrossCompartmentPointers(JSRuntime *rt, const uint32_t color)
{
    JS_ASSERT(color == BLACK || color == GRAY);

    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
    static const gcstats::Phase statsPhases[] = {
        gcstats::PHASE_SWEEP_MARK_INCOMING_BLACK,
        gcstats::PHASE_SWEEP_MARK_INCOMING_GRAY
    };
    gcstats::AutoPhase ap1(rt->gcStats, statsPhases[color]);

    bool unlinkList = color == GRAY;

    for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
        JS_ASSERT_IF(color == GRAY, c->zone()->isGCMarkingGray());
        JS_ASSERT_IF(color == BLACK, c->zone()->isGCMarkingBlack());
        JS_ASSERT_IF(c->gcIncomingGrayPointers, IsGrayListObject(c->gcIncomingGrayPointers));

        for (JSObject *src = c->gcIncomingGrayPointers;
             src;
             src = NextIncomingCrossCompartmentPointer(src, unlinkList))
        {
            JSObject *dst = CrossCompartmentPointerReferent(src);
            JS_ASSERT(dst->compartment() == c);

            if (color == GRAY) {
                if (IsObjectMarked(&src) && src->isMarked(GRAY))
                    MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst,
                                           "cross-compartment gray pointer");
            } else {
                if (IsObjectMarked(&src) && !src->isMarked(GRAY))
                    MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst,
                                           "cross-compartment black pointer");
            }
        }

        if (unlinkList)
            c->gcIncomingGrayPointers = nullptr;
    }

    SliceBudget budget;
    rt->gcMarker.drainMarkStack(budget);
}
static bool
RemoveFromGrayList(JSObject *wrapper)
{
    if (!IsGrayListObject(wrapper))
        return false;

    unsigned slot = ProxyObject::grayLinkSlot(wrapper);
    if (wrapper->getReservedSlot(slot).isUndefined())
        return false;  /* Not on our list. */

    JSObject *tail = wrapper->getReservedSlot(slot).toObjectOrNull();
    wrapper->setReservedSlot(slot, UndefinedValue());

    JSCompartment *comp = CrossCompartmentPointerReferent(wrapper)->compartment();
    JSObject *obj = comp->gcIncomingGrayPointers;
    if (obj == wrapper) {
        comp->gcIncomingGrayPointers = tail;
        return true;
    }

    while (obj) {
        unsigned slot = ProxyObject::grayLinkSlot(obj);
        JSObject *next = obj->getReservedSlot(slot).toObjectOrNull();
        if (next == wrapper) {
            obj->setCrossCompartmentSlot(slot, ObjectOrNullValue(tail));
            return true;
        }
        obj = next;
    }

    MOZ_ASSUME_UNREACHABLE("object not found in gray link list");
}
static void
ResetGrayList(JSCompartment *comp)
{
    JSObject *src = comp->gcIncomingGrayPointers;
    while (src)
        src = NextIncomingCrossCompartmentPointer(src, true);
    comp->gcIncomingGrayPointers = nullptr;
}
void
js::NotifyGCNukeWrapper(JSObject *obj)
{
    /*
     * References to the target of the wrapper are being removed, so we no
     * longer have to remember to mark it.
     */
    RemoveFromGrayList(obj);
}
enum {
    JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
    JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
};
unsigned
js::NotifyGCPreSwap(JSObject *a, JSObject *b)
{
    /*
     * Two objects in the same compartment are about to have their contents
     * swapped. If either of them is in our gray pointer list, we remove it
     * from the list, returning a bitset indicating what happened.
     */
    return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
           (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
}
void
js::NotifyGCPostSwap(JSObject *a, JSObject *b, unsigned removedFlags)
{
    /*
     * Two objects in the same compartment have had their contents swapped. If
     * either of them was in our gray pointer list, we re-add it again.
     */
    if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED)
        DelayCrossCompartmentGrayMarking(b);
    if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED)
        DelayCrossCompartmentGrayMarking(a);
}
static void
EndMarkingZoneGroup(JSRuntime *rt)
{
    /*
     * Mark any incoming black pointers from previously swept compartments
     * whose referents are not marked. This can occur when gray cells become
     * black by the action of UnmarkGray.
     */
    MarkIncomingCrossCompartmentPointers(rt, BLACK);

    MarkWeakReferencesInCurrentGroup(rt, gcstats::PHASE_SWEEP_MARK_WEAK);

    /*
     * Change state of current group to MarkGray to restrict marking to this
     * group. Note that there may be pointers to the atoms compartment, and
     * these will be marked through, as they are not marked with
     * MarkCrossCompartmentXXX.
     */
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        JS_ASSERT(zone->isGCMarkingBlack());
        zone->setGCState(Zone::MarkGray);
    }

    /* Mark incoming gray pointers from previously swept compartments. */
    rt->gcMarker.setMarkColorGray();
    MarkIncomingCrossCompartmentPointers(rt, GRAY);
    rt->gcMarker.setMarkColorBlack();

    /* Mark gray roots and mark transitively inside the current compartment group. */
    MarkGrayReferencesInCurrentGroup(rt);

    /* Restore marking state. */
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        JS_ASSERT(zone->isGCMarkingGray());
        zone->setGCState(Zone::Mark);
    }

    JS_ASSERT(rt->gcMarker.isDrained());
}
static void
BeginSweepingZoneGroup(JSRuntime *rt)
{
    /*
     * Begin sweeping the group of zones in gcCurrentZoneGroup,
     * performing actions that must be done before yielding to caller.
     */

    bool sweepingAtoms = false;
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        /* Set the GC state to sweeping. */
        JS_ASSERT(zone->isGCMarking());
        zone->setGCState(Zone::Sweep);

        /* Purge the ArenaLists before sweeping. */
        zone->allocator.arenas.purge();

        if (rt->isAtomsZone(zone))
            sweepingAtoms = true;
    }

    ValidateIncrementalMarking(rt);

    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);

    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START);
        if (rt->gcFinalizeCallback)
            rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_START, !rt->gcIsFull /* unused */);
    }

    if (sweepingAtoms) {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_ATOMS);
        SweepAtoms(rt);
    }

    /* Prune out dead views from ArrayBuffer's view lists. */
    for (GCCompartmentGroupIter c(rt); !c.done(); c.next())
        ArrayBufferObject::sweep(c);

    /* Collect watch points associated with unreachable objects. */
    WatchpointMap::sweepAll(rt);

    /* Detach unreachable debuggers and global objects from each other. */
    Debugger::sweepAll(&fop);

    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_COMPARTMENTS);

        for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_DISCARD_CODE);
            zone->discardJitCode(&fop);
        }

        bool releaseTypes = ReleaseObservedTypes(rt);
        for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
            gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
            c->sweep(&fop, releaseTypes);
        }

        for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
            gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
            zone->sweep(&fop, releaseTypes);
        }
    }

    /*
     * Queue all GC things in all zones for sweeping, either in the
     * foreground or on the background thread.
     *
     * Note that order is important here for the background case.
     *
     * Objects are finalized immediately but this may change in the future.
     */
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
        zone->allocator.arenas.queueObjectsForSweep(&fop);
    }
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
        zone->allocator.arenas.queueStringsForSweep(&fop);
    }
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
        zone->allocator.arenas.queueScriptsForSweep(&fop);
    }
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
        zone->allocator.arenas.queueIonCodeForSweep(&fop);
    }
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
        zone->allocator.arenas.queueShapesForSweep(&fop);
        zone->allocator.arenas.gcShapeArenasToSweep =
            zone->allocator.arenas.arenaListsToSweep[FINALIZE_SHAPE];
    }

    rt->gcSweepPhase = 0;
    rt->gcSweepZone = rt->gcCurrentZoneGroup;
    rt->gcSweepKindIndex = 0;

    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
        if (rt->gcFinalizeCallback)
            rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_END, !rt->gcIsFull /* unused */);
    }
}
static void
EndSweepingZoneGroup(JSRuntime *rt)
{
    /* Update the GC state for zones we have swept and unlink the list. */
    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
        JS_ASSERT(zone->isGCSweeping());
        zone->setGCState(Zone::Finished);
    }

    /* Reset the list of arenas marked as being allocated during sweep phase. */
    while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) {
        rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
        arena->unsetAllocDuringSweep();
    }
}
static void
BeginSweepPhase(JSRuntime *rt, bool lastGC)
{
    /*
     * Sweep phase.
     *
     * Finalize as we sweep, outside of rt->gcLock but with rt->isHeapBusy()
     * true so that any attempt to allocate a GC-thing from a finalizer will
     * fail, rather than nest badly and leave the unmarked newborn to be swept.
     */

    JS_ASSERT(!rt->gcAbortSweepAfterCurrentGroup);

    ComputeNonIncrementalMarkingForValidation(rt);

    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);

#ifdef JS_THREADSAFE
    rt->gcSweepOnBackgroundThread = !lastGC && rt->useHelperThreads();
#endif

#ifdef DEBUG
    for (CompartmentsIter c(rt); !c.done(); c.next()) {
        JS_ASSERT(!c->gcIncomingGrayPointers);
        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
            if (e.front().key.kind != CrossCompartmentKey::StringWrapper)
                AssertNotOnGrayList(&e.front().value.get().toObject());
        }
    }
#endif

    DropStringWrappers(rt);
    FindZoneGroups(rt);
    EndMarkingZoneGroup(rt);
    BeginSweepingZoneGroup(rt);
}
bool
ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget)
{
    if (!arenaListsToSweep[thingKind])
        return true;

    ArenaList &dest = arenaLists[thingKind];
    return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget);
}
static bool
DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget, gcstats::Phase phase)
{
    /* Run a marking slice and return whether the stack is now empty. */
    gcstats::AutoPhase ap(rt->gcStats, phase);
    return rt->gcMarker.drainMarkStack(sliceBudget);
}
static bool
SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
{
    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);

    bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_SWEEP_MARK);
    if (!finished)
        return false;

    for (;;) {
        /* Finalize foreground finalized things. */
        for (; rt->gcSweepPhase < FinalizePhaseCount ; ++rt->gcSweepPhase) {
            gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[rt->gcSweepPhase]);

            for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) {
                Zone *zone = rt->gcSweepZone;

                while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) {
                    AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex];

                    if (!zone->allocator.arenas.foregroundFinalize(&fop, kind, sliceBudget))
                        return false;  /* Yield to the mutator. */

                    ++rt->gcSweepKindIndex;
                }
                rt->gcSweepKindIndex = 0;
            }
            rt->gcSweepZone = rt->gcCurrentZoneGroup;
        }

        /* Remove dead shapes from the shape tree, but don't finalize them yet. */
        {
            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE);

            for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) {
                Zone *zone = rt->gcSweepZone;
                while (ArenaHeader *arena = zone->allocator.arenas.gcShapeArenasToSweep) {
                    for (CellIterUnderGC i(arena); !i.done(); i.next()) {
                        Shape *shape = i.get<Shape>();
                        if (!shape->isMarked())
                            shape->sweep();
                    }

                    zone->allocator.arenas.gcShapeArenasToSweep = arena->next;
                    sliceBudget.step(Arena::thingsPerArena(Arena::thingSize(FINALIZE_SHAPE)));
                    if (sliceBudget.isOverBudget())
                        return false;  /* Yield to the mutator. */
                }
            }
        }

        EndSweepingZoneGroup(rt);
        GetNextZoneGroup(rt);
        if (!rt->gcCurrentZoneGroup)
            return true;  /* We're finished. */
        EndMarkingZoneGroup(rt);
        BeginSweepingZoneGroup(rt);
    }
}
static void
EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC)
{
    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);

    JS_ASSERT_IF(lastGC, !rt->gcSweepOnBackgroundThread);

    JS_ASSERT(rt->gcMarker.isDrained());
    rt->gcMarker.stop();

    /*
     * Recalculate whether GC was full or not as this may have changed due to
     * newly created zones. Can only change from full to not full.
     */
    if (rt->gcIsFull) {
        for (ZonesIter zone(rt); !zone.done(); zone.next()) {
            if (!zone->isCollecting()) {
                rt->gcIsFull = false;
                break;
            }
        }
    }

    /*
     * If we found any black->gray edges during marking, we completely clear the
     * mark bits of all uncollected zones, or, if a reset has occurred, of zones
     * that will no longer be collected. This is safe, although it may
     * prevent the cycle collector from collecting some dead objects.
     */
    if (rt->gcFoundBlackGrayEdges) {
        for (ZonesIter zone(rt); !zone.done(); zone.next()) {
            if (!zone->isCollecting())
                zone->allocator.arenas.unmarkAll();
        }
    }

#ifdef DEBUG
    PropertyTree::dumpShapes(rt);
#endif

    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);

        /*
         * Sweep script filenames after sweeping functions in the generic loop
         * above. In this way when a scripted function's finalizer destroys the
         * script and calls rt->destroyScriptHook, the hook can still access the
         * script's filename. See bug 323267.
         */
        if (rt->gcIsFull)
            SweepScriptData(rt);

        /* Clear out any small pools that we're hanging on to. */
        if (JSC::ExecutableAllocator *execAlloc = rt->maybeExecAlloc())
            execAlloc->purge();

        /*
         * This removes compartments from rt->compartment, so we do it last to make
         * sure we don't miss sweeping any compartments.
         */
        if (!lastGC)
            SweepZones(&fop, lastGC);

        if (!rt->gcSweepOnBackgroundThread) {
            /*
             * Destroy arenas after we finished the sweeping so finalizers can
             * safely use IsAboutToBeFinalized(). This is done on the
             * GCHelperThread if possible. We acquire the lock only because
             * Expire needs to unlock it for other callers.
             */
            AutoLockGC lock(rt);
            ExpireChunksAndArenas(rt, gckind == GC_SHRINK);
        }
    }

    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);

        if (rt->gcFinalizeCallback)
            rt->gcFinalizeCallback(&fop, JSFINALIZE_COLLECTION_END, !rt->gcIsFull);

        /* If we finished a full GC, then the gray bits are correct. */
        if (rt->gcIsFull)
            rt->gcGrayBitsValid = true;
    }

    /* Set up list of zones for sweeping of background things. */
    JS_ASSERT(!rt->gcSweepingZones);
    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
        zone->gcNextGraphNode = rt->gcSweepingZones;
        rt->gcSweepingZones = zone;
    }

    /* If not sweeping on background thread then we must do it here. */
    if (!rt->gcSweepOnBackgroundThread) {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);

        SweepBackgroundThings(rt, false);

        rt->freeLifoAlloc.freeAll();

        /* Ensure the compartments get swept if it's the last GC. */
        if (lastGC)
            SweepZones(&fop, lastGC);
    }

    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        zone->setGCLastBytes(zone->gcBytes, gckind);
        if (zone->isCollecting()) {
            JS_ASSERT(zone->isGCFinished());
            zone->setGCState(Zone::NoGC);
        }

        JS_ASSERT(!zone->isCollecting());
        JS_ASSERT(!zone->wasGCStarted());

        for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) {
            JS_ASSERT_IF(!IsBackgroundFinalized(AllocKind(i)) ||
                         !rt->gcSweepOnBackgroundThread,
                         !zone->allocator.arenas.arenaListsToSweep[i]);
        }
    }

#ifdef DEBUG
    for (CompartmentsIter c(rt); !c.done(); c.next()) {
        JS_ASSERT(!c->gcIncomingGrayPointers);
        JS_ASSERT(!c->gcLiveArrayBuffers);

        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
            if (e.front().key.kind != CrossCompartmentKey::StringWrapper)
                AssertNotOnGrayList(&e.front().value.get().toObject());
        }
    }
#endif

    FinishMarkingValidation(rt);

    rt->gcLastGCTime = PRMJ_Now();
}
namespace {

/* ...while this class is to be used only for garbage collection. */
class AutoGCSession : AutoTraceSession {
  public:
    explicit AutoGCSession(JSRuntime *rt);
    ~AutoGCSession();
};

} /* anonymous namespace */
/* Start a new heap session. */
AutoTraceSession::AutoTraceSession(JSRuntime *rt, js::HeapState heapState)
  : runtime(rt),
    prevState(rt->heapState),
    pause(rt)
{
    JS_ASSERT(!rt->noGCOrAllocationCheck);
    JS_ASSERT(!rt->isHeapBusy());
    JS_ASSERT(heapState != Idle);
    rt->heapState = heapState;
}

AutoTraceSession::~AutoTraceSession()
{
    JS_ASSERT(runtime->isHeapBusy());
    runtime->heapState = prevState;
}
AutoGCSession::AutoGCSession(JSRuntime *rt)
  : AutoTraceSession(rt, MajorCollecting)
{
    runtime->gcIsNeeded = false;
    runtime->gcInterFrameGC = true;

    runtime->gcNumber++;

#ifdef DEBUG
    // Threads with an exclusive context should never pause while they are in
    // the middle of a suppressGC.
    for (ThreadDataIter iter(rt); !iter.done(); iter.next())
        JS_ASSERT(!iter->suppressGC);
#endif
}
AutoGCSession::~AutoGCSession()
{
#ifndef JS_MORE_DETERMINISTIC
    runtime->gcNextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
#endif

    runtime->gcChunkAllocationSinceLastGC = false;

#ifdef JS_GC_ZEAL
    /* Keeping these around after a GC is dangerous. */
    runtime->gcSelectedForMarking.clearAndFree();
#endif

    /* Clear gcMallocBytes for all compartments */
    for (ZonesIter zone(runtime); !zone.done(); zone.next()) {
        zone->resetGCMallocBytes();
        zone->unscheduleGC();
    }

    runtime->resetGCMallocBytes();
}
AutoCopyFreeListToArenas::AutoCopyFreeListToArenas(JSRuntime *rt)
  : runtime(rt)
{
    for (ZonesIter zone(rt); !zone.done(); zone.next())
        zone->allocator.arenas.copyFreeListsToArenas();
}

AutoCopyFreeListToArenas::~AutoCopyFreeListToArenas()
{
    for (ZonesIter zone(runtime); !zone.done(); zone.next())
        zone->allocator.arenas.clearFreeListsInArenas();
}
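/*
 * Copying the free lists into the arena headers for the duration of a slice
 * gives the collector a consistent view of which cells are actually
 * allocated; the destructor clears the copies again so that the mutator's
 * free-list allocation fast path resumes unchanged afterwards.
 */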
static void
IncrementalCollectSlice(JSRuntime *rt,
                        int64_t budget,
                        JS::gcreason::Reason gcReason,
                        JSGCInvocationKind gcKind);
static void
ResetIncrementalGC(JSRuntime *rt, const char *reason)
{
    switch (rt->gcIncrementalState) {
      case NO_INCREMENTAL:
        return;

      case MARK: {
        /* Cancel any ongoing marking. */
        AutoCopyFreeListToArenas copy(rt);

        rt->gcMarker.reset();
        rt->gcMarker.stop();

        for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
            ArrayBufferObject::resetArrayBufferList(c);
            ResetGrayList(c);
        }

        for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
            JS_ASSERT(zone->isGCMarking());
            zone->setNeedsBarrier(false, Zone::UpdateIon);
            zone->setGCState(Zone::NoGC);
        }
        rt->setNeedsBarrier(false);
        AssertNeedsBarrierFlagsConsistent(rt);

        rt->gcIncrementalState = NO_INCREMENTAL;

        JS_ASSERT(!rt->gcStrictCompartmentChecking);

        break;
      }

      case SWEEP:
        rt->gcMarker.reset();

        for (ZonesIter zone(rt); !zone.done(); zone.next())
            zone->scheduledForDestruction = false;

        /* Finish sweeping the current zone group, then abort. */
        rt->gcAbortSweepAfterCurrentGroup = true;
        IncrementalCollectSlice(rt, SliceBudget::Unlimited, JS::gcreason::RESET, GC_NORMAL);

        {
            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
            rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
        }
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("Invalid incremental GC state");
    }

    rt->gcStats.reset(reason);

#ifdef DEBUG
    for (CompartmentsIter c(rt); !c.done(); c.next())
        JS_ASSERT(!c->gcLiveArrayBuffers);

    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        JS_ASSERT(!zone->needsBarrier());
        for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
            JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
    }
#endif
}
namespace {

class AutoGCSlice {
  public:
    AutoGCSlice(JSRuntime *rt);
    ~AutoGCSlice();

  private:
    JSRuntime *runtime;
};

} /* anonymous namespace */

AutoGCSlice::AutoGCSlice(JSRuntime *rt)
  : runtime(rt)
{
    /*
     * During incremental GC, the compartment's active flag determines whether
     * there are stack frames active for any of its scripts. Normally this flag
     * is set at the beginning of the mark phase. During incremental GC, we also
     * set it at the start of every phase.
     */
    for (ActivationIterator iter(rt); !iter.done(); ++iter)
        iter.activation()->compartment()->zone()->active = true;

    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
        /*
         * Clear needsBarrier early so we don't do any write barriers during
         * GC. We don't need to update the Ion barriers (which is expensive)
         * because Ion code doesn't run during GC. If need be, we'll update the
         * Ion barriers in ~AutoGCSlice.
         */
        if (zone->isGCMarking()) {
            JS_ASSERT(zone->needsBarrier());
            zone->setNeedsBarrier(false, Zone::DontUpdateIon);
        } else {
            JS_ASSERT(!zone->needsBarrier());
        }
    }
    rt->setNeedsBarrier(false);
    AssertNeedsBarrierFlagsConsistent(rt);
}

AutoGCSlice::~AutoGCSlice()
{
    /* We can't use GCZonesIter if this is the end of the last slice. */
    bool haveBarriers = false;
    for (ZonesIter zone(runtime); !zone.done(); zone.next()) {
        if (zone->isGCMarking()) {
            zone->setNeedsBarrier(true, Zone::UpdateIon);
            zone->allocator.arenas.prepareForIncrementalGC(runtime);
            haveBarriers = true;
        } else {
            zone->setNeedsBarrier(false, Zone::UpdateIon);
        }
    }
    runtime->setNeedsBarrier(haveBarriers);
    AssertNeedsBarrierFlagsConsistent(runtime);
}

static void
PushZealSelectedObjects(JSRuntime *rt)
{
#ifdef JS_GC_ZEAL
    /* Push selected objects onto the mark stack and clear the list. */
    for (JSObject **obj = rt->gcSelectedForMarking.begin();
         obj != rt->gcSelectedForMarking.end(); obj++)
    {
        MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
    }
#endif
}

static void
IncrementalCollectSlice(JSRuntime *rt,
                        int64_t budget,
                        JS::gcreason::Reason reason,
                        JSGCInvocationKind gckind)
{
    AutoCopyFreeListToArenas copy(rt);
    AutoGCSlice slice(rt);

    bool lastGC = (reason == JS::gcreason::DESTROY_RUNTIME);

    gc::State initialState = rt->gcIncrementalState;

    int zeal = 0;
#ifdef JS_GC_ZEAL
    if (reason == JS::gcreason::DEBUG_GC && budget != SliceBudget::Unlimited) {
        /*
         * Do the incremental collection type specified by zeal mode if the
         * collection was triggered by RunDebugGC() and incremental GC has not
         * been cancelled by ResetIncrementalGC.
         */
        zeal = rt->gcZeal();
    }
#endif

    JS_ASSERT_IF(rt->gcIncrementalState != NO_INCREMENTAL, rt->gcIsIncremental);
    rt->gcIsIncremental = budget != SliceBudget::Unlimited;

    if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
        /*
         * Yielding between slices occurs at predetermined points in these
         * modes; the budget is not used.
         */
        budget = SliceBudget::Unlimited;
    }

    SliceBudget sliceBudget(budget);

    if (rt->gcIncrementalState == NO_INCREMENTAL) {
        rt->gcIncrementalState = MARK_ROOTS;
        rt->gcLastMarkSlice = false;
    }

    if (rt->gcIncrementalState == MARK)
        AutoGCRooter::traceAllWrappers(&rt->gcMarker);

    switch (rt->gcIncrementalState) {

      case MARK_ROOTS:
        if (!BeginMarkPhase(rt)) {
            rt->gcIncrementalState = NO_INCREMENTAL;
            return;
        }

        if (!lastGC)
            PushZealSelectedObjects(rt);

        rt->gcIncrementalState = MARK;

        if (zeal == ZealIncrementalRootsThenFinish)
            break;

        /* fall through */

      case MARK: {
        /* If we needed delayed marking for gray roots, then collect until done. */
        if (!rt->gcMarker.hasBufferedGrayRoots())
            sliceBudget.reset();

        bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_MARK);
        if (!finished)
            break;

        JS_ASSERT(rt->gcMarker.isDrained());

        if (!rt->gcLastMarkSlice &&
            ((initialState == MARK && budget != SliceBudget::Unlimited) ||
             zeal == ZealIncrementalMarkAllThenFinish))
        {
            /*
             * Yield with the aim of starting the sweep in the next
             * slice. We will need to mark anything new on the stack
             * when we resume, so we stay in MARK state.
             */
            rt->gcLastMarkSlice = true;
            break;
        }

        rt->gcIncrementalState = SWEEP;

        /*
         * This runs to completion, but we don't continue if the budget is
         * exceeded.
         */
        BeginSweepPhase(rt, lastGC);
        if (sliceBudget.isOverBudget())
            break;

        /*
         * Always yield here when running in incremental multi-slice zeal
         * mode, so RunDebugGC can reset the slice budget.
         */
        if (zeal == ZealIncrementalMultipleSlices)
            break;

        /* fall through */
      }

      case SWEEP: {
        bool finished = SweepPhase(rt, sliceBudget);
        if (!finished)
            break;

        EndSweepPhase(rt, gckind, lastGC);

        if (rt->gcSweepOnBackgroundThread)
            rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);

        rt->gcIncrementalState = NO_INCREMENTAL;
        break;
      }

      default:
        JS_ASSERT(false);
    }
}

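/*
 * Illustrative sketch of the state machine above across two slices, as a
 * caller like GCCycle drives it (budgets chosen for illustration only):
 *
 *    IncrementalCollectSlice(rt, SliceBudget::WorkBudget(1000), reason, GC_NORMAL);
 *    // DrainMarkStack ran out of budget: gcIncrementalState stays at MARK.
 *    ... mutator runs; write barriers keep the mark state valid ...
 *    IncrementalCollectSlice(rt, SliceBudget::Unlimited, reason, GC_NORMAL);
 *    // Mark stack drained; sweeping runs and the state returns to
 *    // NO_INCREMENTAL once SweepPhase finishes.
 */
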
IncrementalSafety
gc::IsIncrementalGCSafe(JSRuntime *rt)
{
    JS_ASSERT(!rt->mainThread.suppressGC);

    bool keepAtoms = false;
    for (ThreadDataIter iter(rt); !iter.done(); iter.next())
        keepAtoms |= iter->gcKeepAtoms;

    keepAtoms |= rt->exclusiveThreadsPresent();

    if (keepAtoms)
        return IncrementalSafety::Unsafe("gcKeepAtoms set");

    if (!rt->gcIncrementalEnabled)
        return IncrementalSafety::Unsafe("incremental permanently disabled");

    return IncrementalSafety::Safe();
}

static void
BudgetIncrementalGC(JSRuntime *rt, int64_t *budget)
{
    IncrementalSafety safe = IsIncrementalGCSafe(rt);
    if (!safe) {
        ResetIncrementalGC(rt, safe.reason());
        *budget = SliceBudget::Unlimited;
        rt->gcStats.nonincremental(safe.reason());
        return;
    }

    if (rt->gcMode != JSGC_MODE_INCREMENTAL) {
        ResetIncrementalGC(rt, "GC mode change");
        *budget = SliceBudget::Unlimited;
        rt->gcStats.nonincremental("GC mode");
        return;
    }

    if (rt->isTooMuchMalloc()) {
        *budget = SliceBudget::Unlimited;
        rt->gcStats.nonincremental("malloc bytes trigger");
    }

    bool reset = false;
    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        if (zone->gcBytes >= zone->gcTriggerBytes) {
            *budget = SliceBudget::Unlimited;
            rt->gcStats.nonincremental("allocation trigger");
        }

        if (rt->gcIncrementalState != NO_INCREMENTAL &&
            zone->isGCScheduled() != zone->wasGCStarted())
        {
            reset = true;
        }

        if (zone->isTooMuchMalloc()) {
            *budget = SliceBudget::Unlimited;
            rt->gcStats.nonincremental("malloc bytes trigger");
        }
    }

    if (reset)
        ResetIncrementalGC(rt, "zone change");
}

/*
 * GC, repeatedly if necessary, until we think we have not created any new
 * garbage. We disable inlining to ensure that the bottom of the stack with
 * possible GC roots recorded in MarkRuntime excludes any pointers we use during
 * the marking implementation.
 */
static JS_NEVER_INLINE void
GCCycle(JSRuntime *rt, bool incremental, int64_t budget, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
{
    /* If we attempt to invoke the GC while we are running in the GC, assert. */
    JS_ASSERT(!rt->isHeapBusy());

    AutoGCSession gcsession(rt);

    /*
     * As we are about to purge caches and clear the mark bits, we must wait
     * for any background finalization to finish. We must also wait for the
     * background allocation to finish so we can avoid taking the GC lock
     * when manipulating the chunks during the GC.
     */
    {
        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
        rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
    }

    if (!incremental) {
        /* If non-incremental GC was requested, reset incremental GC. */
        ResetIncrementalGC(rt, "requested");
        rt->gcStats.nonincremental("requested");
        budget = SliceBudget::Unlimited;
    } else {
        BudgetIncrementalGC(rt, &budget);
    }

    IncrementalCollectSlice(rt, budget, reason, gckind);
}

#ifdef JS_GC_ZEAL
static bool
IsDeterministicGCReason(JS::gcreason::Reason reason)
{
    if (reason > JS::gcreason::DEBUG_GC &&
        reason != JS::gcreason::CC_FORCED && reason != JS::gcreason::SHUTDOWN_CC)
    {
        return false;
    }

    if (reason == JS::gcreason::MAYBEGC)
        return false;

    return true;
}
#endif

static bool
ShouldCleanUpEverything(JSRuntime *rt, JS::gcreason::Reason reason, JSGCInvocationKind gckind)
{
    // During shutdown, we must clean everything up, for the sake of leak
    // detection. When a runtime has no contexts, or we're doing a GC before a
    // shutdown CC, those are strong indications that we're shutting down.
    //
    // DEBUG_MODE_GC indicates we're discarding code because the debug mode
    // has changed; debug mode affects the results of bytecode analysis, so
    // we need to clear everything away.
    return reason == JS::gcreason::DESTROY_RUNTIME ||
           reason == JS::gcreason::SHUTDOWN_CC ||
           reason == JS::gcreason::DEBUG_MODE_GC ||
           gckind == GC_SHRINK;
}

namespace {

#ifdef JSGC_GENERATIONAL
class AutoDisableStoreBuffer
{
    JSRuntime *runtime;
    bool prior;

  public:
    AutoDisableStoreBuffer(JSRuntime *rt) : runtime(rt) {
        prior = rt->gcStoreBuffer.isEnabled();
        rt->gcStoreBuffer.disable();
    }
    ~AutoDisableStoreBuffer() {
        if (prior)
            runtime->gcStoreBuffer.enable();
    }
};
#else
struct AutoDisableStoreBuffer
{
    AutoDisableStoreBuffer(JSRuntime *) {}
};
#endif

} /* anonymous namespace */

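/*
 * Illustrative note on nesting (hypothetical caller): because each guard
 * captures the prior enabled state, nested guards compose correctly:
 *
 *    AutoDisableStoreBuffer outer(rt);      // enabled -> disabled
 *    {
 *        AutoDisableStoreBuffer inner(rt);  // prior == false
 *    }                                      // still disabled
 *    // ~outer re-enables the store buffer
 */
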
static void
Collect(JSRuntime *rt, bool incremental, int64_t budget,
        JSGCInvocationKind gckind, JS::gcreason::Reason reason)
{
    /* GC shouldn't be running in parallel execution mode */
    JS_ASSERT(!InParallelSection());

    JS_AbortIfWrongThread(rt);

    if (rt->mainThread.suppressGC)
        return;

#if JS_TRACE_LOGGING
    AutoTraceLog logger(TraceLogging::defaultLogger(),
                        TraceLogging::GC_START,
                        TraceLogging::GC_STOP);
#endif

    ContextIter cx(rt);
    if (!cx.done())
        MaybeCheckStackRoots(cx);

#ifdef JS_GC_ZEAL
    if (rt->gcDeterministicOnly && !IsDeterministicGCReason(reason))
        return;
#endif

    JS_ASSERT_IF(!incremental || budget != SliceBudget::Unlimited, JSGC_INCREMENTAL);

    AutoStopVerifyingBarriers av(rt, reason == JS::gcreason::SHUTDOWN_CC ||
                                     reason == JS::gcreason::DESTROY_RUNTIME);

    MinorGC(rt, reason);

    /*
     * Marking can trigger many incidental post barriers, some of them for
     * objects which are not going to be live after the GC.
     */
    AutoDisableStoreBuffer adsb(rt);

    RecordNativeStackTopForGC(rt);

    int zoneCount = 0;
    int compartmentCount = 0;
    int collectedCount = 0;
    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        if (rt->gcMode == JSGC_MODE_GLOBAL)
            zone->scheduleGC();

        /* This is a heuristic to avoid resets. */
        if (rt->gcIncrementalState != NO_INCREMENTAL && zone->needsBarrier())
            zone->scheduleGC();

        zoneCount++;
        if (zone->isGCScheduled())
            collectedCount++;
    }

    for (CompartmentsIter c(rt); !c.done(); c.next())
        compartmentCount++;

    rt->gcShouldCleanUpEverything = ShouldCleanUpEverything(rt, reason, gckind);

    gcstats::AutoGCSlice agc(rt->gcStats, collectedCount, zoneCount, compartmentCount, reason);

    do {
        /*
         * Let the API user decide to defer a GC if it wants to (unless this
         * is the last context). Invoke the callback regardless.
         */
        if (rt->gcIncrementalState == NO_INCREMENTAL) {
            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_BEGIN);
            if (JSGCCallback callback = rt->gcCallback)
                callback(rt, JSGC_BEGIN, rt->gcCallbackData);
        }

        rt->gcPoke = false;
        GCCycle(rt, incremental, budget, gckind, reason);

        if (rt->gcIncrementalState == NO_INCREMENTAL) {
            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_END);
            if (JSGCCallback callback = rt->gcCallback)
                callback(rt, JSGC_END, rt->gcCallbackData);
        }

        /* Need to re-schedule all zones for GC. */
        if (rt->gcPoke && rt->gcShouldCleanUpEverything)
            JS::PrepareForFullGC(rt);

        /*
         * On shutdown, iterate until finalizers or the JSGC_END callback
         * stop creating garbage.
         */
    } while (rt->gcPoke && rt->gcShouldCleanUpEverything);
}

void
js::GC(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
{
    Collect(rt, false, SliceBudget::Unlimited, gckind, reason);
}

void
js::GCSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
{
    int64_t sliceBudget;
    if (millis)
        sliceBudget = SliceBudget::TimeBudget(millis);
    else if (rt->gcHighFrequencyGC && rt->gcDynamicMarkSlice)
        sliceBudget = rt->gcSliceBudget * IGC_MARK_SLICE_MULTIPLIER;
    else
        sliceBudget = rt->gcSliceBudget;

    Collect(rt, true, sliceBudget, gckind, reason);
}

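/*
 * Worked example of the budget selection above (values hypothetical): with a
 * runtime slice budget of 10ms, a high-frequency GC with dynamic mark slicing
 * gets an effective 20ms slice (IGC_MARK_SLICE_MULTIPLIER == 2), while an
 * explicit request wins outright:
 *
 *    js::GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC, 5);  // 5ms slice
 *    js::GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC, 0);  // 10ms or 20ms
 */
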
void
js::GCFinalSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
{
    Collect(rt, true, SliceBudget::Unlimited, gckind, reason);
}

static bool
ZonesSelected(JSRuntime *rt)
{
    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        if (zone->isGCScheduled())
            return true;
    }
    return false;
}

void
js::GCDebugSlice(JSRuntime *rt, bool limit, int64_t objCount)
{
    int64_t budget = limit ? SliceBudget::WorkBudget(objCount) : SliceBudget::Unlimited;
    if (!ZonesSelected(rt)) {
        if (JS::IsIncrementalGCInProgress(rt))
            JS::PrepareForIncrementalGC(rt);
        else
            JS::PrepareForFullGC(rt);
    }
    Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
}

/* Schedule a full GC unless a zone will already be collected. */
void
js::PrepareForDebugGC(JSRuntime *rt)
{
    if (!ZonesSelected(rt))
        JS::PrepareForFullGC(rt);
}

JS_FRIEND_API(void)
JS::ShrinkGCBuffers(JSRuntime *rt)
{
    AutoLockGC lock(rt);
    JS_ASSERT(!rt->isHeapBusy());

    if (!rt->useHelperThreads())
        ExpireChunksAndArenas(rt, true);
    else
        rt->gcHelperThread.startBackgroundShrink();
}

void
js::MinorGC(JSRuntime *rt, JS::gcreason::Reason reason)
{
#ifdef JSGC_GENERATIONAL
#if JS_TRACE_LOGGING
    AutoTraceLog logger(TraceLogging::defaultLogger(),
                        TraceLogging::MINOR_GC_START,
                        TraceLogging::MINOR_GC_STOP);
#endif
    rt->gcNursery.collect(rt, reason);
#endif
}

void
js::gc::FinishBackgroundFinalize(JSRuntime *rt)
{
    rt->gcHelperThread.waitBackgroundSweepEnd();
}

AutoFinishGC::AutoFinishGC(JSRuntime *rt)
{
    if (JS::IsIncrementalGCInProgress(rt)) {
        JS::PrepareForIncrementalGC(rt);
        JS::FinishIncrementalGC(rt, JS::gcreason::API);
    }

    gc::FinishBackgroundFinalize(rt);
}

AutoPrepareForTracing::AutoPrepareForTracing(JSRuntime *rt)
  : finish(rt),
    session(rt),
    copy(rt)
{
    RecordNativeStackTopForGC(rt);
}

JSCompartment *
js::NewCompartment(JSContext *cx, Zone *zone, JSPrincipals *principals,
                   const JS::CompartmentOptions &options)
{
    JSRuntime *rt = cx->runtime();
    JS_AbortIfWrongThread(rt);

    ScopedJSDeletePtr<Zone> zoneHolder;
    if (!zone) {
        zone = cx->new_<Zone>(rt);
        if (!zone)
            return nullptr;

        zoneHolder.reset(zone);

        if (!zone->init(cx))
            return nullptr;

        zone->setGCLastBytes(8192, GC_NORMAL);

        const JSPrincipals *trusted = rt->trustedPrincipals();
        zone->isSystem = principals && principals == trusted;
    }

    ScopedJSDeletePtr<JSCompartment> compartment(cx->new_<JSCompartment>(zone, options));
    if (!compartment || !compartment->init(cx))
        return nullptr;

    // Set up the principals.
    JS_SetCompartmentPrincipals(compartment, principals);

    AutoLockGC lock(rt);

    if (!zone->compartments.append(compartment.get())) {
        js_ReportOutOfMemory(cx);
        return nullptr;
    }

    if (zoneHolder && !rt->zones.append(zone)) {
        js_ReportOutOfMemory(cx);
        return nullptr;
    }

    zoneHolder.forget();
    return compartment.forget();
}

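/*
 * Illustrative usage sketch (hypothetical embedding code): passing a null
 * zone asks for the compartment to be created in a fresh zone of its own:
 *
 *    JS::CompartmentOptions options;
 *    JSCompartment *comp = js::NewCompartment(cx, nullptr, principals, options);
 *    if (!comp)
 *        return nullptr;  // error already reported
 */
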
void
gc::MergeCompartments(JSCompartment *source, JSCompartment *target)
{
    JSRuntime *rt = source->runtimeFromMainThread();
    AutoPrepareForTracing prepare(rt);

    // Cleanup tables and other state in the source compartment that will be
    // meaningless after merging into the target compartment.

    source->clearTables();

    // Fixup compartment pointers in source to refer to target.

    for (CellIter iter(source->zone(), FINALIZE_SCRIPT); !iter.done(); iter.next()) {
        JSScript *script = iter.get<JSScript>();
        JS_ASSERT(script->compartment() == source);
        script->compartment_ = target;
    }

    for (CellIter iter(source->zone(), FINALIZE_BASE_SHAPE); !iter.done(); iter.next()) {
        BaseShape *base = iter.get<BaseShape>();
        JS_ASSERT(base->compartment() == source);
        base->compartment_ = target;
    }

    // Fixup zone pointers in source's zone to refer to target's zone.

    for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) {
        for (ArenaIter aiter(source->zone(), AllocKind(thingKind)); !aiter.done(); aiter.next()) {
            ArenaHeader *aheader = aiter.get();
            aheader->zone = target->zone();
        }
    }

    // The source should be the only compartment in its zone.
    for (CompartmentsInZoneIter c(source->zone()); !c.done(); c.next())
        JS_ASSERT(c.get() == source);

    // Merge the allocator in source's zone into target's zone.
    target->zone()->allocator.arenas.adoptArenas(rt, &source->zone()->allocator.arenas);
    target->zone()->gcBytes += source->zone()->gcBytes;
    source->zone()->gcBytes = 0;

    // Merge other info in source's zone into target's zone.
    target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc);
}

void
gc::RunDebugGC(JSContext *cx)
{
#ifdef JS_GC_ZEAL
    JSRuntime *rt = cx->runtime();
    int type = rt->gcZeal();

    if (rt->mainThread.suppressGC)
        return;

    if (type == js::gc::ZealGenerationalGCValue)
        return MinorGC(rt, JS::gcreason::DEBUG_GC);

    PrepareForDebugGC(cx->runtime());

    if (type == ZealIncrementalRootsThenFinish ||
        type == ZealIncrementalMarkAllThenFinish ||
        type == ZealIncrementalMultipleSlices)
    {
        js::gc::State initialState = rt->gcIncrementalState;
        int64_t budget;
        if (type == ZealIncrementalMultipleSlices) {
            /*
             * Start with a small slice limit and double it every slice. This
             * ensures that we get multiple slices, and collection runs to
             * completion.
             */
            if (initialState == NO_INCREMENTAL)
                rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
            else
                rt->gcIncrementalLimit *= 2;
            budget = SliceBudget::WorkBudget(rt->gcIncrementalLimit);
        } else {
            // This triggers incremental GC but is actually ignored by IncrementalCollectSlice.
            budget = SliceBudget::WorkBudget(1);
        }

        Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);

        /*
         * For multi-slice zeal, reset the slice size when we get to the sweep
         * phase.
         */
        if (type == ZealIncrementalMultipleSlices &&
            initialState == MARK && rt->gcIncrementalState == SWEEP)
        {
            rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
        }
    } else {
        Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, JS::gcreason::DEBUG_GC);
    }
#endif
}

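/*
 * Worked example of the doubling schedule above (gcZealFrequency == 100,
 * hypothetical): the first multi-slice zeal collection starts with a work
 * budget of 50, then 100, 200, ... per slice until the collection finishes;
 * the limit drops back to 50 once the sweep phase is reached.
 */
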
void
gc::SetDeterministicGC(JSContext *cx, bool enabled)
{
#ifdef JS_GC_ZEAL
    JSRuntime *rt = cx->runtime();
    rt->gcDeterministicOnly = enabled;
#endif
}

void
gc::SetValidateGC(JSContext *cx, bool enabled)
{
    JSRuntime *rt = cx->runtime();
    rt->gcValidate = enabled;
}

void
gc::SetFullCompartmentChecks(JSContext *cx, bool enabled)
{
    JSRuntime *rt = cx->runtime();
    rt->gcFullCompartmentChecks = enabled;
}

#ifdef DEBUG

/* Should only be called manually under gdb */
void PreventGCDuringInteractiveDebug()
{
    TlsPerThreadData.get()->suppressGC++;
}

#endif

void
js::ReleaseAllJITCode(FreeOp *fop)
{
#ifdef JS_ION
    for (ZonesIter zone(fop->runtime()); !zone.done(); zone.next()) {

#ifdef DEBUG
        /* Assert no baseline scripts are marked as active. */
        for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
            JSScript *script = i.get<JSScript>();
            JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
        }
#endif

        /* Mark baseline scripts on the stack as active. */
        jit::MarkActiveBaselineScripts(zone);

        jit::InvalidateAll(fop, zone);

        for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
            JSScript *script = i.get<JSScript>();
            jit::FinishInvalidation(fop, script);

            /*
             * Discard baseline script if it's not marked as active. Note that
             * this also resets the active flag.
             */
            jit::FinishDiscardBaselineScript(fop, script);
        }
    }

    /* Sweep now invalidated compiler outputs from each compartment. */
    for (CompartmentsIter comp(fop->runtime()); !comp.done(); comp.next())
        comp->types.clearCompilerOutputs(fop);
#endif
}

/*
 * There are three possible PCCount profiling states:
 *
 * 1. None: Neither scripts nor the runtime have count information.
 * 2. Profile: Active scripts have count information, the runtime does not.
 * 3. Query: Scripts do not have count information, the runtime does.
 *
 * When starting to profile scripts, counting begins immediately, with all JIT
 * code discarded and recompiled with counts as necessary. Active interpreter
 * frames will not begin profiling until they begin executing another script
 * (via a call or return).
 *
 * The API functions below manage transitions between these states, according
 * to the following table.
 *
 *                                  Old State
 *                          -------------------------
 * Function                 None      Profile   Query
 * --------
 * StartPCCountProfiling    Profile   Profile   Profile
 * StopPCCountProfiling     None      Query     Query
 * PurgePCCounts            None      None      None
 */

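/*
 * Illustrative driver (hypothetical embedding code) walking the table above,
 * None -> Profile -> Query -> None:
 *
 *    js::StartPCCountProfiling(cx);   // None -> Profile
 *    ... run scripts; counts accumulate on active scripts ...
 *    js::StopPCCountProfiling(cx);    // Profile -> Query; counts move to rt
 *    ... query the collected counts ...
 *    js::PurgePCCounts(cx);           // Query -> None
 */
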
static void
ReleaseScriptCounts(FreeOp *fop)
{
    JSRuntime *rt = fop->runtime();
    JS_ASSERT(rt->scriptAndCountsVector);

    ScriptAndCountsVector &vec = *rt->scriptAndCountsVector;

    for (size_t i = 0; i < vec.length(); i++)
        vec[i].scriptCounts.destroy(fop);

    fop->delete_(rt->scriptAndCountsVector);
    rt->scriptAndCountsVector = nullptr;
}

JS_FRIEND_API(void)
js::StartPCCountProfiling(JSContext *cx)
{
    JSRuntime *rt = cx->runtime();

    if (rt->profilingScripts)
        return;

    if (rt->scriptAndCountsVector)
        ReleaseScriptCounts(rt->defaultFreeOp());

    ReleaseAllJITCode(rt->defaultFreeOp());

    rt->profilingScripts = true;
}

JS_FRIEND_API(void)
js::StopPCCountProfiling(JSContext *cx)
{
    JSRuntime *rt = cx->runtime();

    if (!rt->profilingScripts)
        return;
    JS_ASSERT(!rt->scriptAndCountsVector);

    ReleaseAllJITCode(rt->defaultFreeOp());

    ScriptAndCountsVector *vec = cx->new_<ScriptAndCountsVector>(SystemAllocPolicy());
    if (!vec)
        return;

    for (ZonesIter zone(rt); !zone.done(); zone.next()) {
        for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
            JSScript *script = i.get<JSScript>();
            if (script->hasScriptCounts && script->types) {
                ScriptAndCounts sac;
                sac.script = script;
                sac.scriptCounts.set(script->releaseScriptCounts());
                if (!vec->append(sac))
                    sac.scriptCounts.destroy(rt->defaultFreeOp());
            }
        }
    }

    rt->profilingScripts = false;
    rt->scriptAndCountsVector = vec;
}

JS_FRIEND_API(void)
js::PurgePCCounts(JSContext *cx)
{
    JSRuntime *rt = cx->runtime();

    if (!rt->scriptAndCountsVector)
        return;
    JS_ASSERT(!rt->profilingScripts);

    ReleaseScriptCounts(rt->defaultFreeOp());
}

void
js::PurgeJITCaches(Zone *zone)
{
#ifdef JS_ION
    for (CellIterUnderGC i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
        JSScript *script = i.get<JSScript>();

        /* Discard Ion caches. */
        jit::PurgeCaches(script, zone);
    }
#endif
}

void
ArenaLists::adoptArenas(JSRuntime *rt, ArenaLists *fromArenaLists)
{
    // The other parallel threads have all completed now, and GC
    // should be inactive, but still take the lock as a kind of read
    // fence.
    AutoLockGC lock(rt);

    fromArenaLists->purge();

    for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) {
#ifdef JS_THREADSAFE
        // When we enter a parallel section, we join the background
        // thread, and we do not run GC while in the parallel section,
        // so no finalizer should be active!
        volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
        switch (*bfs) {
          case BFS_DONE:
            break;
          case BFS_JUST_FINISHED:
            // No allocations between end of last sweep and now.
            // Transferring over arenas is a kind of allocation.
            *bfs = BFS_DONE;
            break;
          default:
            JS_ASSERT(!"Background finalization in progress, but it should not be.");
            break;
        }
#endif /* JS_THREADSAFE */

        ArenaList *fromList = &fromArenaLists->arenaLists[thingKind];
        ArenaList *toList = &arenaLists[thingKind];
        while (fromList->head != nullptr) {
            ArenaHeader *fromHeader = fromList->head;
            fromList->head = fromHeader->next;
            fromHeader->next = nullptr;

            toList->insert(fromHeader);
        }
        fromList->cursor = &fromList->head;
    }
}

bool
ArenaLists::containsArena(JSRuntime *rt, ArenaHeader *needle)
{
    AutoLockGC lock(rt);
    size_t allocKind = needle->getAllocKind();
    for (ArenaHeader *aheader = arenaLists[allocKind].head;
         aheader != nullptr;
         aheader = aheader->next)
    {
        if (aheader == needle)
            return true;
    }
    return false;
}

AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSContext *cx)
  : runtime(cx->runtime()),
    markCount(runtime->gcObjectsMarkedInDeadZones),
    inIncremental(JS::IsIncrementalGCInProgress(runtime)),
    manipulatingDeadZones(runtime->gcManipulatingDeadZones)
{
    runtime->gcManipulatingDeadZones = true;
}

AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSObject *obj)
  : runtime(obj->compartment()->runtimeFromMainThread()),
    markCount(runtime->gcObjectsMarkedInDeadZones),
    inIncremental(JS::IsIncrementalGCInProgress(runtime)),
    manipulatingDeadZones(runtime->gcManipulatingDeadZones)
{
    runtime->gcManipulatingDeadZones = true;
}

AutoMaybeTouchDeadZones::~AutoMaybeTouchDeadZones()
{
    if (inIncremental && runtime->gcObjectsMarkedInDeadZones != markCount) {
        JS::PrepareForFullGC(runtime);
        js::GC(runtime, GC_NORMAL, JS::gcreason::TRANSPLANT);
    }

    runtime->gcManipulatingDeadZones = manipulatingDeadZones;
}

AutoSuppressGC::AutoSuppressGC(ExclusiveContext *cx)
  : suppressGC_(cx->perThreadData->suppressGC)
{
    suppressGC_++;
}

AutoSuppressGC::AutoSuppressGC(JSCompartment *comp)
  : suppressGC_(comp->runtimeFromMainThread()->mainThread.suppressGC)
{
    suppressGC_++;
}

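/*
 * Illustrative usage sketch (hypothetical): suppress GC across a region that
 * manipulates unrooted cell pointers:
 *
 *    {
 *        AutoSuppressGC suppress(cx);
 *        // Collect() returns early while suppressGC is nonzero.
 *        ... manipulate unrooted cells ...
 *    }  // the counter is restored when the guard goes out of scope
 */
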
bool
js::UninlinedIsInsideNursery(JSRuntime *rt, const void *thing)
{
    return IsInsideNursery(rt, thing);
}

AutoDisableProxyCheck::AutoDisableProxyCheck(JSRuntime *rt
                                             MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
  : count(rt->gcDisableStrictProxyCheckingCount)
{
    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
    count++;
}

JS_FRIEND_API(void)
JS::AssertGCThingMustBeTenured(JSObject *obj)
{
    JS_ASSERT((!IsNurseryAllocable(obj->tenuredGetAllocKind()) || obj->getClass()->finalize) &&
              obj->isTenured());
}

JS_FRIEND_API(size_t)
JS::GetGCNumber()
{
    JSRuntime *rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
    if (!rt)
        return 0;
    return rt->gcNumber;
}

JS::AutoAssertNoGC::AutoAssertNoGC()
{
    JSRuntime *rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
    gcNumber = rt ? rt->gcNumber : size_t(-1);
}

JS::AutoAssertNoGC::~AutoAssertNoGC()
{
    JSRuntime *rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
    if (rt)
        MOZ_ASSERT(gcNumber == rt->gcNumber, "GC ran inside an AutoAssertNoGC scope");
}
