/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef js_HeapAPI_h
#define js_HeapAPI_h
#include "mozilla/Atomics.h"
#include "mozilla/BitSet.h"

#include <limits.h>  // CHAR_BIT
#include <stdint.h>  // UINT8_MAX
#include <type_traits>

#include "js/AllocPolicy.h"
#include "js/GCAnnotations.h"
#include "js/HashTable.h"
#include "js/shadow/String.h"  // JS::shadow::String
#include "js/shadow/Symbol.h"  // JS::shadow::Symbol
#include "js/shadow/Zone.h"    // JS::shadow::Zone
#include "js/TraceKind.h"
#include "js/TypeDecls.h"
/* These values are private to the JS engine. */
namespace js {

class NurseryDecommitTask;

JS_PUBLIC_API bool CurrentThreadCanAccessZone(JS::Zone* zone);
// To prevent false sharing, some data structures are aligned to a typical
// cache line size.
static constexpr size_t TypicalCacheLineSize = 64;

namespace gc {

class Arena;
struct Cell;
class StoreBuffer;
class TenuredCell;
class TenuredChunk;
const size_t ArenaShift = 12;
const size_t ArenaSize = size_t(1) << ArenaShift;
const size_t ArenaMask = ArenaSize - 1;
#if defined(XP_DARWIN) && defined(__aarch64__)
const size_t PageShift = 14;
#else
const size_t PageShift = 12;
#endif
// Expected page size, so we can initialize ArenasPerPage at compile time.
// The actual system page size should be queried by SystemPageSize().
const size_t PageSize = size_t(1) << PageShift;
constexpr size_t ArenasPerPage = PageSize / ArenaSize;
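// Illustrative cross-check (added here, not from the original source):
// whichever PageShift is selected above, pages must be an exact multiple of
// the arena size for ArenasPerPage to be meaningful.
static_assert(PageSize % ArenaSize == 0, "arenas must tile a page exactly");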
const size_t ChunkShift = 20;
const size_t ChunkSize = size_t(1) << ChunkShift;
const size_t ChunkMask = ChunkSize - 1;
const size_t CellAlignShift = 3;
const size_t CellAlignBytes = size_t(1) << CellAlignShift;
const size_t CellAlignMask = CellAlignBytes - 1;

const size_t CellBytesPerMarkBit = CellAlignBytes;
const size_t MarkBitsPerCell = 2;
/*
 * The minimum cell size ends up as twice the cell alignment because the mark
 * bitmap contains one bit per CellBytesPerMarkBit bytes (which is equal to
 * CellAlignBytes) and we need two mark bits per cell.
 */
const size_t MinCellSize = CellBytesPerMarkBit * MarkBitsPerCell;
/*
 * The mark bitmap has one bit per each possible cell start position. This
 * wastes some space for larger GC things but allows us to avoid division by
 * the cell's size when accessing the bitmap.
 */
const size_t ArenaBitmapBits = ArenaSize / CellBytesPerMarkBit;
const size_t ArenaBitmapBytes = HowMany(ArenaBitmapBits, 8);
const size_t ArenaBitmapWords = HowMany(ArenaBitmapBits, JS_BITS_PER_WORD);
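// Worked example (illustrative): a 4 KiB arena with one mark bit per 8 bytes
// needs 4096 / 8 = 512 mark bits, i.e. 64 bitmap bytes (8 words on a 64-bit
// build).
static_assert(ArenaBitmapBits == 512, "4 KiB arenas yield 512 mark bits");
static_assert(ArenaBitmapBytes == 64, "512 mark bits pack into 64 bytes");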
enum class ChunkKind : uint8_t {
  Invalid = 0,
  TenuredHeap,
  NurseryToSpace,
  NurseryFromSpace
};
// The base class for all GC chunks, either in the nursery or in the tenured
// heap memory. This structure is locatable from any GC pointer by aligning it
// down to the chunk size (1 MB).
class ChunkBase {
 protected:
  // Initialize a tenured heap chunk.
  explicit ChunkBase(JSRuntime* rt) {
    MOZ_ASSERT((uintptr_t(this) & ChunkMask) == 0);
    initBaseForTenuredChunk(rt);
  }

  void initBaseForTenuredChunk(JSRuntime* rt) {
    runtime = rt;
    storeBuffer = nullptr;
    kind = ChunkKind::TenuredHeap;
    nurseryChunkIndex = UINT8_MAX;
  }
  // Initialize a nursery chunk.
  ChunkBase(JSRuntime* rt, StoreBuffer* sb, ChunkKind kind, uint8_t chunkIndex)
      : storeBuffer(sb),
        runtime(rt),
        kind(kind),
        nurseryChunkIndex(chunkIndex) {
    MOZ_ASSERT(kind == ChunkKind::NurseryFromSpace ||
               kind == ChunkKind::NurseryToSpace);
    MOZ_ASSERT((uintptr_t(this) & ChunkMask) == 0);
    MOZ_ASSERT(storeBuffer);
  }
 public:
  ChunkKind getKind() const {
    MOZ_ASSERT_IF(storeBuffer, kind == ChunkKind::NurseryToSpace ||
                                   kind == ChunkKind::NurseryFromSpace);
    MOZ_ASSERT_IF(!storeBuffer, kind == ChunkKind::TenuredHeap);
    return kind;
  }
  // The store buffer for pointers from tenured things to things in this
  // chunk. Will be non-null if and only if this is a nursery chunk.
  StoreBuffer* storeBuffer;

  // Provide quick access to the runtime from absolutely anywhere.
  JSRuntime* runtime;

  ChunkKind kind;

  uint8_t nurseryChunkIndex;
};
// Information about tenured heap chunks.
struct TenuredChunkInfo {
 private:
  friend class ChunkPool;
  TenuredChunk* next = nullptr;
  TenuredChunk* prev = nullptr;

 public:
  /* Number of free arenas, either committed or decommitted. */
  uint32_t numArenasFree;

  /* Number of free, committed arenas. */
  uint32_t numArenasFreeCommitted;
};
/*
 * Calculating ArenasPerChunk:
 *
 * To figure out how many Arenas will fit in a chunk we need to know how much
 * extra space is available after we allocate the header data. This is a
 * problem because the header size depends on the number of arenas in the
 * chunk.
 *
 * The dependent fields are markBits, decommittedPages and
 * freeCommittedArenas. markBits needs ArenaBitmapBytes bytes per arena,
 * decommittedPages needs one bit per page and freeCommittedArenas needs one
 * bit per arena.
 *
 * We can calculate an approximate value by dividing the number of bits of
 * free space in the chunk by the number of bits needed per arena. This is an
 * approximation because it doesn't take account of the fact that the
 * variable-sized fields must be rounded up to a whole number of words, or of
 * any padding the compiler adds between fields.
 *
 * Fortunately, for the chunk and arena size parameters we use, this
 * approximation turns out to be correct. If it were not, we might need to
 * adjust the arena count down by one to allow more space for the padding.
 */
const size_t BitsPerPageWithHeaders =
    (ArenaSize + ArenaBitmapBytes) * ArenasPerPage * CHAR_BIT + ArenasPerPage +
    1;
const size_t ChunkBitsAvailable =
    (ChunkSize - sizeof(ChunkBase) - sizeof(TenuredChunkInfo)) * CHAR_BIT;
const size_t PagesPerChunk = ChunkBitsAvailable / BitsPerPageWithHeaders;
const size_t ArenasPerChunk = PagesPerChunk * ArenasPerPage;
const size_t FreeCommittedBits = ArenasPerChunk;
const size_t DecommitBits = PagesPerChunk;
const size_t BitsPerArenaWithHeaders =
    (ArenaSize + ArenaBitmapBytes) * CHAR_BIT +
    (DecommitBits / ArenasPerChunk) + 1;
const size_t CalculatedChunkSizeRequired =
    sizeof(ChunkBase) + sizeof(TenuredChunkInfo) +
    RoundUp(ArenasPerChunk * ArenaBitmapBytes, sizeof(uintptr_t)) +
    RoundUp(FreeCommittedBits, sizeof(uint32_t) * CHAR_BIT) / CHAR_BIT +
    RoundUp(DecommitBits, sizeof(uint32_t) * CHAR_BIT) / CHAR_BIT +
    ArenasPerChunk * ArenaSize;
static_assert(CalculatedChunkSizeRequired <= ChunkSize,
              "Calculated ArenasPerChunk is too large");

const size_t CalculatedChunkPadSize = ChunkSize - CalculatedChunkSizeRequired;
static_assert(CalculatedChunkPadSize * CHAR_BIT < BitsPerArenaWithHeaders,
              "Calculated ArenasPerChunk is too small");

static_assert(ArenasPerChunk == 252,
              "Do not accidentally change our heap's density.");
// Mark bitmaps are atomic because they can be written by gray unmarking on
// the main thread while read by sweeping on a background thread. The former
// does not affect the result of the latter.
using MarkBitmapWord = mozilla::Atomic<uintptr_t, mozilla::Relaxed>;
/*
 * Live objects are marked black or gray. Everything reachable from a JS root
 * is marked black. Objects marked gray are eligible for cycle collection.
 *
 *    BlackBit:     GrayOrBlackBit:  Color:
 *       0               0           white
 *       0               1           gray
 *       1               0           black
 *       1               1           black
 */
enum class ColorBit : uint32_t { BlackBit = 0, GrayOrBlackBit = 1 };

// Mark colors. Order is important here: the greater the value, the 'more
// marked' a cell is.
enum class MarkColor : uint8_t { Gray = 1, Black = 2 };
// Mark bitmap for a tenured heap chunk.
struct alignas(TypicalCacheLineSize) MarkBitmap {
  static constexpr size_t WordCount = ArenaBitmapWords * ArenasPerChunk;
  MarkBitmapWord bitmap[WordCount];

  inline void getMarkWordAndMask(const TenuredCell* cell, ColorBit colorBit,
                                 MarkBitmapWord** wordp, uintptr_t* maskp);
  // The following are not exported and are defined in gc/Heap.h:
  inline bool markBit(const TenuredCell* cell, ColorBit colorBit);
  inline bool isMarkedAny(const TenuredCell* cell);
  inline bool isMarkedBlack(const TenuredCell* cell);
  inline bool isMarkedGray(const TenuredCell* cell);
  inline bool markIfUnmarked(const TenuredCell* cell, MarkColor color);
  inline bool markIfUnmarkedAtomic(const TenuredCell* cell, MarkColor color);
  inline void markBlack(const TenuredCell* cell);
  inline void markBlackAtomic(const TenuredCell* cell);
  inline void copyMarkBit(TenuredCell* dst, const TenuredCell* src,
                          ColorBit colorBit);
  inline void unmark(const TenuredCell* cell);
  inline MarkBitmapWord* arenaBits(Arena* arena);
};
static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(MarkBitmap),
              "Ensure our MarkBitmap actually covers all arenas.");

// Bitmap with one bit per page used for decommitted page set.
using ChunkPageBitmap = mozilla::BitSet<PagesPerChunk, uint32_t>;

// Bitmap with one bit per arena used for free committed arena set.
using ChunkArenaBitmap = mozilla::BitSet<ArenasPerChunk, uint32_t>;
// Base class containing data members for a tenured heap chunk.
class TenuredChunkBase : public ChunkBase {
 public:
  TenuredChunkInfo info;
  MarkBitmap markBits;
  ChunkArenaBitmap freeCommittedArenas;
  ChunkPageBitmap decommittedPages;

 protected:
  explicit TenuredChunkBase(JSRuntime* runtime) : ChunkBase(runtime) {
    info.numArenasFree = ArenasPerChunk;
  }

  void initAsDecommitted();
};
/*
 * We sometimes use an index to refer to a cell in an arena. The index for a
 * cell is found by dividing by the cell alignment so not all indices refer to
 * valid cells.
 */
const size_t ArenaCellIndexBytes = CellAlignBytes;
const size_t MaxArenaCellIndex = ArenaSize / CellAlignBytes;
const size_t MarkBitmapWordBits = sizeof(MarkBitmapWord) * CHAR_BIT;

constexpr size_t FirstArenaAdjustmentBits =
    RoundUp(sizeof(gc::TenuredChunkBase), ArenaSize) / gc::CellBytesPerMarkBit;

static_assert((FirstArenaAdjustmentBits % MarkBitmapWordBits) == 0);
constexpr size_t FirstArenaAdjustmentWords =
    FirstArenaAdjustmentBits / MarkBitmapWordBits;
const size_t ChunkStoreBufferOffset = offsetof(ChunkBase, storeBuffer);
const size_t ChunkMarkBitmapOffset = offsetof(TenuredChunkBase, markBits);

// Hardcoded offsets into Arena class.
const size_t ArenaZoneOffset = 2 * sizeof(uint32_t);
const size_t ArenaHeaderSize = ArenaZoneOffset + 2 * sizeof(uintptr_t) +
                               sizeof(size_t) + sizeof(uintptr_t);
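// Worked arithmetic (illustrative, 64-bit build): ArenaZoneOffset is 8 bytes,
// so ArenaHeaderSize = 8 + 2*8 + 8 + 8 = 40 bytes of header at the start of
// each 4 KiB arena.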
// The first word of a GC thing has certain requirements from the GC and is
// used to store flags in the low bits.
const size_t CellFlagBitsReservedForGC = 3;

// The first word can be used to store JSClass pointers for some thing kinds,
// so these must be suitably aligned.
const size_t JSClassAlignBytes = size_t(1) << CellFlagBitsReservedForGC;
} /* namespace gc */
} /* namespace js */

namespace JS {

/* When downcasting, ensure we are actually the right type. */
#ifdef DEBUG
extern JS_PUBLIC_API void AssertGCThingHasType(js::gc::Cell* cell,
                                               JS::TraceKind kind);
#else
inline void AssertGCThingHasType(js::gc::Cell* cell, JS::TraceKind kind) {}
#endif
MOZ_ALWAYS_INLINE bool IsInsideNursery(const js::gc::Cell* cell);
MOZ_ALWAYS_INLINE bool IsInsideNursery(const js::gc::TenuredCell* cell);
enum class HeapState {
  Idle,             // doing nothing with the GC heap
  Tracing,          // tracing the GC heap without collecting, e.g.
                    // IterateCompartments()
  MajorCollecting,  // doing a GC of the major heap
  MinorCollecting,  // doing a GC of the minor heap (nursery)
  CycleCollecting   // in the "Unlink" phase of cycle collection
};
RuntimeHeapState();
337 static inline bool RuntimeHeapIsBusy() {
338 return RuntimeHeapState() != HeapState::Idle
;
341 static inline bool RuntimeHeapIsTracing() {
342 return RuntimeHeapState() == HeapState::Tracing
;
345 static inline bool RuntimeHeapIsMajorCollecting() {
346 return RuntimeHeapState() == HeapState::MajorCollecting
;
349 static inline bool RuntimeHeapIsMinorCollecting() {
350 return RuntimeHeapState() == HeapState::MinorCollecting
;
353 static inline bool RuntimeHeapIsCollecting(HeapState state
) {
354 return state
== HeapState::MajorCollecting
||
355 state
== HeapState::MinorCollecting
;
358 static inline bool RuntimeHeapIsCollecting() {
359 return RuntimeHeapIsCollecting(RuntimeHeapState());
362 static inline bool RuntimeHeapIsCycleCollecting() {
363 return RuntimeHeapState() == HeapState::CycleCollecting
;
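// Usage sketch (illustrative, not from the original source): embedder code
// that must not run while the GC is active can guard itself with one of the
// predicates above, e.g.:
//
//   MOZ_ASSERT(!JS::RuntimeHeapIsBusy());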
/*
 * This list enumerates the different types of conceptual stacks we have in
 * SpiderMonkey. In reality, they all share the C stack, but we allow different
 * stack limits depending on the type of code running.
 */
enum StackKind {
  StackForSystemCode,       // C++, such as the GC, running on behalf of the VM.
  StackForTrustedScript,    // Script running with trusted principals.
  StackForUntrustedScript,  // Script running with untrusted principals.
  StackKindCount
};
/*
 * Default maximum size for the generational nursery in bytes. This is the
 * initial value. In the browser this is configured by the
 * javascript.options.mem.nursery.max_kb pref.
 */
const uint32_t DefaultNurseryMaxBytes = 16 * js::gc::ChunkSize;

/* Default maximum heap size in bytes to pass to JS_NewContext(). */
const uint32_t DefaultHeapMaxBytes = 32 * 1024 * 1024;
/*
 * A GC pointer, tagged with the trace kind.
 *
 * In general, a GC pointer should be stored with an exact type. This class
 * is for use when that is not possible because a single pointer must point
 * to several kinds of GC thing.
 */
class JS_PUBLIC_API GCCellPtr {
 public:
  GCCellPtr() : GCCellPtr(nullptr) {}
  // Construction from a void* and trace kind.
  GCCellPtr(void* gcthing, JS::TraceKind traceKind)
      : ptr(checkedCast(gcthing, traceKind)) {}

  // Automatically construct a null GCCellPtr from nullptr.
  MOZ_IMPLICIT GCCellPtr(decltype(nullptr))
      : ptr(checkedCast(nullptr, JS::TraceKind::Null)) {}

  // Construction from an explicit type.
  template <typename T>
  explicit GCCellPtr(T* p)
      : ptr(checkedCast(p, JS::MapTypeToTraceKind<T>::kind)) {}
  explicit GCCellPtr(JSFunction* p)
      : ptr(checkedCast(p, JS::TraceKind::Object)) {}
  explicit GCCellPtr(JSScript* p)
      : ptr(checkedCast(p, JS::TraceKind::Script)) {}
  explicit GCCellPtr(const Value& v);
  JS::TraceKind kind() const {
    uintptr_t kindBits = ptr & OutOfLineTraceKindMask;
    if (kindBits != OutOfLineTraceKindMask) {
      return JS::TraceKind(kindBits);
    }
    return outOfLineKind();
  }
  // Allow GCCellPtr to be used in a boolean context.
  explicit operator bool() const {
    MOZ_ASSERT(bool(asCell()) == (kind() != JS::TraceKind::Null));
    return asCell();
  }
  // Simplify checks to the kind.
  template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
  bool is() const {
    return kind() == JS::MapTypeToTraceKind<T>::kind;
  }
  // Conversions to more specific types must match the kind. Access to
  // further refined types is not allowed directly from a GCCellPtr.
  template <typename T, typename = std::enable_if_t<JS::IsBaseTraceType_v<T>>>
  T& as() const {
    MOZ_ASSERT(kind() == JS::MapTypeToTraceKind<T>::kind);
    // We can't use static_cast here, because the fact that JSObject
    // inherits from js::gc::Cell is not part of the public API.
    return *reinterpret_cast<T*>(asCell());
  }
  // Return a pointer to the cell this |GCCellPtr| refers to, or |nullptr|.
  // (It would be more symmetrical with |to| for this to return a |Cell&|, but
  // the result can be |nullptr|, and null references are undefined behavior.)
  js::gc::Cell* asCell() const {
    return reinterpret_cast<js::gc::Cell*>(ptr & ~OutOfLineTraceKindMask);
  }

  // The CC's trace logger needs an identity that is XPIDL serializable.
  uint64_t unsafeAsInteger() const {
    return static_cast<uint64_t>(unsafeAsUIntPtr());
  }

  // Inline mark bitmap access requires direct pointer arithmetic.
  uintptr_t unsafeAsUIntPtr() const {
    MOZ_ASSERT(asCell());
    MOZ_ASSERT(!js::gc::IsInsideNursery(asCell()));
    return reinterpret_cast<uintptr_t>(asCell());
  }
  MOZ_ALWAYS_INLINE bool mayBeOwnedByOtherRuntime() const {
    if (!is<JSString>() && !is<JS::Symbol>()) {
      return false;
    }
    if (is<JSString>()) {
      return JS::shadow::String::isPermanentAtom(asCell());
    }
    MOZ_ASSERT(is<JS::Symbol>());
    return JS::shadow::Symbol::isWellKnownSymbol(asCell());
  }
 private:
  static uintptr_t checkedCast(void* p, JS::TraceKind traceKind) {
    auto* cell = static_cast<js::gc::Cell*>(p);
    MOZ_ASSERT((uintptr_t(p) & OutOfLineTraceKindMask) == 0);
    AssertGCThingHasType(cell, traceKind);
    // Store the trace kind in the bottom bits of the pointer for common kinds.
    uintptr_t kindBits = uintptr_t(traceKind);
    if (kindBits >= OutOfLineTraceKindMask) {
      kindBits = OutOfLineTraceKindMask;
    }
    return uintptr_t(p) | kindBits;
  }

  JS::TraceKind outOfLineKind() const;

  uintptr_t ptr;
};
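// Usage sketch (illustrative; |obj| stands for any live JSObject*):
//
//   JS::GCCellPtr cellPtr(obj);
//   MOZ_ASSERT(cellPtr.is<JSObject>());
//   JSObject& sameObj = cellPtr.as<JSObject>();   // kind-checked downcast
//   JS::TraceKind k = cellPtr.kind();             // JS::TraceKind::Object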
// Unwraps the given GCCellPtr, calls the functor |f| with a template argument
// of the actual type of the pointer, and returns the result.
template <typename F>
auto MapGCThingTyped(GCCellPtr thing, F&& f) {
  switch (thing.kind()) {
#define JS_EXPAND_DEF(name, type, _, _1) \
  case JS::TraceKind::name:              \
    return f(&thing.as<type>());
    JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
#undef JS_EXPAND_DEF
    default:
      MOZ_CRASH("Invalid trace kind in MapGCThingTyped for GCCellPtr.");
  }
}
// Unwraps the given GCCellPtr and calls the functor |f| with a template
// argument of the actual type of the pointer. Doesn't return anything.
template <typename F>
void ApplyGCThingTyped(GCCellPtr thing, F&& f) {
  // This function doesn't do anything but is supplied for symmetry with other
  // MapGCThingTyped/ApplyGCThingTyped implementations that have to wrap the
  // functor to return a dummy value that is ignored.
  MapGCThingTyped(thing, f);
}

} /* namespace JS */
// These are defined in the toplevel namespace instead of within JS so that
// they won't shadow other operator== overloads (see bug 1456512.)

inline bool operator==(JS::GCCellPtr ptr1, JS::GCCellPtr ptr2) {
  return ptr1.asCell() == ptr2.asCell();
}

inline bool operator!=(JS::GCCellPtr ptr1, JS::GCCellPtr ptr2) {
  return !(ptr1 == ptr2);
}
namespace js {
namespace gc {

MOZ_ALWAYS_INLINE void MarkBitmap::getMarkWordAndMask(const TenuredCell* cell,
                                                      ColorBit colorBit,
                                                      MarkBitmapWord** wordp,
                                                      uintptr_t* maskp) {
  // Note: the JIT pre-barrier trampolines inline this code. Update
  // MacroAssembler::emitPreBarrierFastPath code too when making changes here!

  MOZ_ASSERT(size_t(colorBit) < MarkBitsPerCell);

  size_t offset = uintptr_t(cell) & ChunkMask;
  const size_t bit = offset / CellBytesPerMarkBit + size_t(colorBit);
  size_t word = bit / MarkBitmapWordBits - FirstArenaAdjustmentWords;
  MOZ_ASSERT(word < WordCount);
  *wordp = &bitmap[word];
  *maskp = uintptr_t(1) << (bit % MarkBitmapWordBits);
}
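// Worked example (illustrative; assumes the chunk header rounds up to five
// arenas, i.e. FirstArenaAdjustmentWords == 40 on a 64-bit build): a cell at
// chunk offset 0x5008 queried for BlackBit gives
//   bit  = 0x5008 / 8 + 0 = 2561
//   word = 2561 / 64 - 40 = 0
//   mask = 1 << (2561 % 64) = 1 << 1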
namespace detail {

static MOZ_ALWAYS_INLINE ChunkBase* GetCellChunkBase(const Cell* cell) {
  MOZ_ASSERT(cell);
  auto* chunk = reinterpret_cast<ChunkBase*>(uintptr_t(cell) & ~ChunkMask);
  MOZ_ASSERT(chunk->runtime);
  MOZ_ASSERT(chunk->kind != ChunkKind::Invalid);
  return chunk;
}

static MOZ_ALWAYS_INLINE TenuredChunkBase* GetCellChunkBase(
    const TenuredCell* cell) {
  MOZ_ASSERT(cell);
  auto* chunk =
      reinterpret_cast<TenuredChunkBase*>(uintptr_t(cell) & ~ChunkMask);
  MOZ_ASSERT(chunk->runtime);
  MOZ_ASSERT(chunk->kind == ChunkKind::TenuredHeap);
  return chunk;
}
static MOZ_ALWAYS_INLINE JS::Zone* GetTenuredGCThingZone(const void* ptr) {
  // This takes a void* because the compiler can't see type relationships in
  // this header. |ptr| must be a pointer to a tenured GC thing.
  MOZ_ASSERT(ptr);
  const uintptr_t zone_addr = (uintptr_t(ptr) & ~ArenaMask) | ArenaZoneOffset;
  return *reinterpret_cast<JS::Zone**>(zone_addr);
}
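// Illustrative reading of the above: masking with ~ArenaMask rounds the cell
// pointer down to the start of its 4 KiB arena, where the owning Zone* is
// stored at byte ArenaZoneOffset (8), so a zone lookup is two ALU ops and one
// load, with no division.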
static MOZ_ALWAYS_INLINE bool TenuredCellIsMarkedBlack(
    const TenuredCell* cell) {
  // Return true if BlackBit is set.

  MOZ_ASSERT(cell);
  MOZ_ASSERT(!js::gc::IsInsideNursery(cell));

  MarkBitmapWord* blackWord;
  uintptr_t blackMask;
  TenuredChunkBase* chunk = GetCellChunkBase(cell);
  chunk->markBits.getMarkWordAndMask(cell, js::gc::ColorBit::BlackBit,
                                     &blackWord, &blackMask);
  return *blackWord & blackMask;
}
static MOZ_ALWAYS_INLINE bool NonBlackCellIsMarkedGray(
    const TenuredCell* cell) {
  // Return true if GrayOrBlackBit is set. Callers should check BlackBit first.

  MOZ_ASSERT(cell);
  MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
  MOZ_ASSERT(!TenuredCellIsMarkedBlack(cell));

  MarkBitmapWord* grayWord;
  uintptr_t grayMask;
  TenuredChunkBase* chunk = GetCellChunkBase(cell);
  chunk->markBits.getMarkWordAndMask(cell, js::gc::ColorBit::GrayOrBlackBit,
                                     &grayWord, &grayMask);
  return *grayWord & grayMask;
}
static MOZ_ALWAYS_INLINE bool TenuredCellIsMarkedGray(const TenuredCell* cell) {
  return !TenuredCellIsMarkedBlack(cell) && NonBlackCellIsMarkedGray(cell);
}

static MOZ_ALWAYS_INLINE bool CellIsMarkedGray(const Cell* cell) {
  MOZ_ASSERT(cell);
  if (js::gc::IsInsideNursery(cell)) {
    return false;
  }
  return TenuredCellIsMarkedGray(reinterpret_cast<const TenuredCell*>(cell));
}
extern JS_PUBLIC_API bool CanCheckGrayBits(const TenuredCell* cell);

extern JS_PUBLIC_API bool CellIsMarkedGrayIfKnown(const TenuredCell* cell);

#ifdef DEBUG
extern JS_PUBLIC_API void AssertCellIsNotGray(const Cell* cell);

extern JS_PUBLIC_API bool ObjectIsMarkedBlack(const JSObject* obj);
#endif
MOZ_ALWAYS_INLINE bool CellHasStoreBuffer(const Cell* cell) {
  return GetCellChunkBase(cell)->storeBuffer;
}

} /* namespace detail */

MOZ_ALWAYS_INLINE bool IsInsideNursery(const Cell* cell) {
  MOZ_ASSERT(cell);
  return detail::CellHasStoreBuffer(cell);
}

MOZ_ALWAYS_INLINE bool IsInsideNursery(const TenuredCell* cell) {
  MOZ_ASSERT(cell);
  MOZ_ASSERT(!IsInsideNursery(reinterpret_cast<const Cell*>(cell)));
  return false;
}
// Allow use before the compiler knows the derivation of JSObject, JSString,
// and JS::BigInt.
MOZ_ALWAYS_INLINE bool IsInsideNursery(const JSObject* obj) {
  return IsInsideNursery(reinterpret_cast<const Cell*>(obj));
}

MOZ_ALWAYS_INLINE bool IsInsideNursery(const JSString* str) {
  return IsInsideNursery(reinterpret_cast<const Cell*>(str));
}

MOZ_ALWAYS_INLINE bool IsInsideNursery(const JS::BigInt* bi) {
  return IsInsideNursery(reinterpret_cast<const Cell*>(bi));
}
MOZ_ALWAYS_INLINE bool IsCellPointerValid(const void* ptr) {
  auto addr = uintptr_t(ptr);
  if (addr < ChunkSize || addr % CellAlignBytes != 0) {
    return false;
  }

  auto* cell = reinterpret_cast<const Cell*>(ptr);
  if (!IsInsideNursery(cell)) {
    return detail::GetTenuredGCThingZone(cell) != nullptr;
  }

  return true;
}

MOZ_ALWAYS_INLINE bool IsCellPointerValidOrNull(const void* cell) {
  if (!cell) {
    return true;
  }
  return IsCellPointerValid(cell);
}
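// Usage sketch (illustrative): the |addr < ChunkSize| test rejects the first
// 1 MiB of address space, catching small integers reinterpreted as pointers,
// and the alignment test catches pointers into the middle of a cell, e.g.:
//
//   MOZ_ASSERT(js::gc::IsCellPointerValidOrNull(maybePtr));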
} /* namespace gc */
} /* namespace js */

namespace JS {

extern JS_PUBLIC_API Zone* GetTenuredGCThingZone(GCCellPtr thing);

extern JS_PUBLIC_API Zone* GetNurseryCellZone(js::gc::Cell* cell);

static MOZ_ALWAYS_INLINE Zone* GetGCThingZone(GCCellPtr thing) {
  if (!js::gc::IsInsideNursery(thing.asCell())) {
    return js::gc::detail::GetTenuredGCThingZone(thing.asCell());
  }

  return GetNurseryCellZone(thing.asCell());
}

static MOZ_ALWAYS_INLINE Zone* GetStringZone(JSString* str) {
  if (!js::gc::IsInsideNursery(str)) {
    return js::gc::detail::GetTenuredGCThingZone(str);
  }

  return GetNurseryCellZone(reinterpret_cast<js::gc::Cell*>(str));
}

extern JS_PUBLIC_API Zone* GetObjectZone(JSObject* obj);
static MOZ_ALWAYS_INLINE bool GCThingIsMarkedGray(GCCellPtr thing) {
  js::gc::Cell* cell = thing.asCell();
  if (IsInsideNursery(cell)) {
    return false;
  }

  auto* tenuredCell = reinterpret_cast<js::gc::TenuredCell*>(cell);
  return js::gc::detail::CellIsMarkedGrayIfKnown(tenuredCell);
}

// Specialised gray marking check for use by the cycle collector. This is not
// called during incremental GC or when the gray bits are invalid.
static MOZ_ALWAYS_INLINE bool GCThingIsMarkedGrayInCC(GCCellPtr thing) {
  js::gc::Cell* cell = thing.asCell();
  if (IsInsideNursery(cell)) {
    return false;
  }

  auto* tenuredCell = reinterpret_cast<js::gc::TenuredCell*>(cell);
  if (!js::gc::detail::TenuredCellIsMarkedGray(tenuredCell)) {
    return false;
  }

  MOZ_ASSERT(js::gc::detail::CanCheckGrayBits(tenuredCell));
  return true;
}
extern JS_PUBLIC_API JS::TraceKind GCThingTraceKind(void* thing);

extern JS_PUBLIC_API void EnableNurseryStrings(JSContext* cx);

extern JS_PUBLIC_API void DisableNurseryStrings(JSContext* cx);

extern JS_PUBLIC_API void EnableNurseryBigInts(JSContext* cx);

extern JS_PUBLIC_API void DisableNurseryBigInts(JSContext* cx);
/*
 * Returns true when writes to GC thing pointers (and reads from weak pointers)
 * must call an incremental barrier. This is generally only true when running
 * mutator code in-between GC slices. At other times, the barrier may be elided
 * for performance.
 */
extern JS_PUBLIC_API bool IsIncrementalBarrierNeeded(JSContext* cx);
/*
 * Notify the GC that a reference to a JSObject is about to be overwritten.
 * This method must be called if IsIncrementalBarrierNeeded() returns true.
 */
extern JS_PUBLIC_API void IncrementalPreWriteBarrier(JSObject* obj);

/*
 * Notify the GC that a reference to a tenured GC cell is about to be
 * overwritten. This method must be called if IsIncrementalBarrierNeeded()
 * returns true.
 */
extern JS_PUBLIC_API void IncrementalPreWriteBarrier(GCCellPtr thing);
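// Usage sketch (illustrative; |slot| and |newObj| are hypothetical embedder
// state): before overwriting a raw object reference not wrapped in
// JS::Heap<T>:
//
//   if (JS::IsIncrementalBarrierNeeded(cx)) {
//     JS::IncrementalPreWriteBarrier(slot);  // barrier on the old value
//   }
//   slot = newObj;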
/*
 * Unsets the gray bit for anything reachable from |thing|. |kind| should not
 * be JS::TraceKind::Shape. |thing| should be non-null. The return value
 * indicates whether anything was unmarked.
 */
extern JS_PUBLIC_API bool UnmarkGrayGCThingRecursively(GCCellPtr thing);

} /* namespace JS */

namespace js {
namespace gc {

extern JS_PUBLIC_API void PerformIncrementalReadBarrier(JS::GCCellPtr thing);
static MOZ_ALWAYS_INLINE void ExposeGCThingToActiveJS(JS::GCCellPtr thing) {
  // TODO: I'd like to assert !RuntimeHeapIsBusy() here but this gets
  // called while we are tracing the heap, e.g. during memory reporting
  // (see bug 1313318).
  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

  // GC things residing in the nursery cannot be gray: they have no mark bits.
  // All live objects in the nursery are moved to the tenured heap at the
  // beginning of each GC slice, so the gray marker never sees nursery things.
  if (IsInsideNursery(thing.asCell())) {
    return;
  }

  auto* cell = reinterpret_cast<TenuredCell*>(thing.asCell());
  if (detail::TenuredCellIsMarkedBlack(cell)) {
    return;
  }

  // GC things owned by other runtimes are always black.
  MOZ_ASSERT(!thing.mayBeOwnedByOtherRuntime());

  auto* zone = JS::shadow::Zone::from(detail::GetTenuredGCThingZone(cell));
  if (zone->needsIncrementalBarrier()) {
    PerformIncrementalReadBarrier(thing);
  } else if (!zone->isGCPreparing() &&
             detail::NonBlackCellIsMarkedGray(cell)) {
    MOZ_ALWAYS_TRUE(JS::UnmarkGrayGCThingRecursively(thing));
  }

  MOZ_ASSERT_IF(!zone->isGCPreparing(),
                !detail::TenuredCellIsMarkedGray(cell));
}
static MOZ_ALWAYS_INLINE void IncrementalReadBarrier(JS::GCCellPtr thing) {
  // This is a lighter version of ExposeGCThingToActiveJS that doesn't do gray
  // unmarking.

  if (IsInsideNursery(thing.asCell())) {
    return;
  }

  auto* cell = reinterpret_cast<TenuredCell*>(thing.asCell());
  auto* zone = JS::shadow::Zone::from(detail::GetTenuredGCThingZone(cell));
  if (zone->needsIncrementalBarrier() &&
      !detail::TenuredCellIsMarkedBlack(cell)) {
    // GC things owned by other runtimes are always black.
    MOZ_ASSERT(!thing.mayBeOwnedByOtherRuntime());
    PerformIncrementalReadBarrier(thing);
  }
}
template <typename T>
extern JS_PUBLIC_API bool EdgeNeedsSweepUnbarrieredSlow(T* thingp);

static MOZ_ALWAYS_INLINE bool EdgeNeedsSweepUnbarriered(JSObject** objp) {
  // This function does not handle updating nursery pointers. Raw JSObject
  // pointers should be updated separately or replaced with
  // JS::Heap<JSObject*> which handles this automatically.
  MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
  if (IsInsideNursery(*objp)) {
    return false;
  }

  auto zone = JS::shadow::Zone::from(detail::GetTenuredGCThingZone(*objp));
  if (!zone->isGCSweepingOrCompacting()) {
    return false;
  }

  return EdgeNeedsSweepUnbarrieredSlow(objp);
}
} /* namespace gc */
} /* namespace js */

namespace JS {

/*
 * This should be called when an object that is marked gray is exposed to the
 * JS engine (by handing it to running JS code or writing it into live JS
 * data). During incremental GC, since the gray bits haven't been computed
 * yet, we conservatively mark the object black.
 */
static MOZ_ALWAYS_INLINE void ExposeObjectToActiveJS(JSObject* obj) {
  MOZ_ASSERT(obj);
  MOZ_ASSERT(!js::gc::EdgeNeedsSweepUnbarrieredSlow(&obj));
  js::gc::ExposeGCThingToActiveJS(GCCellPtr(obj));
}
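// Usage sketch (illustrative; |cachedObj| is a hypothetical JSObject* the
// embedder stored somewhere the GC could not trace it):
//
//   JS::ExposeObjectToActiveJS(cachedObj);  // clear gray before use
//   DoSomethingWith(cachedObj);             // hypothetical embedder call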
} /* namespace JS */

#endif /* js_HeapAPI_h */