/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * JS Mark-and-Sweep Garbage Collector.
 *
 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
 * using malloc. It uses an ideally parallel array of flag bytes to hold the
 * mark bit, finalizer type index, etc.
 *
 * XXX swizzle page to freelist for better locality of reference
 */
#include <stdlib.h>     /* for free */
#include <string.h>     /* for memset used when DEBUG */
#include "jsutil.h"     /* Added by JSIFY */
#include "jshash.h"     /* Added by JSIFY */
#include "jsversion.h"
#include "jsstaticcheck.h"
#if JS_HAS_XML_SUPPORT
#include "jsxml.h"
#endif
#ifdef INCLUDE_MOZILLA_DTRACE
#include "jsdtracef.h"
#endif

/*
 * Check if posix_memalign is available.
 */
#if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
# define HAS_POSIX_MEMALIGN 1
#else
# define HAS_POSIX_MEMALIGN 0
#endif

/*
 * jemalloc provides posix_memalign.
 */
#if MOZ_MEMORY
#include "../../memory/jemalloc/jemalloc.h"
#endif

/*
 * Include the headers for mmap unless we have posix_memalign and do not
 * insist on mmap.
 */
#if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
# if defined(XP_WIN)
#  ifndef JS_GC_USE_MMAP
#   define JS_GC_USE_MMAP 1
#  endif
#  include <windows.h>
# elif defined(__SYMBIAN32__)
// Symbian's OpenC has mmap (and #defines _POSIX_MAPPED_FILES), but
// doesn't implement MAP_ANON. If we have MOZ_MEMORY, then we can use
// posix_memalign; we've defined HAS_POSIX_MEMALIGN above. Otherwise,
// we overallocate.
# else
#  if defined(XP_UNIX) || defined(XP_BEOS)
#   include <unistd.h>
#  endif
#  if _POSIX_MAPPED_FILES > 0
#   ifndef JS_GC_USE_MMAP
#    define JS_GC_USE_MMAP 1
#   endif
#   include <sys/mman.h>

/* On Mac OS X MAP_ANONYMOUS is not defined. */
#   if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#    define MAP_ANONYMOUS MAP_ANON
#   endif
#   if !defined(MAP_ANONYMOUS)
#    define MAP_ANONYMOUS 0
#   endif
#  else
#   if JS_GC_USE_MMAP
#    error "JS_GC_USE_MMAP is set when mmap is not available"
#   endif
#  endif
# endif
#endif

/*
 * Check JSTempValueUnion has the size of jsval and void * so we can
 * reinterpret jsval as void* GC-thing pointer and use JSTVU_SINGLE for
 * different GC-things.
 */
JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval));
JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(void *));

/*
 * Check that JSTRACE_XML follows JSTRACE_OBJECT, JSTRACE_DOUBLE and
 * JSTRACE_STRING.
 */
JS_STATIC_ASSERT(JSTRACE_OBJECT == 0);
JS_STATIC_ASSERT(JSTRACE_DOUBLE == 1);
JS_STATIC_ASSERT(JSTRACE_STRING == 2);
JS_STATIC_ASSERT(JSTRACE_XML    == 3);

/*
 * JS_IS_VALID_TRACE_KIND assumes that JSTRACE_STRING is the last non-xml
 * trace kind when JS_HAS_XML_SUPPORT is false.
 */
JS_STATIC_ASSERT(JSTRACE_STRING + 1 == JSTRACE_XML);

/*
 * The number of used GCX-types must stay within GCX_LIMIT.
 */
JS_STATIC_ASSERT(GCX_NTYPES <= GCX_LIMIT);

/*
 * Check that we can reinterpret double as JSGCDoubleCell.
 */
JS_STATIC_ASSERT(sizeof(JSGCDoubleCell) == sizeof(double));

/*
 * Check that we can use memset(p, 0, ...) to implement JS_CLEAR_WEAK_ROOTS.
 */
JS_STATIC_ASSERT(JSVAL_NULL == 0);

/*
 * A GC arena contains a fixed number of flag bits for each thing in its heap,
 * and supports O(1) lookup of a flag given its thing's address.
 *
 * To implement this, we allocate things of the same size from a GC arena
 * containing GC_ARENA_SIZE bytes aligned on a GC_ARENA_SIZE boundary. The
 * following picture shows the arena's layout:
 *
 *  +------------------------------+--------------------+---------------+
 *  | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
 *  +------------------------------+--------------------+---------------+
 *
 * To find the flag bits for a thing we calculate the thing index counting
 * from the arena's start using:
 *
 *   thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
 *
 * The details of the flag lookup depend on the thing's kind. For all GC
 * things except doubles we use one byte of flags where 4 bits determine the
 * thing's type and the rest is used to implement GC marking, finalization
 * and locking. We calculate the address of the flag byte using:
 *
 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
 *
 * where
 *
 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
 *
 * is the last byte of the flags area.
 *
 * This implies that the things are allocated from the start of their area and
 * flags are allocated from the end. This arrangement avoids a relatively
 * expensive calculation of the location of the boundary separating things and
 * flags. The boundary's offset from the start of the arena is given by:
 *
 *   thingsPerArena * thingSize
 *
 * where thingsPerArena is the number of things that the arena can hold:
 *
 *   (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
 *
 * To allocate doubles we use a specialized arena. It can contain only numbers
 * so we do not need the type bits. Moreover, since the doubles do not require
 * a finalizer and very few of them are locked via the js_LockGCThing API, we
 * use just one bit of flags per double to denote if it was marked during the
 * marking phase of the GC. The locking is implemented via a hash table. Thus
 * for doubles the flag area becomes a bitmap.
 *
 * The JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
 * When it is true, a platform-dependent function like mmap is used to get
 * memory aligned on CPU page boundaries. If the macro is false or undefined,
 * posix_memalign is used when available. Otherwise the code uses malloc to
 * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
 * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
 * see NewGCChunk/DestroyGCChunk below.
 *
 * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
 * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
 * not be a compile-time constant as the system page size is not known until
 * runtime.
 */
#if JS_GC_USE_MMAP
static uint32 js_gcArenasPerChunk = 0;
static JSBool js_gcUseMmap = JS_FALSE;
#elif HAS_POSIX_MEMALIGN
# define js_gcArenasPerChunk 1
#else
# define js_gcArenasPerChunk 7
#endif

#if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
# define CHUNKED_ARENA_ALLOCATION 0
#else
# define CHUNKED_ARENA_ALLOCATION 1
#endif

#define GC_ARENA_SHIFT              12
#define GC_ARENA_MASK               ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
#define GC_ARENA_SIZE               JS_BIT(GC_ARENA_SHIFT)
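
/*
 * For illustration, a minimal sketch of the flag lookup described above
 * (this helper is hypothetical and not part of the engine; the real code
 * uses the THING_TO_INDEX and THING_TO_FLAGP macros defined later):
 *
 *   static uint8 *
 *   ExampleThingToFlagp(jsuword thingAddress, size_t thingSize)
 *   {
 *       jsuword thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize;
 *       return (uint8 *) ((thingAddress | GC_ARENA_MASK)
 *                         - sizeof(JSGCArenaInfo) - thingIndex);
 *   }
 */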

/*
 * JS_GC_ARENA_PAD defines the number of bytes to pad the JSGCArenaInfo
 * structure. It is used to improve allocation efficiency when using
 * posix_memalign. If malloc's implementation uses internal headers, then
 * calling
 *
 *   posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
 *
 * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
 * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
 * calls
 *
 *   posix_memalign(&p, GC_ARENA_SIZE,
 *                  GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
 *
 * When JS_GC_ARENA_PAD is equal to or greater than the number of words in the
 * system header, the system can pack all allocations together without holes.
 *
 * With JS_GC_USE_MEMALIGN we want at least a 2-word pad unless posix_memalign
 * comes from jemalloc, which does not use any headers/trailers.
 */
#ifndef JS_GC_ARENA_PAD
# if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
#  define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
# else
#  define JS_GC_ARENA_PAD 0
# endif
#endif

struct JSGCArenaInfo {
    /*
     * Allocation list for the arena or NULL if the arena holds double values.
     */
    JSGCArenaList   *list;

    /*
     * Pointer to the previous arena in a linked list. The arena can either
     * belong to one of JSContext.gcArenaList lists or, when it does not have
     * any allocated GC things, to the list of free arenas in the chunk with
     * head stored in JSGCChunkInfo.lastFreeArena.
     */
    JSGCArenaInfo   *prev;

#if !CHUNKED_ARENA_ALLOCATION
    jsuword         prevUntracedPage;
#else
    /*
     * A link field for the list of arenas with marked but not yet traced
     * things. The field is encoded as the arena's page to share the space
     * with the firstArena and arenaIndex fields.
     */
    jsuword         prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;

    /*
     * When firstArena is false, the index of the arena in the chunk. When
     * firstArena is true, the index of a free arena holding JSGCChunkInfo or
     * NO_FREE_ARENAS if there are no free arenas in the chunk.
     *
     * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
     * access either of the indexes.
     */
    jsuword         arenaIndex : GC_ARENA_SHIFT - 1;

    /* Flag indicating if the arena is the first in the chunk. */
    jsuword         firstArena : 1;
#endif

    union {
        jsuword     untracedThings;     /* bitset for fast search of marked
                                           but not yet traced things */
        JSBool      hasMarkedDoubles;   /* the arena has marked doubles */
    } u;

#if JS_GC_ARENA_PAD != 0
    uint8           pad[JS_GC_ARENA_PAD];
#endif
};

/*
 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
 * as possible. The code does not rely on this check so if on a particular
 * platform this does not compile, then, as a workaround, comment the assert
 * out and submit a bug report.
 */
JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));

/*
 * Macros to convert between JSGCArenaInfo, the start address of the arena and
 * the arena's page defined as (start address) >> GC_ARENA_SHIFT.
 */
#define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))

#define IS_ARENA_INFO_ADDRESS(arena)                                          \
    (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)

#define ARENA_START_TO_INFO(arenaStart)                                       \
    (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0),                \
     (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))

#define ARENA_INFO_TO_START(arena)                                            \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
     (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)

#define ARENA_PAGE_TO_INFO(arenaPage)                                         \
    (JS_ASSERT(arenaPage != 0),                                               \
     JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
     ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))

#define ARENA_INFO_TO_PAGE(arena)                                             \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
     ((jsuword) (arena) >> GC_ARENA_SHIFT))

#define GET_ARENA_INFO(chunk, index)                                          \
    (JS_ASSERT((index) < js_gcArenasPerChunk),                                \
     ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
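
/*
 * Worked example (for illustration): with GC_ARENA_SHIFT == 12, an arena
 * starting at address 0x20000 has arena page 0x20000 >> 12 == 0x20, and its
 * JSGCArenaInfo lives at 0x20000 + ARENA_INFO_OFFSET. ARENA_INFO_TO_PAGE
 * simply shifts that info address right by 12, which recovers 0x20 because
 * the info resides inside the arena's own page-aligned 4096-byte block.
 */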

#if CHUNKED_ARENA_ALLOCATION

/*
 * Definitions for allocating arenas in chunks.
 *
 * All chunks that have at least one free arena are put on the doubly-linked
 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
 * the head of the chunk's free arena list together with the link fields for
 * gcChunkList.
 *
 * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
 * the index of this arena. When all arenas in the chunk are used, it is
 * removed from the list and the index is set to NO_FREE_ARENAS indicating
 * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
 */
struct JSGCChunkInfo {
    JSGCChunkInfo   **prevp;
    JSGCChunkInfo   *next;
    JSGCArenaInfo   *lastFreeArena;
    uint32          numFreeArenas;
};

#define NO_FREE_ARENAS              JS_BITMASK(GC_ARENA_SHIFT - 1)

#ifdef js_gcArenasPerChunk
JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
                 js_gcArenasPerChunk <= NO_FREE_ARENAS);
#endif

#define GET_ARENA_CHUNK(arena, index)                                         \
    (JS_ASSERT(GET_ARENA_INDEX(arena) == index),                              \
     ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))

#define GET_ARENA_INDEX(arena)                                                \
    ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)

#define GET_CHUNK_INFO_INDEX(chunk)                                           \
    ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)

#define SET_CHUNK_INFO_INDEX(chunk, index)                                    \
    (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS),   \
     (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))

#define GET_CHUNK_INFO(chunk, infoIndex)                                      \
    (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)),                   \
     JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk),                   \
     (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))

#define CHUNK_INFO_TO_INDEX(ci)                                               \
    GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))

#endif

/*
 * Macros for GC-thing operations.
 */
#define THINGS_PER_ARENA(thingSize)                                           \
    ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))

#define THING_TO_ARENA(thing)                                                 \
    (JS_ASSERT(!JSString::isStatic(thing)),                                   \
     (JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK)                    \
                       + 1 - sizeof(JSGCArenaInfo)))

#define THING_TO_INDEX(thing, thingSize)                                      \
    ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))

#define THING_FLAGS_END(arena)      ((uint8 *)(arena))

#define THING_FLAGP(arena, thingIndex)                                        \
    (JS_ASSERT((jsuword) (thingIndex)                                         \
               < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)),       \
     (uint8 *)(arena) - 1 - (thingIndex))

#define THING_TO_FLAGP(thing, thingSize)                                      \
    THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))

#define FLAGP_TO_ARENA(flagp)       THING_TO_ARENA(flagp)

#define FLAGP_TO_INDEX(flagp)                                                 \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET),      \
     (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))

#define FLAGP_TO_THING(flagp, thingSize)                                      \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >=                         \
               (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))),            \
     (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) +                     \
                   (thingSize) * FLAGP_TO_INDEX(flagp)))
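
/*
 * An added spot check (illustrative, not relied on by the code): the things
 * area, thingsPerArena * thingSize bytes, can never extend into the flags
 * area, because (M / (thingSize + 1)) * thingSize <= M holds for
 * M == ARENA_INFO_OFFSET. We verify it for the smallest GC thing.
 */
JS_STATIC_ASSERT(THINGS_PER_ARENA(sizeof(JSGCThing)) * sizeof(JSGCThing) <=
                 ARENA_INFO_OFFSET);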

/*
 * Macros for the specialized arena for doubles.
 *
 * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
 * hold. We find it as follows. Let n be the number of doubles in the arena.
 * Together with the bitmap of flags and JSGCArenaInfo they should fit the
 * arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
 * which the following holds:
 *
 *   n*s + ceil(n/B) <= M                                               (1)
 *
 * where "/" denotes normal real division,
 *       ceil(r) gives the least integer not smaller than the number r,
 *       s is the number of words in jsdouble,
 *       B is the number of bits per word or B == JS_BITS_PER_WORD,
 *       M is the number of words in the arena before JSGCArenaInfo or
 *       M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword)
 *         == ARENA_INFO_OFFSET / sizeof(jsuword).
 *
 * We rewrite the inequality as
 *
 *   n*B*s/B + ceil(n/B) <= M,
 *   ceil(n*B*s/B + n/B) <= M,
 *   ceil(n*(B*s + 1)/B) <= M                                           (2)
 *
 * We define a helper function e(n, s, B),
 *
 *   e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
 *
 * Then (2) gives
 *
 *   n*(B*s + 1)/B + e(n, s, B) <= M,
 *   n + e*B/(B*s + 1) <= M*B/(B*s + 1)
 *
 * We apply the floor function to both sides of the last inequality, where
 * floor(r) gives the biggest integer not greater than r. As a consequence we
 * have:
 *
 *   floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 *   n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 *   n <= floor(M*B/(B*s + 1)),                                         (3)
 *
 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
 * Thus any n that satisfies the original constraint (1) or its equivalent
 * (2) must also satisfy (3). That is, we have an upper estimate for the
 * maximum value of n. Let's show that this upper estimate,
 *
 *   floor(M*B/(B*s + 1)),                                              (4)
 *
 * also satisfies (1) and, as such, gives the required maximum value.
 * Substituting it into (2) gives:
 *
 *   ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
 *
 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
 *
 *   ceil(floor(M/X)*X) <= ceil(M) == M.
 *
 * Thus the value of (4) gives the maximum n satisfying (1).
 *
 * For the final result we observe that in (4)
 *
 *   M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
 *       == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
 *
 * and
 *
 *   B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
 *       == JS_BITS_PER_DOUBLE.
 */
#define DOUBLES_PER_ARENA                                                     \
    ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
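
/*
 * Worked instance (illustrative; the concrete sizeof(JSGCArenaInfo) is
 * platform-dependent): on a 32-bit build with a 16-byte JSGCArenaInfo,
 * ARENA_INFO_OFFSET == 4096 - 16 == 4080, so
 *
 *   DOUBLES_PER_ARENA == (4080 * 8) / (64 + 1) == 32640 / 65 == 502
 *
 * doubles per 4096-byte arena: 502 * 8 == 4016 bytes of cells plus a
 * 64-byte bitmap (16 words of 32 bits) and the 16-byte arena info,
 * 4016 + 64 + 16 == 4096 bytes exactly.
 */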

/*
 * Check that sizeof(jsuword) divides both ARENA_INFO_OFFSET and
 * sizeof(jsdouble).
 */
JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));

#define DOUBLES_ARENA_BITMAP_WORDS                                            \
    (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))

/* Check that DOUBLES_PER_ARENA indeed maximises (1). */
JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
                 DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
                 ARENA_INFO_OFFSET);

JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
                 sizeof(jsuword) *
                 JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
                 ARENA_INFO_OFFSET);

/*
 * When DOUBLES_PER_ARENA % BITS_PER_DOUBLE_FLAG_UNIT != 0, some bits in the
 * last word of the occupation bitmap are unused.
 */
#define UNUSED_DOUBLE_BITMAP_BITS                                             \
    (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)

JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);

#define DOUBLES_ARENA_BITMAP_OFFSET                                           \
    (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))

#define CHECK_DOUBLE_ARENA_INFO(arenaInfo)                                    \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)),                             \
     JS_ASSERT(!(arenaInfo)->list))

/*
 * Get the start of the bitmap area containing double mark flags in the arena.
 * To access the flag the code uses
 *
 *   JS_TEST_BIT(bitmapStart, index)
 *
 * That is, compared with the case of arenas with non-double things, we count
 * flags from the start of the bitmap area, not from the end.
 */
#define DOUBLE_ARENA_BITMAP(arenaInfo)                                        \
    (CHECK_DOUBLE_ARENA_INFO(arenaInfo),                                      \
     (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)

#define DOUBLE_THING_TO_INDEX(thing)                                          \
    (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)),                          \
     JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) <                          \
               DOUBLES_ARENA_BITMAP_OFFSET),                                  \
     ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
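
/*
 * Example (for illustration): a double stored at arena offset 0x10 has index
 * 0x10 / sizeof(jsdouble) == 2, and its mark flag is bit 2 of the arena's
 * bitmap:
 *
 *   jsbitmap *bitmap = DOUBLE_ARENA_BITMAP(arenaInfo);
 *   JSBool marked = JS_TEST_BIT(bitmap, DOUBLE_THING_TO_INDEX(thing));
 */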

static void
ClearDoubleArenaFlags(JSGCArenaInfo *a)
{
    jsbitmap *bitmap, mask;
    uintN nused;

    /*
     * When some high bits in the last word of the double occupation bitmap
     * are unused, we must set them. Otherwise RefillDoubleFreeList will
     * assume that they correspond to some free cells and will try to
     * allocate them.
     *
     * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
     */
    bitmap = DOUBLE_ARENA_BITMAP(a);
    memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
    mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
    nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
    bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
}

static JS_ALWAYS_INLINE JSBool
IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
{
    jsbitmap *bitmap;

    JS_ASSERT(a->u.hasMarkedDoubles);
    bitmap = DOUBLE_ARENA_BITMAP(a);
    return JS_TEST_BIT(bitmap, index);
}

/*
 * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
 *
 *   1. The next byte in the bitmap area for doubles to check for unmarked
 *      (or free) doubles.
 *   2. Or to the end of the bitmap area when all GC cells of the arena are
 *      allocated.
 *   3. Or to a special sentinel value indicating that there are no arenas
 *      to check for unmarked doubles.
 *
 * We set the sentinel to ARENA_INFO_OFFSET so the single check
 *
 *   ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
 *
 * will cover both the second and the third cases.
 */
#define DOUBLE_BITMAP_SENTINEL  ((jsbitmap *) ARENA_INFO_OFFSET)

/*
 * The maximum number of things to put on the local free list by taking
 * several things from the global free list or from the tail of the last
 * allocated arena to amortize the cost of rt->gcLock.
 */
#define MAX_THREAD_LOCAL_THINGS 64

JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));

JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));

/* We want to use all the available GC thing space for object's slots. */
JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);

/*
 * Ensure that JSObject is allocated from a different GC-list than jsdouble
 * and JSString so we can easily finalize JSObject before these 2 types of
 * GC things. See comments in js_GC.
 */
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));

/*
 * JSPtrTable capacity growth descriptor. The table grows by powers of two
 * starting from capacity JSPtrTableInfo.minCapacity, but switches to linear
 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
 */
typedef struct JSPtrTableInfo {
    uint16      minCapacity;
    uint16      linearGrowthThreshold;
} JSPtrTableInfo;

#define GC_ITERATOR_TABLE_MIN     4
#define GC_ITERATOR_TABLE_LINEAR  1024

static const JSPtrTableInfo iteratorTableInfo = {
    GC_ITERATOR_TABLE_MIN,
    GC_ITERATOR_TABLE_LINEAR
};
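
/*
 * For illustration: with the descriptor above, PtrTableCapacity below yields
 * capacities 4, 8, 16, ..., 1024 while the count stays at or under the
 * 1024-entry threshold, then 2048, 3072, 4096, ... (JS_ROUNDUP(count, 1024))
 * once the table grows past it.
 */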

/* Calculate table capacity based on the current value of JSPtrTable.count. */
static size_t
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
{
    size_t linear, log, capacity;

    linear = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= linear);

    if (count == 0) {
        capacity = 0;
    } else if (count < linear) {
        log = JS_CEILING_LOG2W(count);
        JS_ASSERT(log != JS_BITS_PER_WORD);
        capacity = (size_t)1 << log;
        if (capacity < info->minCapacity)
            capacity = info->minCapacity;
    } else {
        capacity = JS_ROUNDUP(count, linear);
    }

    JS_ASSERT(capacity >= count);
    return capacity;
}

static void
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
{
    if (table->array) {
        JS_ASSERT(table->count > 0);
        js_free(table->array);
        table->array = NULL;
        table->count = 0;
    }
    JS_ASSERT(table->count == 0);
}

static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **array;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify the overflow detection assuming pointer is bigger
             * than byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            capacity = (capacity < info->linearGrowthThreshold)
                       ? 2 * capacity
                       : capacity + info->linearGrowthThreshold;
            if (capacity > (size_t)-1 / sizeof table->array[0])
                goto bad;
        }
        array = (void **) js_realloc(table->array,
                                     capacity * sizeof table->array[0]);
        if (!array)
            goto bad;
#ifdef DEBUG
        memset(array + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = array;
    }

    table->array[count] = ptr;
    table->count = count + 1;

    return JS_TRUE;

  bad:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}

static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            js_free(array);
            table->array = NULL;
            return;
        }
        array = (void **) js_realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}

#ifdef JS_GCMETER
# define METER(x)               ((void) (x))
# define METER_IF(condition, x) ((void) ((condition) && (x)))
#else
# define METER(x)               ((void) 0)
# define METER_IF(condition, x) ((void) 0)
#endif

#define METER_UPDATE_MAX(maxLval, rval)                                       \
    METER_IF((maxLval) < (rval), (maxLval) = (rval))

#if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN

/*
 * For chunks allocated via over-sized malloc, get a pointer to store the gap
 * between the malloc's result and the first arena in the chunk.
 */
static uint32 *
GetMallocedChunkGapPtr(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);

    /* Use the memory after the chunk, see NewGCChunk for details. */
    return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
}

#endif

static jsuword
NewGCChunk(void)
{
    void *p;

#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                         MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
        return (jsuword) p;
# else
        p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return (p == MAP_FAILED) ? 0 : (jsuword) p;
# endif
    }
#endif

#if HAS_POSIX_MEMALIGN
    if (0 != posix_memalign(&p, GC_ARENA_SIZE,
                            GC_ARENA_SIZE * js_gcArenasPerChunk -
                            JS_GC_ARENA_PAD)) {
        return 0;
    }
    return (jsuword) p;
#else
    /*
     * Implement chunk allocation using oversized malloc if mmap and
     * posix_memalign are not available.
     *
     * Since malloc allocates pointers aligned on the word boundary, to get
     * js_gcArenasPerChunk aligned arenas, we need to malloc only
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
     *
     * bytes. But since we store the gap between the malloced pointer and the
     * first arena in the chunk after the chunk, we need to ask for
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
     *
     * bytes to ensure that we always have room to store the gap.
     */
    p = js_malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
    if (!p)
        return 0;

    {
        jsuword chunk;

        chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
        *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
        return chunk;
    }
#endif
}
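
/*
 * Worked example (for illustration): if malloc returns p == 0x20010, the
 * first arena is placed at the next 4096-byte boundary, chunk == 0x21000,
 * and the gap 0x21000 - 0x20010 == 0xff0 is stored in the word just past
 * the chunk's last arena so DestroyGCChunk can recover the malloc pointer.
 */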

static void
DestroyGCChunk(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);

#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        VirtualFree((void *) chunk, 0, MEM_RELEASE);
# elif defined(SOLARIS)
        munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# else
        munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# endif
        return;
    }
#endif

#if HAS_POSIX_MEMALIGN
    js_free((void *) chunk);
#else
    /* See comments in NewGCChunk. */
    JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
    js_free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
#endif
}

#if CHUNKED_ARENA_ALLOCATION

static void
AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    ci->prevp = &rt->gcChunkList;
    ci->next = rt->gcChunkList;
    if (rt->gcChunkList) {
        JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
        rt->gcChunkList->prevp = &ci->next;
    }
    rt->gcChunkList = ci;
}

static void
RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    *ci->prevp = ci->next;
    if (ci->next) {
        JS_ASSERT(ci->next->prevp == &ci->next);
        ci->next->prevp = ci->prevp;
    }
}

#endif

static JSGCArenaInfo *
NewGCArena(JSRuntime *rt)
{
    jsuword chunk;
    JSGCArenaInfo *a;

    if (rt->gcBytes >= rt->gcMaxBytes)
        return NULL;

#if CHUNKED_ARENA_ALLOCATION
    if (js_gcArenasPerChunk == 1) {
#endif
        chunk = NewGCChunk();
        if (chunk == 0)
            return NULL;
        a = ARENA_START_TO_INFO(chunk);
#if CHUNKED_ARENA_ALLOCATION
    } else {
        JSGCChunkInfo *ci;
        uint32 i;
        JSGCArenaInfo *aprev;

        ci = rt->gcChunkList;
        if (!ci) {
            chunk = NewGCChunk();
            if (chunk == 0)
                return NULL;
            JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
            a = GET_ARENA_INFO(chunk, 0);
            a->firstArena = JS_TRUE;
            a->arenaIndex = 0;
            aprev = NULL;
            i = 0;
            do {
                a->prev = aprev;
                aprev = a;
                ++i;
                a = GET_ARENA_INFO(chunk, i);
                a->firstArena = JS_FALSE;
                a->arenaIndex = i;
            } while (i != js_gcArenasPerChunk - 1);
            ci = GET_CHUNK_INFO(chunk, 0);
            ci->lastFreeArena = aprev;
            ci->numFreeArenas = js_gcArenasPerChunk - 1;
            AddChunkToList(rt, ci);
        } else {
            JS_ASSERT(ci->prevp == &rt->gcChunkList);
            a = ci->lastFreeArena;
            aprev = a->prev;
            if (!aprev) {
                JS_ASSERT(ci->numFreeArenas == 1);
                JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
                RemoveChunkFromList(rt, ci);
                chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
                SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
            } else {
                JS_ASSERT(ci->numFreeArenas >= 2);
                JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
                ci->lastFreeArena = aprev;
                ci->numFreeArenas--;
            }
        }
    }
#endif

    rt->gcBytes += GC_ARENA_SIZE;
    a->prevUntracedPage = 0;
    memset(&a->u, 0, sizeof(a->u));

    return a;
}

static void
DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
{
    JSGCArenaInfo *a;

    while (last) {
        a = last;
        last = last->prev;

        METER(rt->gcStats.afree++);
        JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
        rt->gcBytes -= GC_ARENA_SIZE;

#if CHUNKED_ARENA_ALLOCATION
        if (js_gcArenasPerChunk == 1) {
#endif
            DestroyGCChunk(ARENA_INFO_TO_START(a));
#if CHUNKED_ARENA_ALLOCATION
        } else {
            uint32 arenaIndex;
            jsuword chunk;
            uint32 chunkInfoIndex;
            JSGCChunkInfo *ci;
#ifdef DEBUG
            jsuword firstArena;

            firstArena = a->firstArena;
            arenaIndex = a->arenaIndex;
            memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
                   GC_ARENA_SIZE - JS_GC_ARENA_PAD);
            a->firstArena = firstArena;
            a->arenaIndex = arenaIndex;
#endif
            arenaIndex = GET_ARENA_INDEX(a);
            chunk = GET_ARENA_CHUNK(a, arenaIndex);
            chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
            if (chunkInfoIndex == NO_FREE_ARENAS) {
                chunkInfoIndex = arenaIndex;
                SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
                a->prev = NULL;
                ci->lastFreeArena = a;
                ci->numFreeArenas = 1;
                AddChunkToList(rt, ci);
            } else {
                JS_ASSERT(chunkInfoIndex != arenaIndex);
                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
                JS_ASSERT(ci->numFreeArenas != 0);
                JS_ASSERT(ci->lastFreeArena);
                JS_ASSERT(a != ci->lastFreeArena);
                if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
                    RemoveChunkFromList(rt, ci);
                    DestroyGCChunk(chunk);
                } else {
                    ++ci->numFreeArenas;
                    a->prev = ci->lastFreeArena;
                    ci->lastFreeArena = a;
                }
            }
        }
#endif
    }
}

static void
InitGCArenaLists(JSRuntime *rt)
{
    uintN i, thingSize;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        thingSize = GC_FREELIST_NBYTES(i);
        arenaList->last = NULL;
        arenaList->lastCount = THINGS_PER_ARENA(thingSize);
        arenaList->thingSize = thingSize;
        arenaList->freeList = NULL;
    }
    rt->gcDoubleArenaList.first = NULL;
    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
}

static void
FinishGCArenaLists(JSRuntime *rt)
{
    uintN i;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        DestroyGCArenas(rt, arenaList->last);
        arenaList->last = NULL;
        arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
        arenaList->freeList = NULL;
    }
    DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
    rt->gcDoubleArenaList.first = NULL;
    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;

    rt->gcBytes = 0;
    JS_ASSERT(rt->gcChunkList == 0);
}

/*
 * This function must not be called when thing is jsdouble.
 */
static uint8 *
GetGCThingFlags(void *thing)
{
    JSGCArenaInfo *a;
    uint32 index;

    a = THING_TO_ARENA(thing);
    index = THING_TO_INDEX(thing, a->list->thingSize);
    return THING_FLAGP(a, index);
}

/*
 * This function returns null when thing is jsdouble.
 */
static uint8 *
GetGCThingFlagsOrNull(void *thing)
{
    JSGCArenaInfo *a;
    uint32 index;

    if (JSString::isStatic(thing))
        return NULL;
    a = THING_TO_ARENA(thing);
    if (!a->list)
        return NULL;
    index = THING_TO_INDEX(thing, a->list->thingSize);
    return THING_FLAGP(a, index);
}

intN
js_GetExternalStringGCType(JSString *str)
{
    JS_ASSERT(!JSString::isStatic(str));

    uintN type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
    JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
    return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
}

static uint32
MapGCFlagsToTraceKind(uintN flags)
{
    uintN type;

    type = flags & GCF_TYPEMASK;
    JS_ASSERT(type != GCX_DOUBLE);
    JS_ASSERT(type < GCX_NTYPES);
    return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
}

JS_FRIEND_API(uint32)
js_GetGCThingTraceKind(void *thing)
{
    JSGCArenaInfo *a;
    uint32 index;

    if (JSString::isStatic(thing))
        return JSTRACE_STRING;

    a = THING_TO_ARENA(thing);
    if (!a->list)
        return JSTRACE_DOUBLE;
    index = THING_TO_INDEX(thing, a->list->thingSize);
    return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
}

JSRuntime *
js_GetGCStringRuntime(JSString *str)
{
    JSGCArenaList *list;

    list = THING_TO_ARENA(str)->list;

    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);

    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
}

JSBool
js_IsAboutToBeFinalized(JSContext *cx, void *thing)
{
    JSGCArenaInfo *a;
    uint32 index, flags;

    a = THING_TO_ARENA(thing);
    if (!a->list) {
        /*
         * Check if arena has no marked doubles. In that case the bitmap with
         * the mark flags contains all garbage as it is initialized only when
         * marking the first double in the arena.
         */
        if (!a->u.hasMarkedDoubles)
            return JS_TRUE;
        index = DOUBLE_THING_TO_INDEX(thing);
        return !IsMarkedDouble(a, index);
    }
    index = THING_TO_INDEX(thing, a->list->thingSize);
    flags = *THING_FLAGP(a, index);
    return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
}

/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCRootHashEntry {
    JSDHashEntryHdr hdr;
    void            *root;
    const char      *name;
} JSGCRootHashEntry;

/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
#define GC_ROOTS_SIZE 256

#if CHUNKED_ARENA_ALLOCATION

/*
 * For a CPU with extremely large pages, using them for GC things wastes
 * memory.
 */
# define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)

JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);

#endif

JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
#if JS_GC_USE_MMAP
    if (js_gcArenasPerChunk == 0) {
        size_t cpuPageSize, arenasPerPage;
# if defined(XP_WIN)
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        cpuPageSize = si.dwPageSize;
# elif defined(XP_UNIX) || defined(XP_BEOS)
        cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
# else
#  error "Not implemented"
# endif
        /* cpuPageSize is a power of 2. */
        JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
        arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
        if (arenasPerPage == 0) {
            fprintf(stderr,
"JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
"paged allocation for the garbage collector. Please report this.\n",
                    (unsigned) cpuPageSize);
        }
        if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
            /*
             * Use at least 4 GC arenas per paged allocation chunk to minimize
             * the overhead of mmap/VirtualAlloc.
             */
            js_gcUseMmap = JS_TRUE;
            js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
        } else {
            js_gcUseMmap = JS_FALSE;
            js_gcArenasPerChunk = 7;
        }
    }
#endif
    JS_ASSERT(1 <= js_gcArenasPerChunk &&
              js_gcArenasPerChunk <= NO_FREE_ARENAS);

    InitGCArenaLists(rt);
    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }
    rt->gcLocksHash = NULL;     /* create lazily */

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
    rt->gcEmptyArenaPoolLifespan = 30000;

    /*
     * By default the trigger factor gets maximum possible value. This
     * means that GC will not be triggered by growth of GC memory (gcBytes).
     */
    rt->setGCTriggerFactor((uint32) -1);

    /*
     * The assigned value prevents GC from running when GC memory is too low
     * (during JS engine start).
     */
    rt->setGCLastBytes(8192);

    METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
    return JS_TRUE;
}

#ifdef JS_GCMETER

static void
UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
                 uint32 nthings)
{
    size_t narenas;

    narenas = nlivearenas + nkilledArenas;
    JS_ASSERT(narenas >= st->livearenas);

    st->newarenas = narenas - st->livearenas;
    st->narenas = narenas;
    st->livearenas = nlivearenas;
    if (st->maxarenas < narenas)
        st->maxarenas = narenas;
    st->totalarenas += narenas;

    st->nthings = nthings;
    if (st->maxthings < nthings)
        st->maxthings = nthings;
    st->totalthings += nthings;
}

JS_FRIEND_API(void)
js_DumpGCStats(JSRuntime *rt, FILE *fp)
{
    int i;
    size_t sumArenas, sumTotalArenas;
    size_t sumThings, sumMaxThings;
    size_t sumThingSize, sumTotalThingSize;
    size_t sumArenaCapacity, sumTotalArenaCapacity;
    JSGCArenaStats *st;
    size_t thingSize, thingsPerArena;
    size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;

    fprintf(fp, "\nGC allocation statistics:\n");

#define UL(x)       ((unsigned long)(x))
#define ULSTAT(x)   UL(rt->gcStats.x)
#define PERCENT(x,y)  (100.0 * (double) (x) / (double) (y))

    sumArenas = 0;
    sumTotalArenas = 0;
    sumThings = 0;
    sumMaxThings = 0;
    sumThingSize = 0;
    sumTotalThingSize = 0;
    sumArenaCapacity = 0;
    sumTotalArenaCapacity = 0;
    sumAlloc = 0;
    sumLocalAlloc = 0;
    sumFail = 0;
    sumRetry = 0;
    for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
        if (i == -1) {
            thingSize = sizeof(jsdouble);
            thingsPerArena = DOUBLES_PER_ARENA;
            st = &rt->gcStats.doubleArenaStats;
            fprintf(fp,
                    "Arena list for double values (%lu doubles per arena):",
                    UL(thingsPerArena));
        } else {
            thingSize = rt->gcArenaList[i].thingSize;
            thingsPerArena = THINGS_PER_ARENA(thingSize);
            st = &rt->gcStats.arenaStats[i];
            fprintf(fp,
                    "Arena list %d (thing size %lu, %lu things per arena):",
                    i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
        }
        if (st->maxarenas == 0) {
            fputs(" NEVER USED\n", fp);
            continue;
        }
        putc('\n', fp);
        fprintf(fp, "           arenas before GC: %lu\n", UL(st->narenas));
        fprintf(fp, "       new arenas before GC: %lu (%.1f%%)\n",
                UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
        fprintf(fp, "            arenas after GC: %lu (%.1f%%)\n",
                UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
        fprintf(fp, "                 max arenas: %lu\n", UL(st->maxarenas));
        fprintf(fp, "                     things: %lu\n", UL(st->nthings));
        fprintf(fp, "        GC cell utilization: %.1f%%\n",
                PERCENT(st->nthings, thingsPerArena * st->narenas));
        fprintf(fp, "   average cell utilization: %.1f%%\n",
                PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
        fprintf(fp, "                 max things: %lu\n", UL(st->maxthings));
        fprintf(fp, "             alloc attempts: %lu\n", UL(st->alloc));
        fprintf(fp, "        alloc without locks: %lu (%.1f%%)\n",
                UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
        sumArenas += st->narenas;
        sumTotalArenas += st->totalarenas;
        sumThings += st->nthings;
        sumMaxThings += st->maxthings;
        sumThingSize += thingSize * st->nthings;
        sumTotalThingSize += thingSize * st->totalthings;
        sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
        sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
        sumAlloc += st->alloc;
        sumLocalAlloc += st->localalloc;
        sumFail += st->fail;
        sumRetry += st->retry;
    }
    fprintf(fp, "TOTAL STATS:\n");
    fprintf(fp, "            bytes allocated: %lu\n", UL(rt->gcBytes));
    fprintf(fp, "            total GC arenas: %lu\n", UL(sumArenas));
    fprintf(fp, "            total GC things: %lu\n", UL(sumThings));
    fprintf(fp, "        max total GC things: %lu\n", UL(sumMaxThings));
    fprintf(fp, "        GC cell utilization: %.1f%%\n",
            PERCENT(sumThingSize, sumArenaCapacity));
    fprintf(fp, "   average cell utilization: %.1f%%\n",
            PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
    fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
    fprintf(fp, "             alloc attempts: %lu\n", UL(sumAlloc));
    fprintf(fp, "        alloc without locks: %lu (%.1f%%)\n",
            UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
    fprintf(fp, "        allocation failures: %lu\n", UL(sumFail));
    fprintf(fp, "         things born locked: %lu\n", ULSTAT(lockborn));
    fprintf(fp, "           valid lock calls: %lu\n", ULSTAT(lock));
    fprintf(fp, "         valid unlock calls: %lu\n", ULSTAT(unlock));
    fprintf(fp, "       mark recursion depth: %lu\n", ULSTAT(depth));
    fprintf(fp, "     maximum mark recursion: %lu\n", ULSTAT(maxdepth));
    fprintf(fp, "     mark C recursion depth: %lu\n", ULSTAT(cdepth));
    fprintf(fp, "   maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
    fprintf(fp, "      delayed tracing calls: %lu\n", ULSTAT(untraced));
    fprintf(fp, "      max trace later count: %lu\n", ULSTAT(maxuntraced));
    fprintf(fp, "   maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
    fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
    fprintf(fp, "  thing arenas freed so far: %lu\n", ULSTAT(afree));
    fprintf(fp, "     stack segments scanned: %lu\n", ULSTAT(stackseg));
    fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
    fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
    fprintf(fp, "    max reachable closeable: %lu\n", ULSTAT(maxnclose));
    fprintf(fp, "      scheduled close hooks: %lu\n", ULSTAT(closelater));
    fprintf(fp, "  max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));

#undef UL
#undef ULSTAT
#undef PERCENT

#ifdef JS_ARENAMETER
    JS_DumpArenaStats(fp);
#endif
}

#endif /* JS_GCMETER */

#ifdef DEBUG
static void
CheckLeakedRoots(JSRuntime *rt);
#endif

void
js_FinishGC(JSRuntime *rt)
{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
    FinishGCArenaLists(rt);

    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}

JSBool
js_AddRoot(JSContext *cx, void *rp, const char *name)
{
    JSBool ok = js_AddRootRT(cx->runtime, rp, name);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

JSBool
js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
{
    JSBool ok;
    JSGCRootHashEntry *rhe;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     */
    JS_LOCK_GC(rt);
    rhe = (JSGCRootHashEntry *)
          JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        ok = JS_FALSE;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}

JSBool
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}

#ifdef DEBUG

static JSDHashOperator
js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
{
    uint32 *leakedroots = (uint32 *)arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;

    (*leakedroots)++;
    fprintf(stderr,
            "JS engine warning: leaking GC root '%s' at %p\n",
            rhe->name ? (char *)rhe->name : "", rhe->root);

    return JS_DHASH_NEXT;
}

static void
CheckLeakedRoots(JSRuntime *rt)
{
    uint32 leakedroots = 0;

    /* Warn (but don't assert) debug builds of any remaining roots. */
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
                           &leakedroots);
    if (leakedroots > 0) {
        if (leakedroots == 1) {
            fprintf(stderr,
"JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
"                   This root may point to freed memory. Objects reachable\n"
"                   through it have not been finalized.\n",
                    (void *) rt);
        } else {
            fprintf(stderr,
"JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
"                   These roots may point to freed memory. Objects reachable\n"
"                   through them have not been finalized.\n",
                    (unsigned long) leakedroots, (void *) rt);
        }
    }
}

typedef struct NamedRootDumpArgs {
    void (*dump)(const char *name, void *rp, void *data);
    void *data;
} NamedRootDumpArgs;

static JSDHashOperator
js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                     void *arg)
{
    NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;

    if (rhe->name)
        args->dump(rhe->name, rhe->root, args->data);
    return JS_DHASH_NEXT;
}

JS_FRIEND_API(void)
js_DumpNamedRoots(JSRuntime *rt,
                  void (*dump)(const char *name, void *rp, void *data),
                  void *data)
{
    NamedRootDumpArgs args;

    args.dump = dump;
    args.data = data;
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
}

#endif /* DEBUG */

typedef struct GCRootMapArgs {
    JSGCRootMapFun map;
    void *data;
} GCRootMapArgs;

static JSDHashOperator
js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                 void *arg)
{
    GCRootMapArgs *args = (GCRootMapArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    intN mapflags;
    int op;

    mapflags = args->map(rhe->root, rhe->name, args->data);

#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT &&                                    \
    JS_MAP_GCROOT_STOP == JS_DHASH_STOP &&                                    \
    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
    op = (JSDHashOperator)mapflags;
#else
    op = JS_DHASH_NEXT;
    if (mapflags & JS_MAP_GCROOT_STOP)
        op |= JS_DHASH_STOP;
    if (mapflags & JS_MAP_GCROOT_REMOVE)
        op |= JS_DHASH_REMOVE;
#endif

    return (JSDHashOperator) op;
}

uint32
js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
{
    GCRootMapArgs args;
    uint32 rv;

    args.map = map;
    args.data = data;
    JS_LOCK_GC(rt);
    rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
    JS_UNLOCK_GC(rt);
    return rv;
}

JSBool
js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
{
    JSRuntime *rt = cx->runtime;
    JSBool ok;

    JS_ASSERT(!rt->gcRunning);

    JS_LOCK_GC(rt);
    ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
    JS_UNLOCK_GC(rt);
    return ok;
}

static void
CloseNativeIterators(JSContext *cx)
{
    JSRuntime *rt = cx->runtime;
    size_t count, newCount, i;
    void **array;
    JSObject *obj;

    count = rt->gcIteratorTable.count;
    array = rt->gcIteratorTable.array;

    newCount = 0;
    for (i = 0; i != count; ++i) {
        obj = (JSObject *)array[i];
        if (js_IsAboutToBeFinalized(cx, obj))
            js_CloseNativeIterator(cx, obj);
        else
            array[newCount++] = obj;
    }
    ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
}

#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
#define DEBUG_gchist
#endif

#ifdef DEBUG_gchist
#define NGCHIST 64

static struct GCHist {
    JSBool      lastDitch;
    JSGCThing   *freeList;
} gchist[NGCHIST];

unsigned gchpos = 0;
#endif

void
JSRuntime::setGCTriggerFactor(uint32 factor)
{
    JS_ASSERT(factor >= 100);

    gcTriggerFactor = factor;
    setGCLastBytes(gcLastBytes);
}

void
JSRuntime::setGCLastBytes(size_t lastBytes)
{
    gcLastBytes = lastBytes;
    uint64 triggerBytes = uint64(lastBytes) * uint64(gcTriggerFactor / 100);
    if (triggerBytes != size_t(triggerBytes))
        triggerBytes = size_t(-1);
    gcTriggerBytes = size_t(triggerBytes);
}
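
/*
 * Worked example (for illustration): with gcTriggerFactor == 300 (300%),
 * setGCLastBytes(8192) computes triggerBytes == 8192 * 3 == 24576, so
 * allocation-driven GC kicks in once gcBytes reaches three times the heap
 * size measured after the last collection. The 64-bit intermediate guards
 * against overflow: if the product does not round-trip through size_t,
 * the trigger is clamped to size_t(-1), effectively disabling it.
 */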

static JS_INLINE bool
IsGCThresholdReached(JSRuntime *rt)
{
#ifdef JS_GC_ZEAL
    if (rt->gcZeal >= 1)
        return true;
#endif

    /*
     * Since the initial value of the gcLastBytes parameter is not equal to
     * zero (see the js_InitGC function) the return value is false when
     * the gcBytes value is close to zero at the JS engine start.
     */
    return rt->gcMallocBytes >= rt->gcMaxMallocBytes ||
           rt->gcBytes >= rt->gcTriggerBytes;
}

template <typename T, typename NewbornType>
static JS_INLINE T *
NewGCThing(JSContext *cx, uintN flags, NewbornType **newbornRoot)
{
    JSRuntime *rt;
    JSBool doGC;
    JSGCThing *thing;
    uint8 *flagp;
    JSGCArenaList *arenaList;
    JSGCArenaInfo *a;
    uintN thingsLimit;
    JSLocalRootStack *lrs;
#ifdef JS_GCMETER
    JSGCArenaStats *astats;
#endif
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCThing **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;        /* max to take from the global free list */
#endif

    JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE);
    rt = cx->runtime;
    size_t nbytes = sizeof(T);
    JS_ASSERT(JS_ROUNDUP(nbytes, sizeof(JSGCThing)) == nbytes);
    uintN flindex = GC_FREELIST_INDEX(nbytes);

    /* Updates of metering counters here may not be thread-safe. */
    METER(astats = &cx->runtime->gcStats.arenaStats[flindex]);
    METER(astats->alloc++);

#ifdef JS_THREADSAFE
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);

    JSGCThing *&freeList = cx->thread->gcFreeLists[flindex];
    thing = freeList;
    localMallocBytes = JS_THREAD_DATA(cx)->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        freeList = thing->next;
        METER(astats->localalloc++);
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        JS_THREAD_DATA(cx)->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

#if defined JS_GC_ZEAL && defined JS_TRACER
    if (rt->gcZeal >= 1 && JS_TRACE_MONITOR(cx).useReservedObjects)
        goto testReservedObjects;
#endif

    arenaList = &rt->gcArenaList[flindex];
    doGC = IsGCThresholdReached(rt);
    for (;;) {
        if (doGC
#ifdef JS_TRACER
            && !JS_ON_TRACE(cx) && !JS_TRACE_MONITOR(cx).useReservedObjects
#endif
            ) {
            /*
             * Keep rt->gcLock across the call into js_GC so we don't starve
             * and lose to racing threads who deplete the heap just after
             * js_GC has replenished it (or has synchronized with a racing
             * GC that collected a bunch of garbage). This unfair scheduling
             * can happen on certain operating systems. For the gory details,
             * see bug 162779 at https://bugzilla.mozilla.org/.
             */
            js_GC(cx, GC_LAST_DITCH);
            METER(astats->retry++);
        }

        /* Try to get thing from the free list. */
        thing = arenaList->freeList;
        if (thing) {
            arenaList->freeList = thing->next;
            flagp = thing->flagp;
            JS_ASSERT(*flagp & GCF_FINAL);

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking several things from the
             * global free list unless the free list is already populated or
             * we are still at rt->gcMaxMallocBytes barrier. The former is
             * caused via allocating new things in gcCallback(cx, JSGC_END).
             * The latter happens when GC is canceled due to
             * gcCallback(cx, JSGC_BEGIN) returning false.
             */
            if (freeList || rt->gcMallocBytes >= rt->gcMaxMallocBytes)
                break;

            tmpthing = arenaList->freeList;
            if (tmpthing) {
                maxFreeThings = MAX_THREAD_LOCAL_THINGS;
                do {
                    if (!tmpthing->next)
                        break;
                    tmpthing = tmpthing->next;
                } while (--maxFreeThings != 0);

                freeList = arenaList->freeList;
                arenaList->freeList = tmpthing->next;
                tmpthing->next = NULL;
            }
#endif
            break;
        }

        /*
         * Try to allocate things from the last arena. If it is fully used,
         * check if we can allocate a new one and, if we cannot, consider
         * doing a "last ditch" GC unless already tried.
         */
        thingsLimit = THINGS_PER_ARENA(nbytes);
        if (arenaList->lastCount != thingsLimit) {
            JS_ASSERT(arenaList->lastCount < thingsLimit);
            a = arenaList->last;
        } else {
#ifdef JS_TRACER
            if (JS_TRACE_MONITOR(cx).useReservedObjects) {
#ifdef JS_GC_ZEAL
              testReservedObjects:
#endif
                JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);

                thing = (JSGCThing *) tm->reservedObjects;
                flagp = GetGCThingFlags(thing);
                JS_ASSERT(thing);
                tm->reservedObjects = JSVAL_TO_OBJECT(tm->reservedObjects->fslots[0]);
                break;
            }
#endif
            a = NewGCArena(rt);
            if (!a) {
                if (doGC || JS_ON_TRACE(cx))
                    goto fail;
                doGC = JS_TRUE;
                continue;
            }
            a->list = arenaList;
            a->prev = arenaList->last;
            a->prevUntracedPage = 0;
            a->u.untracedThings = 0;
            arenaList->last = a;
            arenaList->lastCount = 0;
        }

        flagp = THING_FLAGP(a, arenaList->lastCount);
        thing = FLAGP_TO_THING(flagp, nbytes);
        arenaList->lastCount++;

#ifdef JS_THREADSAFE
        /*
         * Refill the local free list by taking free things from the last
         * arena. Prefer to order free things by ascending address in the
         * (unscientific) hope of better cache locality.
         */
        if (freeList || rt->gcMallocBytes >= rt->gcMaxMallocBytes)
            break;
        lastptr = &freeList;
        maxFreeThings = thingsLimit - arenaList->lastCount;
        if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
            maxFreeThings = MAX_THREAD_LOCAL_THINGS;
        uint32 lastCount = arenaList->lastCount;
        while (maxFreeThings != 0) {
            --maxFreeThings;

            tmpflagp = THING_FLAGP(a, lastCount);
            tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
            lastCount++;
            tmpthing->flagp = tmpflagp;
            *tmpflagp = GCF_FINAL;      /* signifying that thing is free */

            *lastptr = tmpthing;
            lastptr = &tmpthing->next;
        }
        arenaList->lastCount = lastCount;
#endif
        break;
    }

    /* We successfully allocated the thing. */
#ifdef JS_THREADSAFE
  success:
#endif
    lrs = cx->localRootStack;
    if (lrs) {
        /*
         * If we're in a local root scope, don't set newborn[type] at all, to
         * avoid entraining garbage from it for an unbounded amount of time
         * on this context. A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
            /*
             * When we fail for a thing allocated through the tail of the last
             * arena, thing's flag byte is not initialized. So to prevent GC
             * accessing the uninitialized flags during the finalization, we
             * always mark the thing as final. See bug 337407.
             */
            *flagp = GCF_FINAL;
            goto fail;
        }
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
         */
        *newbornRoot = (T *) thing;
    }

    /* We can't fail now, so update flags. */
    *flagp = (uint8)flags;

#ifdef DEBUG_gchist
    gchist[gchpos].lastDitch = doGC;
    gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
    if (++gchpos == NGCHIST)
        gchpos = 0;
#endif

    /* This is not thread-safe for thread-local allocations. */
    METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++);

#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    return (T *) thing;

  fail:
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    METER(astats->fail++);
    js_ReportOutOfMemory(cx);
    return NULL;
}

JSObject *
js_NewGCObject(JSContext *cx)
{
    return NewGCThing<JSObject>(cx, GCX_OBJECT, &cx->weakRoots.newbornObject);
}

JSString *
js_NewGCString(JSContext *cx)
{
    return NewGCThing<JSString>(cx, GCX_STRING, &cx->weakRoots.newbornString);
}

JSString *
js_NewGCExternalString(JSContext *cx, uintN type)
{
    JS_ASSERT(type < JS_EXTERNAL_STRING_LIMIT);
    return NewGCThing<JSString>(cx, GCX_EXTERNAL_STRING + type,
                                &cx->weakRoots.newbornExternalString[type]);
}

JSFunction *
js_NewGCFunction(JSContext *cx)
{
    return NewGCThing<JSFunction>(cx, GCX_OBJECT, &cx->weakRoots.newbornObject);
}

#if JS_HAS_XML_SUPPORT
JSXML *
js_NewGCXML(JSContext *cx)
{
    return NewGCThing<JSXML>(cx, GCX_XML, &cx->weakRoots.newbornXML);
}
#endif
static JSGCDoubleCell *
RefillDoubleFreeList(JSContext *cx)
{
    JSRuntime *rt;
    jsbitmap *doubleFlags, usedBits;
    JSBool didGC = JS_FALSE;
    JSGCArenaInfo *a;
    uintN bit, index;
    JSGCDoubleCell *cell, *list, *lastcell;

    JS_ASSERT(!cx->doubleFreeList);

    rt = cx->runtime;
    JS_LOCK_GC(rt);

    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

    if (IsGCThresholdReached(rt))
        goto do_gc;

    /*
     * Loop until we find a flag bitmap byte with unset bits indicating free
     * double cells, then set all bits as used and put the cells to the free
     * list for the current context.
     */
    doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
    for (;;) {
        if (((jsuword) doubleFlags & GC_ARENA_MASK) ==
            ARENA_INFO_OFFSET) {
            if (doubleFlags == DOUBLE_BITMAP_SENTINEL ||
                !((JSGCArenaInfo *) doubleFlags)->prev) {
                a = NewGCArena(rt);
                if (!a) {
                  do_gc:
                    if (didGC || JS_ON_TRACE(cx)) {
                        METER(rt->gcStats.doubleArenaStats.fail++);
                        JS_UNLOCK_GC(rt);
                        js_ReportOutOfMemory(cx);
                        return NULL;
                    }
                    js_GC(cx, GC_LAST_DITCH);
                    METER(rt->gcStats.doubleArenaStats.retry++);
                    doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
                    didGC = JS_TRUE;
                    continue;
                }
                a->list = NULL;
                a->prev = NULL;
                if (doubleFlags == DOUBLE_BITMAP_SENTINEL) {
                    JS_ASSERT(!rt->gcDoubleArenaList.first);
                    rt->gcDoubleArenaList.first = a;
                } else {
                    JS_ASSERT(rt->gcDoubleArenaList.first);
                    ((JSGCArenaInfo *) doubleFlags)->prev = a;
                }
                ClearDoubleArenaFlags(a);
                doubleFlags = DOUBLE_ARENA_BITMAP(a);
                break;
            }
            doubleFlags =
                DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev);
        }

        /*
         * When doubleFlags points to the last bitmap word in the arena, its
         * high bits correspond to non-existing cells. ClearDoubleArenaFlags
         * sets such bits to 1. Thus even for this last word a bit is unset
         * iff the corresponding cell exists and is free.
         */
        if (*doubleFlags != (jsbitmap) -1)
            break;
        ++doubleFlags;
    }

    rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1;
    usedBits = *doubleFlags;
    JS_ASSERT(usedBits != (jsbitmap) -1);
    *doubleFlags = (jsbitmap) -1;
    JS_UNLOCK_GC(rt);

    /*
     * Find the index corresponding to the first bit in *doubleFlags. The last
     * bit will have "index + JS_BITS_PER_WORD - 1".
     */
    index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) -
             DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE;
    cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index;

    if (usedBits == 0) {
        /* The common case when all doubles from *doubleFlags are free. */
        JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA);
        list = cell;
        for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell)
            cell->link = cell + 1;
        lastcell->link = NULL;
    } else {
        /*
         * Assemble the free list from free cells from *doubleFlags starting
         * from the tail. In the loop
         *
         *   index + bit >= DOUBLES_PER_ARENA
         *
         * when bit is one of the unused bits. We do not check for such bits
         * explicitly as they must be set and the "if" check filters them out.
         */
        JS_ASSERT(index + JS_BITS_PER_WORD <=
                  DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS);
        bit = JS_BITS_PER_WORD;
        cell += bit;
        list = NULL;
        do {
            --bit;
            --cell;
            if (!(((jsbitmap) 1 << bit) & usedBits)) {
                JS_ASSERT(index + bit < DOUBLES_PER_ARENA);
                JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list);
                cell->link = list;
                list = cell;
            }
        } while (bit != 0);
    }
    return list;
}
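
/*
 * Worked example for the bitmap arithmetic above (a sketch, assuming 32-bit
 * jsbitmap words): one word of *doubleFlags covers JS_BITS_PER_WORD == 32
 * double cells. If doubleFlags sits 8 bytes past DOUBLES_ARENA_BITMAP_OFFSET
 * within the arena, then
 *
 *   index = 8 * JS_BITS_PER_BYTE = 64
 *
 * and that word describes cells [64, 95] of the arena, which is why a single
 * refill never hands out more than JS_BITS_PER_WORD cells.
 */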
/*
 * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue as
 * it immediately consumes the head of the list.
 */
JSBool
js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
{
#ifdef JS_GCMETER
    JSGCArenaStats *astats;
#endif
    JSGCDoubleCell *cell;

    /* Updates of metering counters here are not thread-safe. */
    METER(astats = &cx->runtime->gcStats.doubleArenaStats);
    METER(astats->alloc++);
    cell = cx->doubleFreeList;
    if (!cell) {
        cell = RefillDoubleFreeList(cx);
        if (!cell) {
            METER(astats->fail++);
            return JS_FALSE;
        }
    } else {
        METER(astats->localalloc++);
    }
    cx->doubleFreeList = cell->link;
    cell->number = d;
    *vp = DOUBLE_TO_JSVAL(&cell->number);
    return JS_TRUE;
}
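
/*
 * A minimal usage sketch for the above (the caller must ensure vp itself is
 * rooted, e.g. a stack slot that TRACE_JSVALS below will visit):
 *
 *   jsval v;
 *   if (!js_NewDoubleInRootedValue(cx, 3.25, &v))
 *       return JS_FALSE;    // OOM already reported
 *   // JSVAL_TO_DOUBLE(v) now points into a GC double cell.
 */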
jsdouble *
js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
{
    jsval v;
    jsdouble *dp;

    if (!js_NewDoubleInRootedValue(cx, d, &v))
        return NULL;

    JS_ASSERT(JSVAL_IS_DOUBLE(v));
    dp = JSVAL_TO_DOUBLE(v);
    if (cx->localRootStack) {
        if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0)
            return NULL;
    } else {
        cx->weakRoots.newbornDouble = dp;
    }
    return dp;
}
JSBool
js_ReserveObjects(JSContext *cx, size_t nobjects)
{
    /*
     * Ensure at least nobjects objects are in the list. fslots[1] of each
     * object on the reservedObjects list is the length of the list from
     * there.
     */
    JSObject *&head = JS_TRACE_MONITOR(cx).reservedObjects;
    size_t i = head ? JSVAL_TO_INT(head->fslots[1]) : 0;
    while (i < nobjects) {
        JSObject *obj = js_NewGCObject(cx);
        if (!obj)
            return JS_FALSE;
        memset(obj, 0, sizeof(JSObject));
        /* The class must be set to something for finalization. */
        obj->classword = (jsuword) &js_ObjectClass;
        obj->fslots[0] = OBJECT_TO_JSVAL(head);
        i++;
        obj->fslots[1] = INT_TO_JSVAL(i);
        head = obj;
    }

    return JS_TRUE;
}
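
/*
 * Shape sketch of the reserved list built above (lengths cached in
 * fslots[1], links threaded through fslots[0], no auxiliary memory needed);
 * after reserving three objects the trace monitor holds:
 *
 *   reservedObjects -> [len 3] -> [len 2] -> [len 1] -> null
 *
 * so the "already long enough" check at the top of js_ReserveObjects is O(1).
 */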
JSBool
js_AddAsGCBytes(JSContext *cx, size_t sz)
{
    JSRuntime *rt;

    rt = cx->runtime;
    if (rt->gcBytes >= rt->gcMaxBytes ||
        sz > (size_t) (rt->gcMaxBytes - rt->gcBytes) ||
        IsGCThresholdReached(rt)) {
        if (JS_ON_TRACE(cx)) {
            /*
             * If we can't leave the trace, signal OOM condition, otherwise
             * exit from trace and proceed with GC.
             */
            if (!js_CanLeaveTrace(cx)) {
                JS_ReportOutOfMemory(cx);
                return JS_FALSE;
            }
            js_LeaveTrace(cx);
        }
        js_GC(cx, GC_LAST_DITCH);
        if (rt->gcBytes >= rt->gcMaxBytes ||
            sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)) {
            JS_ReportOutOfMemory(cx);
            return JS_FALSE;
        }
    }
    rt->gcBytes += (uint32) sz;
    return JS_TRUE;
}
void
js_RemoveAsGCBytes(JSRuntime *rt, size_t sz)
{
    JS_ASSERT((size_t) rt->gcBytes >= sz);
    rt->gcBytes -= (uint32) sz;
}
/*
 * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
 * they have no descendants to mark during the GC. Currently the optimization
 * is used only for non-dependent strings.
 */
#define GC_THING_IS_SHALLOW(flagp, thing)                                     \
    ((flagp) &&                                                               \
     ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING ||                     \
      ((*(flagp) & GCF_TYPEMASK) == GCX_STRING &&                             \
       !((JSString *) (thing))->isDependent())))

/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCLockHashEntry {
    JSDHashEntryHdr hdr;
    const void      *thing;
    uint32          count;
} JSGCLockHashEntry;
JSBool
js_LockGCThingRT(JSRuntime *rt, void *thing)
{
    JSBool shallow, ok;
    uint8 *flagp;
    JSGCLockHashEntry *lhe;

    if (!thing)
        return JS_TRUE;

    flagp = GetGCThingFlagsOrNull(thing);
    JS_LOCK_GC(rt);
    shallow = GC_THING_IS_SHALLOW(flagp, thing);

    /*
     * Avoid adding a rt->gcLocksHash entry for shallow things until someone
     * nests a lock on the same thing.
     */
    if (shallow && !(*flagp & GCF_LOCK)) {
        *flagp |= GCF_LOCK;
        METER(rt->gcStats.lock++);
        ok = JS_TRUE;
        goto out;
    }

    if (!rt->gcLocksHash) {
        rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
                                           sizeof(JSGCLockHashEntry),
                                           GC_ROOTS_SIZE);
        if (!rt->gcLocksHash) {
            ok = JS_FALSE;
            goto out;
        }
    }

    lhe = (JSGCLockHashEntry *)
          JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
    if (!lhe) {
        ok = JS_FALSE;
        goto out;
    }
    if (!lhe->thing) {
        lhe->thing = thing;
        lhe->count = 1;
    } else {
        JS_ASSERT(lhe->count >= 1);
        lhe->count++;
    }

    METER(rt->gcStats.lock++);
    ok = JS_TRUE;
  out:
    JS_UNLOCK_GC(rt);
    return ok;
}
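
/*
 * Locking semantics sketch: the first lock of a shallow thing only sets
 * GCF_LOCK; a second lock of the same thing falls through to the hash and
 * records a nesting count there. Unlocks drain the hash entry first; the
 * final unlock then clears GCF_LOCK. For example:
 *
 *   js_LockGCThingRT(rt, str);    // sets GCF_LOCK, no hash entry
 *   js_LockGCThingRT(rt, str);    // adds a hash entry with count 1
 *   js_UnlockGCThingRT(rt, str);  // removes the hash entry
 *   js_UnlockGCThingRT(rt, str);  // clears GCF_LOCK
 */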
void
js_UnlockGCThingRT(JSRuntime *rt, void *thing)
{
    uint8 *flagp;
    JSBool shallow;
    JSGCLockHashEntry *lhe;

    if (!thing)
        return;

    flagp = GetGCThingFlagsOrNull(thing);
    JS_LOCK_GC(rt);
    shallow = GC_THING_IS_SHALLOW(flagp, thing);

    if (shallow && !(*flagp & GCF_LOCK))
        goto out;
    if (!rt->gcLocksHash ||
        (lhe = (JSGCLockHashEntry *)
               JS_DHashTableOperate(rt->gcLocksHash, thing,
                                    JS_DHASH_LOOKUP),
         JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
        /* Shallow entry is not in the hash -> clear its lock bit. */
        if (shallow)
            *flagp &= ~GCF_LOCK;
        else
            goto out;
    } else {
        if (--lhe->count != 0)
            goto out;
        JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
    }

    rt->gcPoke = JS_TRUE;
    METER(rt->gcStats.unlock++);
  out:
    JS_UNLOCK_GC(rt);
}
JS_PUBLIC_API(void)
JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
{
    switch (kind) {
      case JSTRACE_OBJECT: {
        /* If obj has no map, it must be a newborn. */
        JSObject *obj = (JSObject *) thing;
        if (!obj->map)
            break;
        obj->map->ops->trace(trc, obj);
        break;
      }

      case JSTRACE_STRING: {
        JSString *str = (JSString *) thing;
        if (str->isDependent())
            JS_CALL_STRING_TRACER(trc, str->dependentBase(), "base");
        break;
      }

#if JS_HAS_XML_SUPPORT
      case JSTRACE_XML:
        js_TraceXML(trc, (JSXML *) thing);
        break;
#endif
    }
}
/*
 * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings.
 */
#define THINGS_PER_UNTRACED_BIT(thingSize)                                    \
    JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
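
/*
 * Example (a sketch): with 1024 things per arena and 32-bit words,
 * THINGS_PER_UNTRACED_BIT yields JS_HOWMANY(1024, 32) == 32, i.e. each bit
 * of u.untracedThings stands for a run of 32 consecutive things. This is
 * why TraceDelayedChildren below must re-scan the flag bytes within a bit's
 * run to find the actual untraced things.
 */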
static void
DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
{
    JSGCArenaInfo *a;
    uint32 untracedBitIndex;
    jsuword bit;

    /*
     * Things with children to be traced later are marked with
     * GCF_MARK | GCF_FINAL flags.
     */
    JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
    *flagp |= GCF_FINAL;

    METER(rt->gcStats.untraced++);
#ifdef DEBUG
    ++rt->gcTraceLaterCount;
    METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount);
#endif

    a = FLAGP_TO_ARENA(flagp);
    untracedBitIndex = FLAGP_TO_INDEX(flagp) /
                       THINGS_PER_UNTRACED_BIT(a->list->thingSize);
    JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
    bit = (jsuword) 1 << untracedBitIndex;
    if (a->u.untracedThings != 0) {
        JS_ASSERT(rt->gcUntracedArenaStackTop);
        if (a->u.untracedThings & bit) {
            /* bit already covers things with children to trace later. */
            return;
        }
        a->u.untracedThings |= bit;
    } else {
        /*
         * The thing is the first thing with not yet traced children in the
         * whole arena, so push the arena on the stack of arenas with things
         * to be traced later unless the arena has already been pushed. We
         * detect that through checking prevUntracedPage as the field is 0
         * only for not yet pushed arenas. To ensure that
         *   prevUntracedPage != 0
         * even when the stack contains one element, we make prevUntracedPage
         * for the arena at the bottom to point to itself.
         *
         * See comments in TraceDelayedChildren.
         */
        a->u.untracedThings = bit;
        if (a->prevUntracedPage == 0) {
            if (!rt->gcUntracedArenaStackTop) {
                /* Stack was empty, mark the arena as the bottom element. */
                a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
            } else {
                JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
                a->prevUntracedPage =
                    ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
            }
            rt->gcUntracedArenaStackTop = a;
        }
    }
    JS_ASSERT(rt->gcUntracedArenaStackTop);
}
static void
TraceDelayedChildren(JSTracer *trc)
{
    JSRuntime *rt;
    JSGCArenaInfo *a, *aprev;
    uint32 thingSize;
    uint32 thingsPerUntracedBit;
    uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;
    JSGCThing *thing;
    uint8 *flagp;

    rt = trc->context->runtime;
    a = rt->gcUntracedArenaStackTop;
    if (!a) {
        JS_ASSERT(rt->gcTraceLaterCount == 0);
        return;
    }

    for (;;) {
        /*
         * The following assert verifies that the current arena belongs to the
         * untraced stack, since DelayTracingChildren ensures that even for
         * the stack's bottom prevUntracedPage != 0 but rather points to the
         * arena itself.
         */
        JS_ASSERT(a->prevUntracedPage != 0);
        JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
        thingSize = a->list->thingSize;
        indexLimit = (a == a->list->last)
                     ? a->list->lastCount
                     : THINGS_PER_ARENA(thingSize);
        thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);

        /*
         * We cannot use do-while loop here as a->u.untracedThings can be zero
         * before the loop as a leftover from the previous iterations. See
         * comments after the loop.
         */
        while (a->u.untracedThings != 0) {
            untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings);
            a->u.untracedThings &= ~((jsuword) 1 << untracedBitIndex);
            thingIndex = untracedBitIndex * thingsPerUntracedBit;
            endIndex = thingIndex + thingsPerUntracedBit;

            /*
             * endIndex can go beyond the last allocated thing as the real
             * limit can be "inside" the bit.
             */
            if (endIndex > indexLimit)
                endIndex = indexLimit;
            JS_ASSERT(thingIndex < indexLimit);

            do {
                /*
                 * Skip free or already traced things that share the bit
                 * with untraced ones.
                 */
                flagp = THING_FLAGP(a, thingIndex);
                if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
                    continue;
                *flagp &= ~GCF_FINAL;
#ifdef DEBUG
                JS_ASSERT(rt->gcTraceLaterCount != 0);
                --rt->gcTraceLaterCount;
#endif
                thing = FLAGP_TO_THING(flagp, thingSize);
                JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp));
            } while (++thingIndex != endIndex);
        }

        /*
         * We finished tracing of all things in the arena but we can only
         * pop it from the stack if the arena is the stack's top.
         *
         * When JS_TraceChildren from the above calls JS_CallTracer that in
         * turn on low C stack calls DelayTracingChildren and the latter
         * pushes new arenas to the untraced stack, we have to skip popping
         * of this arena until it becomes the top of the stack again.
         */
        if (a == rt->gcUntracedArenaStackTop) {
            aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
            a->prevUntracedPage = 0;
            if (a == aprev) {
                /*
                 * prevUntracedPage points to itself and we reached the
                 * bottom of the stack.
                 */
                break;
            }
            rt->gcUntracedArenaStackTop = a = aprev;
        } else {
            a = rt->gcUntracedArenaStackTop;
        }
    }
    JS_ASSERT(rt->gcUntracedArenaStackTop);
    JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
    rt->gcUntracedArenaStackTop = NULL;
    JS_ASSERT(rt->gcTraceLaterCount == 0);
}
JS_PUBLIC_API(void)
JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
{
    JSContext *cx;
    JSRuntime *rt;
    JSGCArenaInfo *a;
    uintN index;
    uint8 *flagp;

    JS_ASSERT(thing);
    JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
    JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);

    if (!IS_GC_MARKING_TRACER(trc)) {
        trc->callback(trc, thing, kind);
        goto out;
    }

    cx = trc->context;
    rt = cx->runtime;
    JS_ASSERT(rt->gcMarkingTracer == trc);
    JS_ASSERT(rt->gcLevel > 0);

    /*
     * Optimize for string and double as their size is known and their tracing
     * is not recursive.
     */
    switch (kind) {
      case JSTRACE_DOUBLE:
        a = THING_TO_ARENA(thing);
        JS_ASSERT(!a->list);
        if (!a->u.hasMarkedDoubles) {
            ClearDoubleArenaFlags(a);
            a->u.hasMarkedDoubles = JS_TRUE;
        }
        index = DOUBLE_THING_TO_INDEX(thing);
        JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);
        goto out;

      case JSTRACE_STRING:
        for (;;) {
            if (JSString::isStatic(thing))
                goto out;
            flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
            JS_ASSERT((*flagp & GCF_FINAL) == 0);
            JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
            if (!((JSString *) thing)->isDependent()) {
                *flagp |= GCF_MARK;
                goto out;
            }
            if (*flagp & GCF_MARK)
                goto out;
            *flagp |= GCF_MARK;
            thing = ((JSString *) thing)->dependentBase();
        }
        /* NOTREACHED */
    }

    flagp = GetGCThingFlags(thing);
    JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
    if (*flagp & GCF_MARK)
        goto out;

    /*
     * We check for non-final flag only if mark is unset as
     * DelayTracingChildren uses the flag. See comments in the function.
     */
    JS_ASSERT(*flagp != GCF_FINAL);
    *flagp |= GCF_MARK;
    if (!cx->insideGCMarkCallback) {
        /*
         * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
         * uses the non-recursive code that otherwise would be called only on
         * a low C stack condition.
         */
#ifdef JS_GC_ASSUME_LOW_C_STACK
# define RECURSION_TOO_DEEP() JS_TRUE
#else
        int stackDummy;
# define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
#endif
        if (RECURSION_TOO_DEEP())
            DelayTracingChildren(rt, flagp);
        else
            JS_TraceChildren(trc, thing, kind);
    } else {
        /*
         * For API compatibility we allow for the callback to assume that
         * after it calls JS_MarkGCThing for the last time, the callback can
         * start to finalize its own objects that are only referenced by
         * unmarked GC things.
         *
         * Since we do not know which call from inside the callback is the
         * last, we ensure that children of all marked things are traced and
         * call TraceDelayedChildren(trc) after tracing the thing.
         *
         * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
         * for the things with untraced children, calling DelayTracingChildren
         * is useless here. Hence we always trace thing's children even with a
         * low C stack.
         */
        cx->insideGCMarkCallback = JS_FALSE;
        JS_TraceChildren(trc, thing, kind);
        TraceDelayedChildren(trc);
        cx->insideGCMarkCallback = JS_TRUE;
    }

  out:
#ifdef DEBUG
    trc->debugPrinter = NULL;
    trc->debugPrintArg = NULL;
#endif
    return;     /* to avoid out: right_curl when DEBUG is not defined */
}
void
js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
{
    void *thing;
    uint32 kind;

    if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
        thing = JSVAL_TO_TRACEABLE(v);
        kind = JSVAL_TRACE_KIND(v);
        JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v)));
    } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
        /* v can be an arbitrary GC thing reinterpreted as an object. */
        thing = JSVAL_TO_OBJECT(v);
        kind = js_GetGCThingTraceKind(thing);
    } else {
        return;
    }
    JS_CallTracer(trc, thing, kind);
}
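
/*
 * A minimal usage sketch, mirroring the callers below: name the traced edge
 * first, then let the helper filter out non-GC-thing jsvals:
 *
 *   JS_SET_TRACING_NAME(trc, "lastInternalResult");
 *   js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
 */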
static JSDHashOperator
gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
                  void *arg)
{
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *) hdr;
    JSTracer *trc = (JSTracer *) arg;
    jsval *rp = (jsval *) rhe->root;
    jsval v = *rp;

    /* Ignore null reference, scalar values, and static strings. */
    if (!JSVAL_IS_NULL(v) &&
        JSVAL_IS_GCTHING(v) &&
        !JSString::isStatic(JSVAL_TO_GCTHING(v))) {
#ifdef DEBUG
        JSBool root_points_to_gcArenaList = JS_FALSE;
        jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
        JSRuntime *rt;
        uintN i;
        JSGCArenaList *arenaList;
        uint32 thingSize;
        JSGCArenaInfo *a;
        size_t limit;

        rt = trc->context->runtime;
        for (i = 0; i < GC_NUM_FREELISTS; i++) {
            arenaList = &rt->gcArenaList[i];
            thingSize = arenaList->thingSize;
            limit = (size_t) arenaList->lastCount * thingSize;
            for (a = arenaList->last; a; a = a->prev) {
                if (thing - ARENA_INFO_TO_START(a) < limit) {
                    root_points_to_gcArenaList = JS_TRUE;
                    break;
                }
                limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
            }
        }
        if (!root_points_to_gcArenaList) {
            for (a = rt->gcDoubleArenaList.first; a; a = a->prev) {
                if (thing - ARENA_INFO_TO_START(a) <
                    DOUBLES_PER_ARENA * sizeof(jsdouble)) {
                    root_points_to_gcArenaList = JS_TRUE;
                    break;
                }
            }
        }
        if (!root_points_to_gcArenaList && rhe->name) {
            fprintf(stderr,
"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
"invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
"The root's name is \"%s\".\n",
                    rhe->name);
        }
        JS_ASSERT(root_points_to_gcArenaList);
#endif
        JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
        js_CallValueTracerIfGCThing(trc, v);
    }

    return JS_DHASH_NEXT;
}
static JSDHashOperator
gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
                  void *arg)
{
    JSGCLockHashEntry *lhe = (JSGCLockHashEntry *) hdr;
    void *thing = (void *) lhe->thing;
    JSTracer *trc = (JSTracer *) arg;
    uint32 traceKind;

    JS_ASSERT(lhe->count >= 1);
    traceKind = js_GetGCThingTraceKind(thing);
    JS_CALL_TRACER(trc, thing, traceKind, "locked object");
    return JS_DHASH_NEXT;
}
#define TRACE_JSVALS(trc, len, vec, name)                                     \
    JS_BEGIN_MACRO                                                            \
        jsval _v, *_vp, *_end;                                                \
                                                                              \
        for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) {                \
            _v = *_vp;                                                        \
            if (JSVAL_IS_TRACEABLE(_v)) {                                     \
                JS_SET_TRACING_INDEX(trc, name, _vp - (vec));                 \
                JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v),                    \
                              JSVAL_TRACE_KIND(_v));                          \
            }                                                                 \
        }                                                                     \
    JS_END_MACRO
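
/*
 * Usage sketch: TRACE_JSVALS traces a contiguous jsval vector, tagging each
 * traceable slot with "name" plus its index for debug printing; e.g. the
 * frame tracer below does TRACE_JSVALS(trc, nslots, fp->slots, "slot").
 */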
void
js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
{
    uintN nslots, minargs, skip;

    if (fp->callobj)
        JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
    if (fp->argsobj)
        JS_CALL_OBJECT_TRACER(trc, JSVAL_TO_OBJECT(fp->argsobj), "arguments");
    if (fp->varobj)
        JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
    if (fp->script) {
        js_TraceScript(trc, fp->script);

        /* fp->slots is null for watch pseudo-frames, see js_watch_set. */
        if (fp->slots) {
            /*
             * Don't mark what has not been pushed yet, or what has been
             * popped already.
             */
            if (fp->regs && fp->regs->sp) {
                nslots = (uintN) (fp->regs->sp - fp->slots);
                JS_ASSERT(nslots >= fp->script->nfixed);
            } else {
                nslots = fp->script->nfixed;
            }
            TRACE_JSVALS(trc, nslots, fp->slots, "slot");
        }
    } else {
        JS_ASSERT(!fp->slots);
        JS_ASSERT(!fp->regs);
    }

    /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
    JS_CALL_VALUE_TRACER(trc, fp->thisv, "this");

    if (fp->argv) {
        JS_CALL_VALUE_TRACER(trc, fp->argv[-2], "callee");
        nslots = fp->argc;
        skip = 0;
        if (fp->fun) {
            minargs = FUN_MINARGS(fp->fun);
            if (minargs > nslots)
                nslots = minargs;
            if (!FUN_INTERPRETED(fp->fun)) {
                JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
                nslots += fp->fun->u.n.extra;
            }
            if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
                skip = 2 + fp->argc;
        }
        TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
    }

    JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
    if (fp->scopeChain)
        JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
}
static void
TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
{
    if (wr->newbornObject)
        JS_CALL_OBJECT_TRACER(trc, wr->newbornObject, "newborn_object");
    if (wr->newbornString)
        JS_CALL_STRING_TRACER(trc, wr->newbornString, "newborn_string");
    if (wr->newbornDouble)
        JS_CALL_DOUBLE_TRACER(trc, wr->newbornDouble, "newborn_double");
#if JS_HAS_XML_SUPPORT
    if (wr->newbornXML)
        JS_CALL_TRACER(trc, wr->newbornXML, JSTRACE_XML, "newborn_xml");
#endif
    for (uint32 i = 0; i != JS_EXTERNAL_STRING_LIMIT; i++) {
        JSString *str = wr->newbornExternalString[i];
        if (str)
            JS_CALL_STRING_TRACER(trc, str, "newborn_external_string");
    }
    JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
    JS_SET_TRACING_NAME(trc, "lastInternalResult");
    js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
}
JS_REQUIRES_STACK JS_FRIEND_API(void)
js_TraceContext(JSTracer *trc, JSContext *acx)
{
    JSStackFrame *fp, *nextChain;
    JSStackHeader *sh;
    JSTempValueRooter *tvr;

    if (IS_GC_MARKING_TRACER(trc)) {

#define FREE_OLD_ARENAS(pool)                                                 \
        JS_BEGIN_MACRO                                                        \
            int64 _age;                                                       \
            JSArena * _a = (pool).current;                                    \
            if (_a == (pool).first.next &&                                    \
                _a->avail == _a->base + sizeof(int64)) {                      \
                _age = JS_Now() - *(int64 *) _a->base;                        \
                if (_age > (int64) acx->runtime->gcEmptyArenaPoolLifespan *   \
                           1000)                                              \
                    JS_FreeArenaPool(&(pool));                                \
            }                                                                 \
        JS_END_MACRO

        /*
         * Release the stackPool's arenas if the stackPool has existed for
         * longer than the limit specified by gcEmptyArenaPoolLifespan.
         */
        FREE_OLD_ARENAS(acx->stackPool);

        /*
         * Release the regexpPool's arenas based on the same criterion as for
         * the stackPool.
         */
        FREE_OLD_ARENAS(acx->regexpPool);

        /*
         * Clear the double free list to release all the pre-allocated
         * doubles.
         */
        acx->doubleFreeList = NULL;
    }

    /*
     * Iterate frame chain and dormant chains.
     *
     * (NB: see comment on this whole "dormant" thing in js_Execute.)
     *
     * Since js_GetTopStackFrame needs to dereference cx->thread to check for
     * JIT frames, we check for non-null thread here and avoid null checks
     * there. See bug 471197.
     */
#ifdef JS_THREADSAFE
    if (acx->thread)
#endif
    {
        fp = js_GetTopStackFrame(acx);
        nextChain = acx->dormantFrameChain;
        if (!fp)
            goto next_chain;

        /* The top frame must not be dormant. */
        JS_ASSERT(!fp->dormantNext);
        for (;;) {
            do {
                js_TraceStackFrame(trc, fp);
            } while ((fp = fp->down) != NULL);

          next_chain:
            if (!nextChain)
                break;
            fp = nextChain;
            nextChain = nextChain->dormantNext;
        }
    }

    /* Mark other roots-by-definition in acx. */
    if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
        JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
    TraceWeakRoots(trc, &acx->weakRoots);
    if (acx->throwing) {
        JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
    } else {
        /* Avoid keeping GC-ed junk stored in JSContext.exception. */
        acx->exception = JSVAL_NULL;
    }

    for (sh = acx->stackHeaders; sh; sh = sh->down) {
        METER(trc->context->runtime->gcStats.stackseg++);
        METER(trc->context->runtime->gcStats.segslots += sh->nslots);
        TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
    }

    if (acx->localRootStack)
        js_TraceLocalRoots(trc, acx->localRootStack);

    for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
        switch (tvr->count) {
          case JSTVU_SINGLE:
            JS_SET_TRACING_NAME(trc, "tvr->u.value");
            js_CallValueTracerIfGCThing(trc, tvr->u.value);
            break;
          case JSTVU_TRACE:
            tvr->u.trace(trc, tvr);
            break;
          case JSTVU_SPROP:
            tvr->u.sprop->trace(trc);
            break;
          case JSTVU_WEAK_ROOTS:
            TraceWeakRoots(trc, tvr->u.weakRoots);
            break;
          case JSTVU_COMPILER:
            tvr->u.compiler->trace(trc);
            break;
          case JSTVU_SCRIPT:
            js_TraceScript(trc, tvr->u.script);
            break;
          case JSTVU_ENUMERATOR:
            static_cast<JSAutoEnumStateRooter *>(tvr)->mark(trc);
            break;
          default:
            JS_ASSERT(tvr->count >= 0);
            TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
        }
    }

    if (acx->sharpObjectMap.depth > 0)
        js_TraceSharpMap(trc, &acx->sharpObjectMap);

    js_TraceRegExpStatics(trc, acx);

#ifdef JS_TRACER
    InterpState* state = acx->interpState;
    while (state) {
        if (state->nativeVp)
            TRACE_JSVALS(trc, state->nativeVpLen, state->nativeVp, "nativeVp");
        state = state->prev;
    }
#endif
}
static void
MarkReservedObjects(JSTraceMonitor *tm)
{
    /* Keep the reserved objects. */
    for (JSObject *obj = tm->reservedObjects;
         obj;
         obj = JSVAL_TO_OBJECT(obj->fslots[0])) {
        uint8 *flagp = GetGCThingFlags(obj);
        JS_ASSERT((*flagp & GCF_TYPEMASK) == GCX_OBJECT);
        JS_ASSERT(*flagp != GCF_FINAL);
        *flagp |= GCF_MARK;
    }
}

#ifdef JS_THREADSAFE
static JSDHashOperator
reserved_objects_marker(JSDHashTable *table, JSDHashEntryHdr *hdr,
                        uint32 num, void *arg)
{
    JSThread *thread = ((JSThreadsHashEntry *) hdr)->thread;

    MarkReservedObjects(&thread->data.traceMonitor);
    return JS_DHASH_NEXT;
}
#endif
JS_REQUIRES_STACK void
js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
{
    JSRuntime *rt = trc->context->runtime;
    JSContext *iter, *acx;

    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
    if (rt->gcLocksHash)
        JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
    js_TraceAtomState(trc, allAtoms);
    js_TraceRuntimeNumberState(trc);

    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
        js_TraceContext(trc, acx);

    js_TraceThreads(rt, trc);

    if (rt->gcExtraRootsTraceOp)
        rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);

    for (int i = 0; i < JSBUILTIN_LIMIT; i++) {
        if (rt->builtinFunctions[i]) {
            JS_CALL_OBJECT_TRACER(trc, rt->builtinFunctions[i],
                                  "builtin function");
        }
    }

    /* Mark the reserved objects unless we are shutting down. */
    if (IS_GC_MARKING_TRACER(trc) && rt->state != JSRTS_LANDING) {
#ifdef JS_THREADSAFE
        JS_DHashTableEnumerate(&rt->threads, reserved_objects_marker, NULL);
#else
        MarkReservedObjects(&rt->threadData.traceMonitor);
#endif
    }
}
void
js_TriggerGC(JSContext *cx, JSBool gcLocked)
{
    JSRuntime *rt = cx->runtime;

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->requestDepth > 0);
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcIsNeeded)
        return;

    /*
     * Trigger the GC when it is safe to call an operation callback on any
     * thread.
     */
    rt->gcIsNeeded = JS_TRUE;
    js_TriggerAllOperationCallbacks(rt, gcLocked);
}
static void
ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
{
    JSObject *obj = ssr->obj;
    JSObject *pobj = ssr->pobj;
    uint32 slot = ssr->slot;

    while (pobj) {
        pobj = js_GetWrappedObject(cx, pobj);
        if (pobj == obj) {
            /* Flag the cycle for the requester to report. */
            ssr->cycle = true;
            return;
        }
        pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
    }

    pobj = ssr->pobj;
    if (slot == JSSLOT_PROTO) {
        obj->setProto(pobj);
    } else {
        JS_ASSERT(slot == JSSLOT_PARENT);
        obj->setParent(pobj);
    }
}
void
js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data)
{
    JSScript **listp, *script;

    for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) {
        listp = &data->scriptsToGC[i];
        while ((script = *listp) != NULL) {
            *listp = script->u.nextToGC;
            script->u.nextToGC = NULL;
            js_DestroyScript(cx, script);
        }
    }
}
static void
FinalizeObject(JSContext *cx, JSObject *obj)
{
    /* Cope with stillborn objects that have no map. */
    if (!obj->map)
        return;

    if (JS_UNLIKELY(cx->debugHooks->objectHook != NULL)) {
        cx->debugHooks->objectHook(cx, obj, JS_FALSE,
                                   cx->debugHooks->objectHookData);
    }

    /* Finalize obj first, in case it needs map and slots. */
    JSClass *clasp = STOBJ_GET_CLASS(obj);
    if (clasp->finalize)
        clasp->finalize(cx, obj);

#ifdef INCLUDE_MOZILLA_DTRACE
    if (JAVASCRIPT_OBJECT_FINALIZE_ENABLED())
        jsdtrace_object_finalize(obj);
#endif

    if (JS_LIKELY(OBJ_IS_NATIVE(obj)))
        OBJ_SCOPE(obj)->drop(cx, obj);
    js_FreeSlots(cx, obj);
}
JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT == 8);

static JSStringFinalizeOp str_finalizers[JS_EXTERNAL_STRING_LIMIT] = {
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};

intN
js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
                                 JSStringFinalizeOp newop)
{
    for (uintN i = 0; i != JS_ARRAY_LENGTH(str_finalizers); i++) {
        if (str_finalizers[i] == oldop) {
            str_finalizers[i] = newop;
            return (intN) i;
        }
    }
    return -1;
}
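
/*
 * Registration sketch: an embedding installs a finalizer by swapping it into
 * a NULL slot and removes it by swapping back, using the returned index as
 * the external string type (MyFinalizer is a hypothetical embedder function):
 *
 *   intN type = js_ChangeExternalStringFinalizer(NULL, MyFinalizer);
 *   ...
 *   js_ChangeExternalStringFinalizer(MyFinalizer, NULL);
 */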
/*
 * cx is NULL when we are called from js_FinishAtomState to force the
 * finalization of the permanently interned strings.
 */
void
js_FinalizeStringRT(JSRuntime *rt, JSString *str, intN type, JSContext *cx)
{
    jschar *chars;
    JSBool valid;
    JSStringFinalizeOp finalizer;

    JS_RUNTIME_UNMETER(rt, liveStrings);
    JS_ASSERT(!JSString::isStatic(str));
    if (str->isDependent()) {
        /* A dependent string can not be external and must be valid. */
        JS_ASSERT(type < 0);
        JS_ASSERT(str->dependentBase());
        JS_RUNTIME_UNMETER(rt, liveDependentStrings);
        valid = JS_TRUE;
    } else {
        /* A stillborn string has null chars, so is not valid. */
        chars = str->flatChars();
        valid = (chars != NULL);
        if (valid) {
            if (type < 0) {
                if (cx)
                    JS_free(cx, chars);
                else
                    free(chars);
            } else {
                JS_ASSERT((uintN) type < JS_ARRAY_LENGTH(str_finalizers));
                finalizer = str_finalizers[type];
                if (finalizer) {
                    /*
                     * Assume that the finalizer for the permanently interned
                     * string knows how to deal with null context.
                     */
                    finalizer(cx, str);
                }
            }
        }
    }
    if (valid && str->isDeflated())
        js_PurgeDeflatedStringCache(rt, str);
}
/*
 * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
 * rt->gcLock already held, so the lock should be kept on return.
 */
void
js_GC(JSContext *cx, JSGCInvocationKind gckind)
{
    JSRuntime *rt;
    JSBool keepAtoms;
    JSGCCallback callback;
    JSTracer trc;
    uint32 i, type;
    uint32 thingSize, indexLimit;
    JSGCArenaInfo *a, **ap, *emptyArenas;
    uint8 flags, *flagp;
    JSGCThing *thing, *freeList;
    JSGCArenaList *arenaList;
    JSBool allClear;
#ifdef JS_THREADSAFE
    uint32 requestDebit;
#endif
#ifdef JS_GCMETER
    uint32 nlivearenas, nkilledarenas, nthings;
#endif

    JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx));
    rt = cx->runtime;

#ifdef JS_THREADSAFE
    /*
     * We allow js_GC calls outside a request but the context must be bound
     * to the current thread.
     */
    JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));

    /* Avoid deadlock. */
    JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
#endif

    if (gckind & GC_KEEP_ATOMS) {
        /*
         * The set slot request and last ditch GC kinds preserve all atoms and
         * weak roots.
         */
        keepAtoms = JS_TRUE;
    } else {
        /* Keep atoms when a suspended compile is running on another context. */
        keepAtoms = (rt->gcKeepAtoms != 0);
        JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
    }

    /*
     * Don't collect garbage if the runtime isn't up, and cx is not the last
     * context in the runtime. The last context must force a GC, and nothing
     * should suppress that final collection or there may be shutdown leaks,
     * or runtime bloat until the next context is created.
     */
    if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
        return;

  restart_at_beginning:
    /*
     * Let the API user decide to defer a GC if it wants to (unless this
     * is the last context). Invoke the callback regardless. Sample the
     * callback in case we are freely racing with a JS_SetGCCallback{,RT} on
     * another thread.
     */
    if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
        JSBool ok;

        if (gckind & GC_LOCK_HELD)
            JS_UNLOCK_GC(rt);
        ok = callback(cx, JSGC_BEGIN);
        if (gckind & GC_LOCK_HELD)
            JS_LOCK_GC(rt);
        if (!ok && gckind != GC_LAST_CONTEXT) {
            /*
             * It's possible that we've looped back to this code from the 'goto
             * restart_at_beginning' below in the GC_SET_SLOT_REQUEST code and
             * that rt->gcLevel is now 0. Don't return without notifying!
             */
            if (rt->gcLevel == 0 && (gckind & GC_LOCK_HELD))
                JS_NOTIFY_GC_DONE(rt);
            return;
        }
    }

    /* Lock out other GC allocator and collector invocations. */
    if (!(gckind & GC_LOCK_HELD))
        JS_LOCK_GC(rt);

    METER(rt->gcStats.poke++);
    rt->gcPoke = JS_FALSE;

#ifdef JS_THREADSAFE
    /*
     * Check if the GC is already running on this or another thread and
     * delegate the job to it.
     */
    if (rt->gcLevel > 0) {
        JS_ASSERT(rt->gcThread);

        /* Bump gcLevel to restart the current GC, so it finds new garbage. */
        rt->gcLevel++;
        METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);

        /*
         * If the GC runs on another thread, temporarily suspend the current
         * request and wait until the GC is done.
         */
        if (rt->gcThread != cx->thread) {
            requestDebit = js_DiscountRequestsForGC(cx);
            js_RecountRequestsAfterGC(rt, requestDebit);
        }
        if (!(gckind & GC_LOCK_HELD))
            JS_UNLOCK_GC(rt);
        return;
    }

    /* No other thread is in GC, so indicate that we're now in GC. */
    rt->gcLevel = 1;
    rt->gcThread = cx->thread;

    /*
     * Notify all operation callbacks, which will give them a chance to
     * yield their current request. Contexts that are not currently
     * executing will perform their callback at some later point,
     * which then will be unnecessary, but harmless.
     */
    js_NudgeOtherContexts(cx);

    /*
     * Discount all the requests on the current thread from contributing
     * to rt->requestCount before we wait for all other requests to finish.
     * JS_NOTIFY_REQUEST_DONE, which will wake us up, is only called on
     * rt->requestCount transitions to 0.
     */
    requestDebit = js_CountThreadRequests(cx);
    JS_ASSERT_IF(cx->requestDepth != 0, requestDebit >= 1);
    rt->requestCount -= requestDebit;
    while (rt->requestCount > 0)
        JS_AWAIT_REQUEST_DONE(rt);
    rt->requestCount += requestDebit;

#else  /* !JS_THREADSAFE */

    /* Bump gcLevel and return rather than nest; the outer gc will restart. */
    rt->gcLevel++;
    METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
    if (rt->gcLevel > 1)
        return;

#endif /* !JS_THREADSAFE */

    /*
     * Set rt->gcRunning here within the GC lock, and after waiting for any
     * active requests to end, so that new requests that try to JS_AddRoot,
     * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
     * rt->gcLevel to drop to zero, while request-less calls to the *Root*
     * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
     * waiting for GC to finish.
     */
    rt->gcRunning = JS_TRUE;

    if (gckind == GC_SET_SLOT_REQUEST) {
        JSSetSlotRequest *ssr;

        while ((ssr = rt->setSlotRequests) != NULL) {
            rt->setSlotRequests = ssr->next;
            JS_UNLOCK_GC(rt);
            ProcessSetSlotRequest(cx, ssr);
            JS_LOCK_GC(rt);
        }

        /*
         * We assume here that killing links to parent and prototype objects
         * does not create garbage (such objects typically are long-lived and
         * widely shared, e.g. global objects, Function.prototype, etc.). We
         * collect garbage only if a racing thread attempted GC and is waiting
         * for us to finish (gcLevel > 1) or if someone already poked us.
         */
        if (rt->gcLevel == 1 && !rt->gcPoke && !rt->gcIsNeeded)
            goto done_running;

        rt->gcLevel = 0;
        rt->gcPoke = JS_FALSE;
        rt->gcRunning = JS_FALSE;
#ifdef JS_THREADSAFE
        rt->gcThread = NULL;
#endif
        gckind = GC_LOCK_HELD;
        goto restart_at_beginning;
    }

    JS_UNLOCK_GC(rt);

    if (JS_ON_TRACE(cx))
        goto out;

    /* Clear gcIsNeeded now, when we are about to start a normal GC cycle. */
    rt->gcIsNeeded = JS_FALSE;

    /* Reset malloc counter. */
    rt->gcMallocBytes = 0;

#ifdef JS_DUMP_SCOPE_METERS
  { extern void js_DumpScopeMeters(JSRuntime *rt);
    js_DumpScopeMeters(rt);
  }
#endif

#ifdef JS_TRACER
    js_PurgeJITOracle();
#endif

  restart:
    rt->gcNumber++;
    JS_ASSERT(!rt->gcUntracedArenaStackTop);
    JS_ASSERT(rt->gcTraceLaterCount == 0);

    /*
     * Reset the property cache's type id generator so we can compress ids.
     * Same for the protoHazardShape proxy-shape standing in for all object
     * prototypes having readonly or setter properties.
     */
    if (rt->shapeGen & SHAPE_OVERFLOW_BIT) {
        rt->gcRegenShapes = true;
        rt->gcRegenShapesScopeFlag ^= JSScope::SHAPE_REGEN;
        rt->shapeGen = 0;
        rt->protoHazardShape = 0;
    }

    js_PurgeThreads(cx);

    if (gckind == GC_LAST_CONTEXT) {
        /* Clear builtin functions, which are recreated on demand. */
        memset(rt->builtinFunctions, 0, sizeof rt->builtinFunctions);
    }

    /*
     * Mark phase.
     */
    JS_TRACER_INIT(&trc, cx, NULL);
    rt->gcMarkingTracer = &trc;
    JS_ASSERT(IS_GC_MARKING_TRACER(&trc));

    for (a = rt->gcDoubleArenaList.first; a; a = a->prev)
        a->u.hasMarkedDoubles = JS_FALSE;

    js_TraceRuntime(&trc, keepAtoms);
    js_MarkScriptFilenames(rt, keepAtoms);

    /*
     * Mark children of things that caused too deep recursion during the above
     * tracing.
     */
    TraceDelayedChildren(&trc);

    JS_ASSERT(!cx->insideGCMarkCallback);
    if (rt->gcCallback) {
        cx->insideGCMarkCallback = JS_TRUE;
        (void) rt->gcCallback(cx, JSGC_MARK_END);
        JS_ASSERT(cx->insideGCMarkCallback);
        cx->insideGCMarkCallback = JS_FALSE;
    }
    JS_ASSERT(rt->gcTraceLaterCount == 0);

    rt->gcMarkingTracer = NULL;

#ifdef JS_THREADSAFE
    cx->createDeallocatorTask();
#endif

    /*
     * Sweep phase.
     *
     * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
     * so that any attempt to allocate a GC-thing from a finalizer will fail,
     * rather than nest badly and leave the unmarked newborn to be swept.
     *
     * We first sweep atom state so we can use js_IsAboutToBeFinalized on
     * JSString or jsdouble held in a hashtable to check if the hashtable
     * entry can be freed. Note that even after the entry is freed, JSObject
     * finalizers can continue to access the corresponding jsdouble* and
     * JSString* assuming that they are unique. This works since the
     * atomization API must not be called during GC.
     */
    js_SweepAtomState(cx);

    /* Finalize iterator states before the objects they iterate over. */
    CloseNativeIterators(cx);

    /* Finalize watch points associated with unreachable objects. */
    js_SweepWatchPoints(cx);

#ifdef DEBUG
    /* Save the pre-sweep count of scope-mapped properties. */
    rt->liveScopePropsPreSweep = rt->liveScopeProps;
#endif

    /*
     * Here we need to ensure that JSObject instances are finalized before GC-
     * allocated JSString and jsdouble instances so an object's finalizer can
     * access them even if they will be freed. For that we simply finalize the
     * list containing JSObject first since the static assert at the beginning
     * of the file guarantees that JSString and jsdouble instances are
     * allocated from a different list.
     */
    emptyArenas = NULL;
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i == 0
                                     ? GC_FREELIST_INDEX(sizeof(JSObject))
                                     : i == GC_FREELIST_INDEX(sizeof(JSObject))
                                     ? 0
                                     : i];
        ap = &arenaList->last;
        if (!(a = *ap))
            continue;

        JS_ASSERT(arenaList->lastCount > 0);
        arenaList->freeList = NULL;
        freeList = NULL;
        thingSize = arenaList->thingSize;
        indexLimit = THINGS_PER_ARENA(thingSize);
        flagp = THING_FLAGP(a, arenaList->lastCount - 1);
        METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
        for (;;) {
            JS_ASSERT(a->prevUntracedPage == 0);
            JS_ASSERT(a->u.untracedThings == 0);
            allClear = JS_TRUE;
            do {
                flags = *flagp;
                if (flags & (GCF_MARK | GCF_LOCK)) {
                    *flagp &= ~GCF_MARK;
                    allClear = JS_FALSE;
                    METER(nthings++);
                } else {
                    thing = FLAGP_TO_THING(flagp, thingSize);
                    if (!(flags & GCF_FINAL)) {
                        /*
                         * Call the finalizer with GCF_FINAL ORed into flags.
                         */
                        *flagp = (uint8) (flags | GCF_FINAL);
                        type = flags & GCF_TYPEMASK;
                        switch (type) {
                          case GCX_OBJECT:
                            FinalizeObject(cx, (JSObject *) thing);
                            break;
#if JS_HAS_XML_SUPPORT
                          case GCX_XML:
                            js_FinalizeXML(cx, (JSXML *) thing);
                            break;
#endif
                          default:
                            JS_ASSERT(type == GCX_STRING ||
                                      type - GCX_EXTERNAL_STRING <
                                      GCX_NTYPES - GCX_EXTERNAL_STRING);
                            js_FinalizeStringRT(rt, (JSString *) thing,
                                                (intN) (type -
                                                        GCX_EXTERNAL_STRING),
                                                cx);
                            break;
                        }
#ifdef DEBUG
                        memset(thing, JS_FREE_PATTERN, thingSize);
#endif
                    }
                    thing->flagp = flagp;
                    thing->next = freeList;
                    freeList = thing;
                }
            } while (++flagp != THING_FLAGS_END(a));

            if (allClear) {
                /*
                 * Forget just assembled free list head for the arena and
                 * add the arena itself to the destroy list.
                 */
                freeList = arenaList->freeList;
                if (a == arenaList->last)
                    arenaList->lastCount = indexLimit;
                *ap = a->prev;
                a->prev = emptyArenas;
                emptyArenas = a;
                METER(nkilledarenas++);
            } else {
                arenaList->freeList = freeList;
                ap = &a->prev;
                METER(nlivearenas++);
            }
            if (!(a = *ap))
                break;
            flagp = THING_FLAGP(a, indexLimit - 1);
        }

        /*
         * We use arenaList - &rt->gcArenaList[0], not i, as the stat index
         * due to the enumeration reorder at the beginning of the loop.
         */
        METER(UpdateArenaStats(&rt->gcStats.arenaStats[arenaList -
                                                       &rt->gcArenaList[0]],
                               nlivearenas, nkilledarenas, nthings));
    }

    ap = &rt->gcDoubleArenaList.first;
    METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
    while ((a = *ap) != NULL) {
        if (!a->u.hasMarkedDoubles) {
            /* No marked double values in the arena. */
            *ap = a->prev;
            a->prev = emptyArenas;
            emptyArenas = a;
            METER(nkilledarenas++);
        } else {
            ap = &a->prev;
#ifdef JS_GCMETER
            for (i = 0; i != DOUBLES_PER_ARENA; ++i) {
                if (IsMarkedDouble(a, i))
                    METER(nthings++);
            }
            METER(nlivearenas++);
#endif
        }
    }
    METER(UpdateArenaStats(&rt->gcStats.doubleArenaStats,
                           nlivearenas, nkilledarenas, nthings));
    rt->gcDoubleArenaList.nextDoubleFlags =
        rt->gcDoubleArenaList.first
        ? DOUBLE_ARENA_BITMAP(rt->gcDoubleArenaList.first)
        : DOUBLE_BITMAP_SENTINEL;

    /*
     * Sweep the runtime's property tree after finalizing objects, in case any
     * had watchpoints referencing tree nodes.
     */
    js_SweepScopeProperties(cx);

    /*
     * Sweep script filenames after sweeping functions in the generic loop
     * above. In this way when a scripted function's finalizer destroys the
     * script and calls rt->destroyScriptHook, the hook can still access the
     * script's filename. See bug 323267.
     */
    js_SweepScriptFilenames(rt);

    /*
     * Destroy arenas after we finished the sweeping so finalizers can safely
     * use js_IsAboutToBeFinalized().
     */
    DestroyGCArenas(rt, emptyArenas);

#ifdef JS_THREADSAFE
    cx->submitDeallocatorTask();
#endif

    if (rt->gcCallback)
        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
#ifdef DEBUG_srcnotesize
  { extern void DumpSrcNoteSizeHist();
    DumpSrcNoteSizeHist();
    printf("GC HEAP SIZE %lu\n", (unsigned long) rt->gcBytes);
  }
#endif

#ifdef JS_SCOPE_DEPTH_METER
  { static FILE *fp;
    if (!fp)
        fp = fopen("/tmp/scopedepth.stats", "w");

    if (fp) {
        JS_DumpBasicStats(&rt->protoLookupDepthStats, "proto-lookup depth", fp);
        JS_DumpBasicStats(&rt->scopeSearchDepthStats, "scope-search depth", fp);
        JS_DumpBasicStats(&rt->hostenvScopeDepthStats, "hostenv scope depth", fp);
        JS_DumpBasicStats(&rt->lexicalScopeDepthStats, "lexical scope depth", fp);
        fflush(fp);
    }
  }
#endif /* JS_SCOPE_DEPTH_METER */

#ifdef JS_DUMP_LOOP_STATS
  { static FILE *lsfp;
    if (!lsfp)
        lsfp = fopen("/tmp/loopstats", "w");
    if (lsfp) {
        JS_DumpBasicStats(&rt->loopStats, "loops", lsfp);
        fflush(lsfp);
    }
  }
#endif /* JS_DUMP_LOOP_STATS */

  out:
    JS_LOCK_GC(rt);

    /*
     * We want to restart GC if js_GC was called recursively or if any of the
     * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
     */
    if (!JS_ON_TRACE(cx) && (rt->gcLevel > 1 || rt->gcPoke)) {
        rt->gcLevel = 1;
        rt->gcPoke = JS_FALSE;
        JS_UNLOCK_GC(rt);
        goto restart;
    }

    rt->setGCLastBytes(rt->gcBytes);
  done_running:
    rt->gcLevel = 0;
    rt->gcRunning = rt->gcRegenShapes = false;

#ifdef JS_THREADSAFE
    rt->gcThread = NULL;
    JS_NOTIFY_GC_DONE(rt);
#endif

    /*
     * Unlock unless we have GC_LOCK_HELD which requires locked GC on return.
     */
    if (!(gckind & GC_LOCK_HELD))
        JS_UNLOCK_GC(rt);

    /*
     * Execute JSGC_END callback outside the lock. Again, sample the callback
     * pointer in case it changes, since we are outside of the GC vs. requests
     * interlock mechanism here.
     */
    if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
        JSWeakRoots savedWeakRoots;
        JSTempValueRooter tvr;

        if (gckind & GC_KEEP_ATOMS) {
            /*
             * We allow JSGC_END implementation to force a full GC or allocate
             * new GC things. Thus we must protect the weak roots from garbage
             * collection and overwrites.
             */
            savedWeakRoots = cx->weakRoots;
            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
            JS_KEEP_ATOMS(rt);
        }

        (void) callback(cx, JSGC_END);

        if (gckind & GC_KEEP_ATOMS) {
            JS_UNKEEP_ATOMS(rt);
            JS_POP_TEMP_ROOT(cx, &tvr);
        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
            /*
             * On shutdown iterate until JSGC_END callback stops creating
             * garbage.
             */
            goto restart_at_beginning;
        }
    }
}