bug 437325 - JSThread is no longer shared between runtimes. r=brendan
mozilla-central: js/src/jsgc.cpp
1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=78:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
17 * The Original Code is Mozilla Communicator client code, released
18 * March 31, 1998.
20 * The Initial Developer of the Original Code is
21 * Netscape Communications Corporation.
22 * Portions created by the Initial Developer are Copyright (C) 1998
23 * the Initial Developer. All Rights Reserved.
25 * Contributor(s):
27 * Alternatively, the contents of this file may be used under the terms of
28 * either of the GNU General Public License Version 2 or later (the "GPL"),
29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
42 * JS Mark-and-Sweep Garbage Collector.
44 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
45 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
46 * using malloc. It uses an ideally parallel array of flag bytes to hold the
47 * mark bit, finalizer type index, etc.
49 * XXX swizzle page to freelist for better locality of reference
51 #include <stdlib.h> /* for free */
52 #include <math.h>
53 #include <string.h> /* for memset used when DEBUG */
54 #include "jstypes.h"
55 #include "jsutil.h" /* Added by JSIFY */
56 #include "jshash.h" /* Added by JSIFY */
57 #include "jsbit.h"
58 #include "jsclist.h"
59 #include "jsprf.h"
60 #include "jsapi.h"
61 #include "jsatom.h"
62 #include "jscntxt.h"
63 #include "jsversion.h"
64 #include "jsdbgapi.h"
65 #include "jsexn.h"
66 #include "jsfun.h"
67 #include "jsgc.h"
68 #include "jsinterp.h"
69 #include "jsiter.h"
70 #include "jslock.h"
71 #include "jsnum.h"
72 #include "jsobj.h"
73 #include "jsparse.h"
74 #include "jsscope.h"
75 #include "jsscript.h"
76 #include "jsstaticcheck.h"
77 #include "jsstr.h"
78 #include "jstracer.h"
80 #if JS_HAS_XML_SUPPORT
81 #include "jsxml.h"
82 #endif
85 * Check if posix_memalign is available.
87 #if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
88 # define HAS_POSIX_MEMALIGN 1
89 #else
90 # define HAS_POSIX_MEMALIGN 0
91 #endif
94 * jemalloc provides posix_memalign.
96 #ifdef MOZ_MEMORY
97 extern "C" {
98 #include "../../memory/jemalloc/jemalloc.h"
100 #endif
103 * Include the headers for mmap unless we have posix_memalign and do not
104 * insist on mmap.
106 #if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
107 # if defined(XP_WIN)
108 # ifndef JS_GC_USE_MMAP
109 # define JS_GC_USE_MMAP 1
110 # endif
111 # include <windows.h>
112 # else
113 # if defined(XP_UNIX) || defined(XP_BEOS)
114 # include <unistd.h>
115 # endif
116 # if _POSIX_MAPPED_FILES > 0
117 # ifndef JS_GC_USE_MMAP
118 # define JS_GC_USE_MMAP 1
119 # endif
120 # include <sys/mman.h>
122 /* On Mac OS X MAP_ANONYMOUS is not defined. */
123 # if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
124 # define MAP_ANONYMOUS MAP_ANON
125 # endif
126 # else
127 # if JS_GC_USE_MMAP
128 # error "JS_GC_USE_MMAP is set when mmap is not available"
129 # endif
130 # endif
131 # endif
132 #endif
135 * Check that JSTempValueUnion has the size of jsval and void * so we can
136 * reinterpret jsval as void* GC-thing pointer and use JSTVU_SINGLE for
137 * different GC-things.
139 JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval));
140 JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(void *));
144 * Check that JSTRACE_XML follows JSTRACE_OBJECT, JSTRACE_DOUBLE and
145 * JSTRACE_STRING.
147 JS_STATIC_ASSERT(JSTRACE_OBJECT == 0);
148 JS_STATIC_ASSERT(JSTRACE_DOUBLE == 1);
149 JS_STATIC_ASSERT(JSTRACE_STRING == 2);
150 JS_STATIC_ASSERT(JSTRACE_XML == 3);
153 * JS_IS_VALID_TRACE_KIND assumes that JSTRACE_STRING is the last non-xml
154 * trace kind when JS_HAS_XML_SUPPORT is false.
156 JS_STATIC_ASSERT(JSTRACE_STRING + 1 == JSTRACE_XML);
159 * The number of used GCX-types must stay within GCX_LIMIT.
161 JS_STATIC_ASSERT(GCX_NTYPES <= GCX_LIMIT);
165 * Check that we can reinterpret double as JSGCDoubleCell.
167 JS_STATIC_ASSERT(sizeof(JSGCDoubleCell) == sizeof(double));
170 * Check that we can use memset(p, 0, ...) to implement JS_CLEAR_WEAK_ROOTS.
172 JS_STATIC_ASSERT(JSVAL_NULL == 0);
176 * A GC arena contains a fixed number of flag bits for each thing in its heap,
177 * and supports O(1) lookup of a flag given its thing's address.
179 * To implement this, we allocate things of the same size from a GC arena
180 * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
181 * following picture shows arena's layout:
183 * +------------------------------+--------------------+---------------+
184 * | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
185 * +------------------------------+--------------------+---------------+
187 * To find the flag bits for the thing we calculate the thing index counting
188 * from arena's start using:
190 * thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
192 * The details of flag's lookup depend on thing's kind. For all GC things
193 * except doubles we use one byte of flags where 4 bits determine the thing's
194 * type and the rest is used to implement GC marking, finalization and
195 * locking. We calculate the address of flag's byte using:
197 * flagByteAddress =
198 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
200 * where
202 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
204 * is the last byte of flags' area.
206 * This implies that the things are allocated from the start of their area and
207 * flags are allocated from the end. This arrangement avoids a relatively
208 * expensive calculation of the location of the boundary separating things and
209 * flags. The boundary's offset from the start of the arena is given by:
211 * thingsPerArena * thingSize
213 * where thingsPerArena is the number of things that the arena can hold:
215 * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
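*
* For example, with GC_ARENA_SIZE == 4096 and, assuming a 32-bit build with
* no arena pad so that sizeof(JSGCArenaInfo) == 16, an arena of 16-byte
* things holds
*
*   thingsPerArena = (4096 - 16) / (16 + 1) == 240
*
* things: the first 240 * 16 == 3840 bytes hold the things themselves, the
* next 240 bytes hold their flags, and the last 16 bytes hold the
* JSGCArenaInfo.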
217 * To allocate doubles we use a specialized arena. It can contain only numbers
218 * so we do not need the type bits. Moreover, since the doubles do not require
219 * a finalizer and very few of them are locked via js_LockGCThing API, we use
220 * just one bit of flags per double to denote if it was marked during the
221 * marking phase of the GC. The locking is implemented via a hash table. Thus
222 * for doubles the flag area becomes a bitmap.
224 * JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
225 * When it is true, a platform-dependent function like mmap is used to get
226 * memory aligned on CPU page boundaries. If the macro is false or undefined,
227 * posix_memalign is used when available. Otherwise the code uses malloc to
228 * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
229 * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
230 * see NewGCChunk/DestroyGCChunk below.
232 * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
233 * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
234 * not be a compile-time constant as the system page size is not known until
235 * runtime.
237 #if JS_GC_USE_MMAP
238 static uint32 js_gcArenasPerChunk = 0;
239 static JSBool js_gcUseMmap = JS_FALSE;
240 #elif HAS_POSIX_MEMALIGN
241 # define js_gcArenasPerChunk 1
242 #else
243 # define js_gcArenasPerChunk 7
244 #endif
246 #if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
247 # define CHUNKED_ARENA_ALLOCATION 0
248 #else
249 # define CHUNKED_ARENA_ALLOCATION 1
250 #endif
252 #define GC_ARENA_SHIFT 12
253 #define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
254 #define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
257 * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
258 * It is used to improve allocation efficiency when using posix_memalign. If
259 * malloc's implementation uses internal headers, then calling
261 * posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
263 * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
264 * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
265 * calls
267 * posix_memalign(&p, GC_ARENA_SIZE,
268 * GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
270 * When JS_GC_ARENA_PAD is equal to or greater than the number of words in the
271 * system header, the system can pack all allocations together without holes.
273 * With HAS_POSIX_MEMALIGN we want at least a 2-word pad unless posix_memalign
274 * comes from jemalloc, which does not use any headers/trailers.
276 #ifndef JS_GC_ARENA_PAD
277 # if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
278 # define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
279 # else
280 # define JS_GC_ARENA_PAD 0
281 # endif
282 #endif
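/*
 * A worked example of the padding, assuming a 32-bit build
 * (JS_GC_ARENA_PAD == 8), js_gcArenasPerChunk == 1 and a hypothetical malloc
 * that prepends an 8-byte header to each allocation: requesting a full 4096
 * bytes forces every header + block pair past a 4096-byte stride, so
 * consecutive chunks can end up a whole arena apart, while requesting
 * 4096 - 8 == 4088 bytes leaves room for the header inside the stride and
 * lets the chunks pack back to back.
 */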
284 struct JSGCArenaInfo {
286 * Allocation list for the arena or NULL if the arena holds double values.
288 JSGCArenaList *list;
291 * Pointer to the previous arena in a linked list. The arena can either
292 * belong to one of JSContext.gcArenaList lists or, when it does not have
293 * any allocated GC things, to the list of free arenas in the chunk with
294 * head stored in JSGCChunkInfo.lastFreeArena.
296 JSGCArenaInfo *prev;
298 #if !CHUNKED_ARENA_ALLOCATION
299 jsuword prevUntracedPage;
300 #else
302 * A link field for the list of arenas with marked but not yet traced
303 * things. The field is encoded as arena's page to share the space with
304 * firstArena and arenaIndex fields.
306 jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
309 * When firstArena is false, the index of arena in the chunk. When
310 * firstArena is true, the index of a free arena holding JSGCChunkInfo or
311 * NO_FREE_ARENAS if there are no free arenas in the chunk.
313 * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
314 * access either of indexes.
316 jsuword arenaIndex : GC_ARENA_SHIFT - 1;
318 /* Flag indicating if the arena is the first in the chunk. */
319 jsuword firstArena : 1;
320 #endif
322 union {
323 jsuword untracedThings; /* bitset for fast search of marked
324 but not yet traced things */
325 JSBool hasMarkedDoubles; /* the arena has marked doubles */
326 } u;
328 #if JS_GC_ARENA_PAD != 0
329 uint8 pad[JS_GC_ARENA_PAD];
330 #endif
334 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
335 * as possible. The code does not rely on this check so if on a particular
336 * platform this does not compile, then, as a workaround, comment the assert
337 * out and submit a bug report.
339 JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
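/*
 * For reference, on a 32-bit target the three bit fields above pack into a
 * single word:
 *
 *   prevUntracedPage : 32 - GC_ARENA_SHIFT == 20 bits
 *   arenaIndex       : GC_ARENA_SHIFT - 1  == 11 bits
 *   firstArena       :                        1 bit
 *
 * which, together with the list and prev pointers, puts the u union at
 * offset 3 * sizeof(jsuword) as asserted above.
 */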
342 * Macros to convert between JSGCArenaInfo, the start address of the arena and
343 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
345 #define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
347 #define IS_ARENA_INFO_ADDRESS(arena) \
348 (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
350 #define ARENA_START_TO_INFO(arenaStart) \
351 (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
352 (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))
354 #define ARENA_INFO_TO_START(arena) \
355 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
356 (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
358 #define ARENA_PAGE_TO_INFO(arenaPage) \
359 (JS_ASSERT(arenaPage != 0), \
360 JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
361 ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
363 #define ARENA_INFO_TO_PAGE(arena) \
364 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
365 ((jsuword) (arena) >> GC_ARENA_SHIFT))
367 #define GET_ARENA_INFO(chunk, index) \
368 (JS_ASSERT((index) < js_gcArenasPerChunk), \
369 ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
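/*
 * Example of the conversions, assuming a 32-bit build where
 * sizeof(JSGCArenaInfo) == 16 so that ARENA_INFO_OFFSET == 0xff0: for an
 * arena starting at 0xabcd000 the arena page is 0xabcd and the JSGCArenaInfo
 * lives in the last 16 bytes of the arena, i.e.
 *
 *   ARENA_START_TO_INFO(0xabcd000) == (JSGCArenaInfo *) 0xabcdff0
 *   ARENA_INFO_TO_PAGE((JSGCArenaInfo *) 0xabcdff0) == 0xabcd
 */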
371 #if CHUNKED_ARENA_ALLOCATION
373 * Definitions for allocating arenas in chunks.
375 * All chunks that have at least one free arena are put on the doubly-linked
376 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
377 * the head of the chunk's free arena list together with the link fields for
378 * gcChunkList.
380 * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
381 * the index of this arena. When all arenas in the chunk are used, it is
382 * removed from the list and the index is set to NO_FREE_ARENAS indicating
383 * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
386 struct JSGCChunkInfo {
387 JSGCChunkInfo **prevp;
388 JSGCChunkInfo *next;
389 JSGCArenaInfo *lastFreeArena;
390 uint32 numFreeArenas;
393 #define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
395 #ifdef js_gcArenasPerChunk
396 JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
397 js_gcArenasPerChunk <= NO_FREE_ARENAS);
398 #endif
400 #define GET_ARENA_CHUNK(arena, index) \
401 (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
402 ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
404 #define GET_ARENA_INDEX(arena) \
405 ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
407 #define GET_CHUNK_INFO_INDEX(chunk) \
408 ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
410 #define SET_CHUNK_INFO_INDEX(chunk, index) \
411 (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
412 (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
414 #define GET_CHUNK_INFO(chunk, infoIndex) \
415 (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
416 JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
417 (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
419 #define CHUNK_INFO_TO_INDEX(ci) \
420 GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
422 #endif
425 * Macros for GC-thing operations.
427 #define THINGS_PER_ARENA(thingSize) \
428 ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
430 #define THING_TO_ARENA(thing) \
431 ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \
432 1 - sizeof(JSGCArenaInfo)))
434 #define THING_TO_INDEX(thing, thingSize) \
435 ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
437 #define THING_FLAGS_END(arena) ((uint8 *)(arena))
439 #define THING_FLAGP(arena, thingIndex) \
440 (JS_ASSERT((jsuword) (thingIndex) \
441 < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
442 (uint8 *)(arena) - 1 - (thingIndex))
444 #define THING_TO_FLAGP(thing, thingSize) \
445 THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
447 #define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
449 #define FLAGP_TO_INDEX(flagp) \
450 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
451 (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
453 #define FLAGP_TO_THING(flagp, thingSize) \
454 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
455 (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
456 (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
457 (thingSize) * FLAGP_TO_INDEX(flagp)))
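/*
 * A sketch of how these macros compose for a thing of size thingSize living
 * in arena a, i.e. a == THING_TO_ARENA(thing):
 *
 *   uint8 *flagp = THING_TO_FLAGP(thing, thingSize);
 *   JS_ASSERT(FLAGP_TO_ARENA(flagp) == a);
 *   JS_ASSERT(FLAGP_TO_THING(flagp, thingSize) == (JSGCThing *) thing);
 *
 * Flag bytes are laid out in reverse thing order just before JSGCArenaInfo,
 * so a flag pointer identifies both the arena and the thing it describes.
 */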
460 * Macros for the specialized arena for doubles.
462 * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
463 * hold. We find it as follows. Let n be the number of doubles in the
464 * arena. Together with the bitmap of flags and JSGCArenaInfo they must fit
465 * in the arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
466 * which the following holds:
468 * n*s + ceil(n/B) <= M (1)
470 * where "/" denotes normal real division,
471 * ceil(r) gives the least integer not smaller than the number r,
472 * s is the number of words in jsdouble,
473 * B is number of bits per word or B == JS_BITS_PER_WORD
474 * M is the number of words in the arena before JSGCArenaInfo or
475 * M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword).
476 * M == ARENA_INFO_OFFSET / sizeof(jsuword)
478 * We rewrite the inequality as
480 * n*B*s/B + ceil(n/B) <= M,
481 * ceil(n*B*s/B + n/B) <= M,
482 * ceil(n*(B*s + 1)/B) <= M (2)
484 * We define a helper function e(n, s, B),
486 * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
488 * It gives:
490 * n*(B*s + 1)/B + e(n, s, B) <= M,
491 * n + e*B/(B*s + 1) <= M*B/(B*s + 1)
493 * We apply the floor function to both sides of the last equation, where
494 * floor(r) gives the biggest integer not greater than r. As a consequence we
495 * have:
497 * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
498 * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
499 * n <= floor(M*B/(B*s + 1)), (3)
501 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
502 * Thus any n that satisfies the original constraint (1) or its equivalent (2),
503 * must also satisfy (3). That is, we got an upper estimate for the maximum
504 * value of n. Let's show that this upper estimate,
506 * floor(M*B/(B*s + 1)), (4)
508 * also satisfies (1) and, as such, gives the required maximum value.
509 * Substituting it into (2) gives:
511 * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
513 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
515 * ceil(floor(M/X)*X) <= ceil(M) == M.
517 * Thus the value of (4) gives the maximum n satisfying (1).
519 * For the final result we observe that in (4)
521 * M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
522 * == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
524 * and
526 * B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
527 * == JS_BITS_PER_DOUBLE.
529 #define DOUBLES_PER_ARENA \
530 ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
533 * Check that sizeof(jsuword) divides both ARENA_INFO_OFFSET and sizeof(jsdouble).
535 JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
536 JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
537 JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
539 #define DOUBLES_ARENA_BITMAP_WORDS \
540 (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
542 /* Check that DOUBLES_PER_ARENA indeed maximises (1). */
543 JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
544 DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
545 ARENA_INFO_OFFSET);
547 JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
548 sizeof(jsuword) *
549 JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
550 ARENA_INFO_OFFSET);
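/*
 * As a concrete check, assume a 32-bit build where ARENA_INFO_OFFSET ==
 * 4096 - 16 == 4080. Then
 *
 *   DOUBLES_PER_ARENA == 4080 * 8 / (64 + 1) == 502,
 *   DOUBLES_ARENA_BITMAP_WORDS == JS_HOWMANY(502, 32) == 16,
 *
 * and 502 * 8 + 16 * 4 == 4080 bytes fit exactly, while 503 doubles would
 * need 503 * 8 + 16 * 4 == 4088 bytes and no longer fit, which is what the
 * two asserts above verify.
 */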
553 * When DOUBLES_PER_ARENA % JS_BITS_PER_WORD != 0, some bits in the
554 * last word of the occupation bitmap are unused.
556 #define UNUSED_DOUBLE_BITMAP_BITS \
557 (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)
559 JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);
561 #define DOUBLES_ARENA_BITMAP_OFFSET \
562 (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
564 #define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \
565 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \
566 JS_ASSERT(!(arenaInfo)->list)) \
569 * Get the start of the bitmap area containing double mark flags in the arena.
570 * To access the flag the code uses
572 * JS_TEST_BIT(bitmapStart, index)
574 * That is, compared with the case of arenas with non-double things, we count
575 * flags from the start of the bitmap area, not from the end.
577 #define DOUBLE_ARENA_BITMAP(arenaInfo) \
578 (CHECK_DOUBLE_ARENA_INFO(arenaInfo), \
579 (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)
581 #define DOUBLE_THING_TO_INDEX(thing) \
582 (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \
583 JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \
584 DOUBLES_ARENA_BITMAP_OFFSET), \
585 ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
587 static void
588 ClearDoubleArenaFlags(JSGCArenaInfo *a)
590 jsbitmap *bitmap, mask;
591 uintN nused;
594 * When some high bits in the last word of the double occupation bitmap
595 * are unused, we must set them. Otherwise RefillDoubleFreeList will
596 * assume that they correspond to some free cells and try to allocate
597 * them.
599 * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
601 bitmap = DOUBLE_ARENA_BITMAP(a);
602 memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
603 mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
604 nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
605 bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
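/*
 * With the 32-bit numbers used above (502 doubles, 16 bitmap words),
 * UNUSED_DOUBLE_BITMAP_BITS == 16 * 32 - 502 == 10, so the code clears the
 * first 15 bitmap words and sets the last one to
 *
 *   ((1 << 10) - 1) << (32 - 10) == 0xffc00000,
 *
 * permanently marking the 10 nonexistent cells as "in use".
 */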
608 static JS_ALWAYS_INLINE JSBool
609 IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
611 jsbitmap *bitmap;
613 JS_ASSERT(a->u.hasMarkedDoubles);
614 bitmap = DOUBLE_ARENA_BITMAP(a);
615 return JS_TEST_BIT(bitmap, index);
619 * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
621 * 1. The next byte in the bitmap area for doubles to check for unmarked
622 * (or free) doubles.
623 * 2. Or to the end of the bitmap area when all GC cells of the arena are
624 * allocated.
625 * 3. Or to a special sentinel value indicating that there are no arenas
626 * to check for unmarked doubles.
628 * We set the sentinel to ARENA_INFO_OFFSET so the single check
630 * ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
632 * will cover both the second and the third cases.
634 #define DOUBLE_BITMAP_SENTINEL ((jsbitmap *) ARENA_INFO_OFFSET)
636 #ifdef JS_THREADSAFE
638 * The maximum number of things to put on the local free list by taking
639 * several things from the global free list or from the tail of the last
640 * allocated arena to amortize the cost of rt->gcLock.
642 * We use number 8 based on benchmarks from bug 312238.
644 #define MAX_THREAD_LOCAL_THINGS 8
646 #endif
648 JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
650 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
651 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
653 /* We want to use all the available GC thing space for object's slots. */
654 JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
657 * Ensure that JSObject is allocated from a different GC-list than
658 * jsdouble and JSString so we can easily finalize JSObject before these two
659 * types of GC things. See comments in js_GC.
661 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
662 GC_FREELIST_INDEX(sizeof(JSObject)));
663 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
664 GC_FREELIST_INDEX(sizeof(JSObject)));
667 * JSPtrTable capacity growth descriptor. The table grows by powers of two
668 * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
669 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
671 typedef struct JSPtrTableInfo {
672 uint16 minCapacity;
673 uint16 linearGrowthThreshold;
674 } JSPtrTableInfo;
676 #define GC_ITERATOR_TABLE_MIN 4
677 #define GC_ITERATOR_TABLE_LINEAR 1024
679 static const JSPtrTableInfo iteratorTableInfo = {
680 GC_ITERATOR_TABLE_MIN,
681 GC_ITERATOR_TABLE_LINEAR
684 /* Calculate table capacity based on the current value of JSPtrTable.count. */
685 static size_t
686 PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
688 size_t linear, log, capacity;
690 linear = info->linearGrowthThreshold;
691 JS_ASSERT(info->minCapacity <= linear);
693 if (count == 0) {
694 capacity = 0;
695 } else if (count < linear) {
696 log = JS_CEILING_LOG2W(count);
697 JS_ASSERT(log != JS_BITS_PER_WORD);
698 capacity = (size_t)1 << log;
699 if (capacity < info->minCapacity)
700 capacity = info->minCapacity;
701 } else {
702 capacity = JS_ROUNDUP(count, linear);
705 JS_ASSERT(capacity >= count);
706 return capacity;
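/*
 * For the iterator table parameters above (minCapacity 4, linear growth
 * threshold 1024) this gives, for example:
 *
 *   PtrTableCapacity(0, &iteratorTableInfo)    == 0
 *   PtrTableCapacity(3, &iteratorTableInfo)    == 4
 *   PtrTableCapacity(5, &iteratorTableInfo)    == 8
 *   PtrTableCapacity(600, &iteratorTableInfo)  == 1024
 *   PtrTableCapacity(1500, &iteratorTableInfo) == 2048
 *
 * that is, power-of-two growth below the threshold and rounding up to a
 * multiple of the threshold above it.
 */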
709 static void
710 FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
712 if (table->array) {
713 JS_ASSERT(table->count > 0);
714 free(table->array);
715 table->array = NULL;
716 table->count = 0;
718 JS_ASSERT(table->count == 0);
721 static JSBool
722 AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
723 void *ptr)
725 size_t count, capacity;
726 void **array;
728 count = table->count;
729 capacity = PtrTableCapacity(count, info);
731 if (count == capacity) {
732 if (capacity < info->minCapacity) {
733 JS_ASSERT(capacity == 0);
734 JS_ASSERT(!table->array);
735 capacity = info->minCapacity;
736 } else {
738 * Simplify the overflow detection assuming a pointer is bigger
739 * than a byte.
741 JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
742 capacity = (capacity < info->linearGrowthThreshold)
743 ? 2 * capacity
744 : capacity + info->linearGrowthThreshold;
745 if (capacity > (size_t)-1 / sizeof table->array[0])
746 goto bad;
748 array = (void **) realloc(table->array,
749 capacity * sizeof table->array[0]);
750 if (!array)
751 goto bad;
752 #ifdef DEBUG
753 memset(array + count, JS_FREE_PATTERN,
754 (capacity - count) * sizeof table->array[0]);
755 #endif
756 table->array = array;
759 table->array[count] = ptr;
760 table->count = count + 1;
762 return JS_TRUE;
764 bad:
765 JS_ReportOutOfMemory(cx);
766 return JS_FALSE;
769 static void
770 ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
771 size_t newCount)
773 size_t oldCapacity, capacity;
774 void **array;
776 JS_ASSERT(newCount <= table->count);
777 if (newCount == table->count)
778 return;
780 oldCapacity = PtrTableCapacity(table->count, info);
781 table->count = newCount;
782 capacity = PtrTableCapacity(newCount, info);
784 if (oldCapacity != capacity) {
785 array = table->array;
786 JS_ASSERT(array);
787 if (capacity == 0) {
788 free(array);
789 table->array = NULL;
790 return;
792 array = (void **) realloc(array, capacity * sizeof array[0]);
793 if (array)
794 table->array = array;
796 #ifdef DEBUG
797 memset(table->array + newCount, JS_FREE_PATTERN,
798 (capacity - newCount) * sizeof table->array[0]);
799 #endif
802 #ifdef JS_GCMETER
803 # define METER(x) ((void) (x))
804 # define METER_IF(condition, x) ((void) ((condition) && (x)))
805 #else
806 # define METER(x) ((void) 0)
807 # define METER_IF(condition, x) ((void) 0)
808 #endif
810 #define METER_UPDATE_MAX(maxLval, rval) \
811 METER_IF((maxLval) < (rval), (maxLval) = (rval))
813 #if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
816 * For chunks allocated via over-sized malloc, get a pointer to store the gap
817 * between the malloc's result and the first arena in the chunk.
819 static uint32 *
820 GetMallocedChunkGapPtr(jsuword chunk)
822 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
824 /* Use the memory after the chunk, see NewGCChunk for details. */
825 return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
828 #endif
830 static jsuword
831 NewGCChunk(void)
833 void *p;
835 #if JS_GC_USE_MMAP
836 if (js_gcUseMmap) {
837 # if defined(XP_WIN)
838 p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
839 MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
840 return (jsuword) p;
841 # else
842 p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
843 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
844 return (p == MAP_FAILED) ? 0 : (jsuword) p;
845 # endif
847 #endif
849 #if HAS_POSIX_MEMALIGN
850 if (0 != posix_memalign(&p, GC_ARENA_SIZE,
851 GC_ARENA_SIZE * js_gcArenasPerChunk -
852 JS_GC_ARENA_PAD)) {
853 return 0;
855 return (jsuword) p;
856 #else
858 * Implement chunk allocation using oversized malloc if mmap and
859 * posix_memalign are not available.
861 * Since malloc allocates pointers aligned on the word boundary, to get
862 * js_gcArenasPerChunk aligned arenas, we need to malloc only
864 * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
866 * bytes. But since we store the gap between the malloced pointer and the
867 * first arena of the chunk just after the chunk itself, we need to ask for
869 * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
871 * bytes to ensure that we always have room to store the gap.
873 p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
874 if (!p)
875 return 0;
878 jsuword chunk;
880 chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
881 *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
882 return chunk;
884 #endif
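/*
 * A sketch of the malloc-based case, assuming a hypothetical malloc result
 * p == 0x1008: the first aligned arena starts at
 *
 *   chunk == (0x1008 + GC_ARENA_MASK) & ~GC_ARENA_MASK == 0x2000,
 *
 * the gap chunk - p == 0xff8 is stored in the extra arena's worth of space
 * requested past the last arena (see GetMallocedChunkGapPtr), and
 * DestroyGCChunk later frees the original pointer chunk - gap.
 */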
887 static void
888 DestroyGCChunk(jsuword chunk)
890 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
891 #if JS_GC_USE_MMAP
892 if (js_gcUseMmap) {
893 # if defined(XP_WIN)
894 VirtualFree((void *) chunk, 0, MEM_RELEASE);
895 # elif defined(SOLARIS)
896 munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
897 # else
898 munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
899 # endif
900 return;
902 #endif
904 #if HAS_POSIX_MEMALIGN
905 free((void *) chunk);
906 #else
907 /* See comments in NewGCChunk. */
908 JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
909 free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
910 #endif
913 #if CHUNKED_ARENA_ALLOCATION
915 static void
916 AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
918 ci->prevp = &rt->gcChunkList;
919 ci->next = rt->gcChunkList;
920 if (rt->gcChunkList) {
921 JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
922 rt->gcChunkList->prevp = &ci->next;
924 rt->gcChunkList = ci;
927 static void
928 RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
930 *ci->prevp = ci->next;
931 if (ci->next) {
932 JS_ASSERT(ci->next->prevp == &ci->next);
933 ci->next->prevp = ci->prevp;
937 #endif
939 static JSGCArenaInfo *
940 NewGCArena(JSRuntime *rt)
942 jsuword chunk;
943 JSGCArenaInfo *a;
945 if (rt->gcBytes >= rt->gcMaxBytes)
946 return NULL;
948 #if CHUNKED_ARENA_ALLOCATION
949 if (js_gcArenasPerChunk == 1) {
950 #endif
951 chunk = NewGCChunk();
952 if (chunk == 0)
953 return NULL;
954 a = ARENA_START_TO_INFO(chunk);
955 #if CHUNKED_ARENA_ALLOCATION
956 } else {
957 JSGCChunkInfo *ci;
958 uint32 i;
959 JSGCArenaInfo *aprev;
961 ci = rt->gcChunkList;
962 if (!ci) {
963 chunk = NewGCChunk();
964 if (chunk == 0)
965 return NULL;
966 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
967 a = GET_ARENA_INFO(chunk, 0);
968 a->firstArena = JS_TRUE;
969 a->arenaIndex = 0;
970 aprev = NULL;
971 i = 0;
972 do {
973 a->prev = aprev;
974 aprev = a;
975 ++i;
976 a = GET_ARENA_INFO(chunk, i);
977 a->firstArena = JS_FALSE;
978 a->arenaIndex = i;
979 } while (i != js_gcArenasPerChunk - 1);
980 ci = GET_CHUNK_INFO(chunk, 0);
981 ci->lastFreeArena = aprev;
982 ci->numFreeArenas = js_gcArenasPerChunk - 1;
983 AddChunkToList(rt, ci);
984 } else {
985 JS_ASSERT(ci->prevp == &rt->gcChunkList);
986 a = ci->lastFreeArena;
987 aprev = a->prev;
988 if (!aprev) {
989 JS_ASSERT(ci->numFreeArenas == 1);
990 JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
991 RemoveChunkFromList(rt, ci);
992 chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
993 SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
994 } else {
995 JS_ASSERT(ci->numFreeArenas >= 2);
996 JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
997 ci->lastFreeArena = aprev;
998 ci->numFreeArenas--;
1002 #endif
1004 rt->gcBytes += GC_ARENA_SIZE;
1005 a->prevUntracedPage = 0;
1006 memset(&a->u, 0, sizeof(a->u));
1008 return a;
1011 static void
1012 DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
1014 JSGCArenaInfo *a;
1016 while (last) {
1017 a = last;
1018 last = last->prev;
1020 METER(rt->gcStats.afree++);
1021 JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
1022 rt->gcBytes -= GC_ARENA_SIZE;
1024 #if CHUNKED_ARENA_ALLOCATION
1025 if (js_gcArenasPerChunk == 1) {
1026 #endif
1027 DestroyGCChunk(ARENA_INFO_TO_START(a));
1028 #if CHUNKED_ARENA_ALLOCATION
1029 } else {
1030 uint32 arenaIndex;
1031 jsuword chunk;
1032 uint32 chunkInfoIndex;
1033 JSGCChunkInfo *ci;
1034 # ifdef DEBUG
1035 jsuword firstArena;
1037 firstArena = a->firstArena;
1038 arenaIndex = a->arenaIndex;
1039 memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
1040 GC_ARENA_SIZE - JS_GC_ARENA_PAD);
1041 a->firstArena = firstArena;
1042 a->arenaIndex = arenaIndex;
1043 # endif
1044 arenaIndex = GET_ARENA_INDEX(a);
1045 chunk = GET_ARENA_CHUNK(a, arenaIndex);
1046 chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
1047 if (chunkInfoIndex == NO_FREE_ARENAS) {
1048 chunkInfoIndex = arenaIndex;
1049 SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
1050 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
1051 a->prev = NULL;
1052 ci->lastFreeArena = a;
1053 ci->numFreeArenas = 1;
1054 AddChunkToList(rt, ci);
1055 } else {
1056 JS_ASSERT(chunkInfoIndex != arenaIndex);
1057 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
1058 JS_ASSERT(ci->numFreeArenas != 0);
1059 JS_ASSERT(ci->lastFreeArena);
1060 JS_ASSERT(a != ci->lastFreeArena);
1061 if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
1062 RemoveChunkFromList(rt, ci);
1063 DestroyGCChunk(chunk);
1064 } else {
1065 ++ci->numFreeArenas;
1066 a->prev = ci->lastFreeArena;
1067 ci->lastFreeArena = a;
1071 # endif
1075 static void
1076 InitGCArenaLists(JSRuntime *rt)
1078 uintN i, thingSize;
1079 JSGCArenaList *arenaList;
1081 for (i = 0; i < GC_NUM_FREELISTS; i++) {
1082 arenaList = &rt->gcArenaList[i];
1083 thingSize = GC_FREELIST_NBYTES(i);
1084 JS_ASSERT((size_t)(uint16)thingSize == thingSize);
1085 arenaList->last = NULL;
1086 arenaList->lastCount = (uint16) THINGS_PER_ARENA(thingSize);
1087 arenaList->thingSize = (uint16) thingSize;
1088 arenaList->freeList = NULL;
1090 rt->gcDoubleArenaList.first = NULL;
1091 rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1094 static void
1095 FinishGCArenaLists(JSRuntime *rt)
1097 uintN i;
1098 JSGCArenaList *arenaList;
1100 for (i = 0; i < GC_NUM_FREELISTS; i++) {
1101 arenaList = &rt->gcArenaList[i];
1102 DestroyGCArenas(rt, arenaList->last);
1103 arenaList->last = NULL;
1104 arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
1105 arenaList->freeList = NULL;
1107 DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
1108 rt->gcDoubleArenaList.first = NULL;
1109 rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1111 rt->gcBytes = 0;
1112 JS_ASSERT(rt->gcChunkList == 0);
1116 * This function must not be called when thing is jsdouble.
1118 static uint8 *
1119 GetGCThingFlags(void *thing)
1121 JSGCArenaInfo *a;
1122 uint32 index;
1124 a = THING_TO_ARENA(thing);
1125 index = THING_TO_INDEX(thing, a->list->thingSize);
1126 return THING_FLAGP(a, index);
1130 * This function returns null when thing is jsdouble.
1132 static uint8 *
1133 GetGCThingFlagsOrNull(void *thing)
1135 JSGCArenaInfo *a;
1136 uint32 index;
1138 a = THING_TO_ARENA(thing);
1139 if (!a->list)
1140 return NULL;
1141 index = THING_TO_INDEX(thing, a->list->thingSize);
1142 return THING_FLAGP(a, index);
1145 intN
1146 js_GetExternalStringGCType(JSString *str)
1148 uintN type;
1150 type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
1151 JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
1152 return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
1155 static uint32
1156 MapGCFlagsToTraceKind(uintN flags)
1158 uint32 type;
1160 type = flags & GCF_TYPEMASK;
1161 JS_ASSERT(type != GCX_DOUBLE);
1162 JS_ASSERT(type < GCX_NTYPES);
1163 return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
1166 JS_FRIEND_API(uint32)
1167 js_GetGCThingTraceKind(void *thing)
1169 JSGCArenaInfo *a;
1170 uint32 index;
1172 a = THING_TO_ARENA(thing);
1173 if (!a->list)
1174 return JSTRACE_DOUBLE;
1176 index = THING_TO_INDEX(thing, a->list->thingSize);
1177 return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
1180 JSRuntime*
1181 js_GetGCStringRuntime(JSString *str)
1183 JSGCArenaList *list;
1185 list = THING_TO_ARENA(str)->list;
1187 JS_ASSERT(list->thingSize == sizeof(JSGCThing));
1188 JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
1190 return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
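/*
 * The subtraction above relies on strings living in the first arena list
 * (index 0, as asserted), so list points at rt->gcArenaList[0] and stepping
 * back by offsetof(JSRuntime, gcArenaList) recovers the enclosing JSRuntime:
 *
 *   rt == (JSRuntime *) ((uint8 *) &rt->gcArenaList[0]
 *                        - offsetof(JSRuntime, gcArenaList))
 */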
1193 JSBool
1194 js_IsAboutToBeFinalized(JSContext *cx, void *thing)
1196 JSGCArenaInfo *a;
1197 uint32 index, flags;
1199 a = THING_TO_ARENA(thing);
1200 if (!a->list) {
1202 * Check if arena has no marked doubles. In that case the bitmap with
1203 * the mark flags contains all garbage as it is initialized only when
1204 * marking the first double in the arena.
1206 if (!a->u.hasMarkedDoubles)
1207 return JS_TRUE;
1208 index = DOUBLE_THING_TO_INDEX(thing);
1209 return !IsMarkedDouble(a, index);
1211 index = THING_TO_INDEX(thing, a->list->thingSize);
1212 flags = *THING_FLAGP(a, index);
1213 return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
1216 /* This is compatible with JSDHashEntryStub. */
1217 typedef struct JSGCRootHashEntry {
1218 JSDHashEntryHdr hdr;
1219 void *root;
1220 const char *name;
1221 } JSGCRootHashEntry;
1223 /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
1224 #define GC_ROOTS_SIZE 256
1226 #if CHUNKED_ARENA_ALLOCATION
1229 * For a CPU with extremely large pages, using them for GC things wastes
1230 * too much memory.
1232 # define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
1234 JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
1236 #endif
1238 JSBool
1239 js_InitGC(JSRuntime *rt, uint32 maxbytes)
1241 #if JS_GC_USE_MMAP
1242 if (js_gcArenasPerChunk == 0) {
1243 size_t cpuPageSize, arenasPerPage;
1244 # if defined(XP_WIN)
1245 SYSTEM_INFO si;
1247 GetSystemInfo(&si);
1248 cpuPageSize = si.dwPageSize;
1250 # elif defined(XP_UNIX) || defined(XP_BEOS)
1251 cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
1252 # else
1253 # error "Not implemented"
1254 # endif
1255 /* cpuPageSize is a power of 2. */
1256 JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
1257 arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
1258 #ifdef DEBUG
1259 if (arenasPerPage == 0) {
1260 fprintf(stderr,
1261 "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
1262 "paged allocation for the garbage collector. Please report this.\n",
1263 (unsigned) cpuPageSize);
1265 #endif
1266 if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
1268 * Use at least 4 GC arenas per paged allocation chunk to minimize
1269 * the overhead of mmap/VirtualAlloc.
1271 js_gcUseMmap = JS_TRUE;
1272 js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
1273 } else {
1274 js_gcUseMmap = JS_FALSE;
1275 js_gcArenasPerChunk = 7;
1278 JS_ASSERT(1 <= js_gcArenasPerChunk &&
1279 js_gcArenasPerChunk <= NO_FREE_ARENAS);
1280 #endif
1282 InitGCArenaLists(rt);
1283 if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
1284 sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
1285 rt->gcRootsHash.ops = NULL;
1286 return JS_FALSE;
1288 rt->gcLocksHash = NULL; /* create lazily */
1291 * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
1292 * for default backward API compatibility.
1294 rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
1295 rt->gcEmptyArenaPoolLifespan = 30000;
1298 * By default the trigger factor gets maximum possible value. This
1299 * means that GC will not be triggered by growth of GC memory (gcBytes).
1301 rt->gcTriggerFactor = (uint32) -1;
1304 * The assigned value prevents GC from running when GC memory is too low
1305 * (during JS engine start).
1307 rt->gcLastBytes = 8192;
1309 METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
1310 return JS_TRUE;
1313 #ifdef JS_GCMETER
1315 static void
1316 UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
1317 uint32 nthings)
1319 size_t narenas;
1321 narenas = nlivearenas + nkilledArenas;
1322 JS_ASSERT(narenas >= st->livearenas);
1324 st->newarenas = narenas - st->livearenas;
1325 st->narenas = narenas;
1326 st->livearenas = nlivearenas;
1327 if (st->maxarenas < narenas)
1328 st->maxarenas = narenas;
1329 st->totalarenas += narenas;
1331 st->nthings = nthings;
1332 if (st->maxthings < nthings)
1333 st->maxthings = nthings;
1334 st->totalthings += nthings;
1337 JS_FRIEND_API(void)
1338 js_DumpGCStats(JSRuntime *rt, FILE *fp)
1340 int i;
1341 size_t sumArenas, sumTotalArenas;
1342 size_t sumThings, sumMaxThings;
1343 size_t sumThingSize, sumTotalThingSize;
1344 size_t sumArenaCapacity, sumTotalArenaCapacity;
1345 JSGCArenaStats *st;
1346 size_t thingSize, thingsPerArena;
1347 size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;
1349 fprintf(fp, "\nGC allocation statistics:\n");
1351 #define UL(x) ((unsigned long)(x))
1352 #define ULSTAT(x) UL(rt->gcStats.x)
1353 #define PERCENT(x,y) (100.0 * (double) (x) / (double) (y))
1355 sumArenas = 0;
1356 sumTotalArenas = 0;
1357 sumThings = 0;
1358 sumMaxThings = 0;
1359 sumThingSize = 0;
1360 sumTotalThingSize = 0;
1361 sumArenaCapacity = 0;
1362 sumTotalArenaCapacity = 0;
1363 sumAlloc = 0;
1364 sumLocalAlloc = 0;
1365 sumFail = 0;
1366 sumRetry = 0;
1367 for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
1368 if (i == -1) {
1369 thingSize = sizeof(jsdouble);
1370 thingsPerArena = DOUBLES_PER_ARENA;
1371 st = &rt->gcStats.doubleArenaStats;
1372 fprintf(fp,
1373 "Arena list for double values (%lu doubles per arena):",
1374 UL(thingsPerArena));
1375 } else {
1376 thingSize = rt->gcArenaList[i].thingSize;
1377 thingsPerArena = THINGS_PER_ARENA(thingSize);
1378 st = &rt->gcStats.arenaStats[i];
1379 fprintf(fp,
1380 "Arena list %d (thing size %lu, %lu things per arena):",
1381 i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
1383 if (st->maxarenas == 0) {
1384 fputs(" NEVER USED\n", fp);
1385 continue;
1387 putc('\n', fp);
1388 fprintf(fp, " arenas before GC: %lu\n", UL(st->narenas));
1389 fprintf(fp, " new arenas before GC: %lu (%.1f%%)\n",
1390 UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
1391 fprintf(fp, " arenas after GC: %lu (%.1f%%)\n",
1392 UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
1393 fprintf(fp, " max arenas: %lu\n", UL(st->maxarenas));
1394 fprintf(fp, " things: %lu\n", UL(st->nthings));
1395 fprintf(fp, " GC cell utilization: %.1f%%\n",
1396 PERCENT(st->nthings, thingsPerArena * st->narenas));
1397 fprintf(fp, " average cell utilization: %.1f%%\n",
1398 PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
1399 fprintf(fp, " max things: %lu\n", UL(st->maxthings));
1400 fprintf(fp, " alloc attempts: %lu\n", UL(st->alloc));
1401 fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
1402 UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
1403 sumArenas += st->narenas;
1404 sumTotalArenas += st->totalarenas;
1405 sumThings += st->nthings;
1406 sumMaxThings += st->maxthings;
1407 sumThingSize += thingSize * st->nthings;
1408 sumTotalThingSize += thingSize * st->totalthings;
1409 sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
1410 sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
1411 sumAlloc += st->alloc;
1412 sumLocalAlloc += st->localalloc;
1413 sumFail += st->fail;
1414 sumRetry += st->retry;
1416 fprintf(fp, "TOTAL STATS:\n");
1417 fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes));
1418 fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas));
1419 fprintf(fp, " total GC things: %lu\n", UL(sumThings));
1420 fprintf(fp, " max total GC things: %lu\n", UL(sumMaxThings));
1421 fprintf(fp, " GC cell utilization: %.1f%%\n",
1422 PERCENT(sumThingSize, sumArenaCapacity));
1423 fprintf(fp, " average cell utilization: %.1f%%\n",
1424 PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
1425 fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
1426 fprintf(fp, " alloc attempts: %lu\n", UL(sumAlloc));
1427 fprintf(fp, " alloc without locks: %lu (%.1f%%)\n",
1428 UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
1429 fprintf(fp, " allocation failures: %lu\n", UL(sumFail));
1430 fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
1431 fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
1432 fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
1433 fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
1434 fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
1435 fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
1436 fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
1437 fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced));
1438 #ifdef DEBUG
1439 fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced));
1440 #endif
1441 fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
1442 fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
1443 fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
1444 fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
1445 fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
1446 fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
1447 fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
1448 fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
1449 fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
1451 #undef UL
1452 #undef ULSTAT
1453 #undef PERCENT
1455 #ifdef JS_ARENAMETER
1456 JS_DumpArenaStats(fp);
1457 #endif
1459 #endif
1461 #ifdef DEBUG
1462 static void
1463 CheckLeakedRoots(JSRuntime *rt);
1464 #endif
1466 #ifdef JS_THREADSAFE
1467 static void
1468 TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount);
1469 #endif
1471 void
1472 js_FinishGC(JSRuntime *rt)
1474 #ifdef JS_ARENAMETER
1475 JS_DumpArenaStats(stdout);
1476 #endif
1477 #ifdef JS_GCMETER
1478 js_DumpGCStats(rt, stdout);
1479 #endif
1481 FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
1482 #ifdef JS_THREADSAFE
1483 TrimGCFreeListsPool(rt, 0);
1484 JS_ASSERT(!rt->gcFreeListsPool);
1485 #endif
1486 FinishGCArenaLists(rt);
1488 if (rt->gcRootsHash.ops) {
1489 #ifdef DEBUG
1490 CheckLeakedRoots(rt);
1491 #endif
1492 JS_DHashTableFinish(&rt->gcRootsHash);
1493 rt->gcRootsHash.ops = NULL;
1495 if (rt->gcLocksHash) {
1496 JS_DHashTableDestroy(rt->gcLocksHash);
1497 rt->gcLocksHash = NULL;
1501 JSBool
1502 js_AddRoot(JSContext *cx, void *rp, const char *name)
1504 JSBool ok = js_AddRootRT(cx->runtime, rp, name);
1505 if (!ok)
1506 JS_ReportOutOfMemory(cx);
1507 return ok;
1510 JSBool
1511 js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
1513 JSBool ok;
1514 JSGCRootHashEntry *rhe;
1517 * Due to the long-standing, but now removed, use of rt->gcLock across the
1518 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1519 * properly with a racing GC, without calling JS_AddRoot from a request.
1520 * We have to preserve API compatibility here, now that we avoid holding
1521 * rt->gcLock across the mark phase (including the root hashtable mark).
1523 JS_LOCK_GC(rt);
1524 js_WaitForGC(rt);
1525 rhe = (JSGCRootHashEntry *)
1526 JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
1527 if (rhe) {
1528 rhe->root = rp;
1529 rhe->name = name;
1530 ok = JS_TRUE;
1531 } else {
1532 ok = JS_FALSE;
1534 JS_UNLOCK_GC(rt);
1535 return ok;
1538 JSBool
1539 js_RemoveRoot(JSRuntime *rt, void *rp)
1542 * Due to the JS_RemoveRootRT API, we may be called outside of a request.
1543 * Same synchronization drill as above in js_AddRoot.
1545 JS_LOCK_GC(rt);
1546 js_WaitForGC(rt);
1547 (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
1548 rt->gcPoke = JS_TRUE;
1549 JS_UNLOCK_GC(rt);
1550 return JS_TRUE;
1553 #ifdef DEBUG
1555 static JSDHashOperator
1556 js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
1558 uint32 *leakedroots = (uint32 *)arg;
1559 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1561 (*leakedroots)++;
1562 fprintf(stderr,
1563 "JS engine warning: leaking GC root \'%s\' at %p\n",
1564 rhe->name ? (char *)rhe->name : "", rhe->root);
1566 return JS_DHASH_NEXT;
1569 static void
1570 CheckLeakedRoots(JSRuntime *rt)
1572 uint32 leakedroots = 0;
1574 /* In debug builds, warn (but don't assert) about any remaining roots. */
1575 JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
1576 &leakedroots);
1577 if (leakedroots > 0) {
1578 if (leakedroots == 1) {
1579 fprintf(stderr,
1580 "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
1581 " This root may point to freed memory. Objects reachable\n"
1582 " through it have not been finalized.\n",
1583 (void *) rt);
1584 } else {
1585 fprintf(stderr,
1586 "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
1587 " These roots may point to freed memory. Objects reachable\n"
1588 " through them have not been finalized.\n",
1589 (unsigned long) leakedroots, (void *) rt);
1594 typedef struct NamedRootDumpArgs {
1595 void (*dump)(const char *name, void *rp, void *data);
1596 void *data;
1597 } NamedRootDumpArgs;
1599 static JSDHashOperator
1600 js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1601 void *arg)
1603 NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
1604 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1606 if (rhe->name)
1607 args->dump(rhe->name, rhe->root, args->data);
1608 return JS_DHASH_NEXT;
1611 JS_BEGIN_EXTERN_C
1612 void
1613 js_DumpNamedRoots(JSRuntime *rt,
1614 void (*dump)(const char *name, void *rp, void *data),
1615 void *data)
1617 NamedRootDumpArgs args;
1619 args.dump = dump;
1620 args.data = data;
1621 JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
1623 JS_END_EXTERN_C
1625 #endif /* DEBUG */
1627 typedef struct GCRootMapArgs {
1628 JSGCRootMapFun map;
1629 void *data;
1630 } GCRootMapArgs;
1632 static JSDHashOperator
1633 js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1634 void *arg)
1636 GCRootMapArgs *args = (GCRootMapArgs *) arg;
1637 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1638 intN mapflags;
1639 int op;
1641 mapflags = args->map(rhe->root, rhe->name, args->data);
1643 #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
1644 JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
1645 JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
1646 op = (JSDHashOperator)mapflags;
1647 #else
1648 op = JS_DHASH_NEXT;
1649 if (mapflags & JS_MAP_GCROOT_STOP)
1650 op |= JS_DHASH_STOP;
1651 if (mapflags & JS_MAP_GCROOT_REMOVE)
1652 op |= JS_DHASH_REMOVE;
1653 #endif
1655 return (JSDHashOperator) op;
1658 uint32
1659 js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
1661 GCRootMapArgs args;
1662 uint32 rv;
1664 args.map = map;
1665 args.data = data;
1666 JS_LOCK_GC(rt);
1667 rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
1668 JS_UNLOCK_GC(rt);
1669 return rv;
1672 JSBool
1673 js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
1675 JSRuntime *rt;
1676 JSBool ok;
1678 rt = cx->runtime;
1679 JS_ASSERT(!rt->gcRunning);
1681 JS_LOCK_GC(rt);
1682 ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
1683 JS_UNLOCK_GC(rt);
1684 return ok;
1687 static void
1688 CloseNativeIterators(JSContext *cx)
1690 JSRuntime *rt;
1691 size_t count, newCount, i;
1692 void **array;
1693 JSObject *obj;
1695 rt = cx->runtime;
1696 count = rt->gcIteratorTable.count;
1697 array = rt->gcIteratorTable.array;
1699 newCount = 0;
1700 for (i = 0; i != count; ++i) {
1701 obj = (JSObject *)array[i];
1702 if (js_IsAboutToBeFinalized(cx, obj))
1703 js_CloseNativeIterator(cx, obj);
1704 else
1705 array[newCount++] = obj;
1707 ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
1710 #if defined(DEBUG_brendan) || defined(DEBUG_timeless)
1711 #define DEBUG_gchist
1712 #endif
1714 #ifdef DEBUG_gchist
1715 #define NGCHIST 64
1717 static struct GCHist {
1718 bool lastDitch;
1719 JSGCThing *freeList;
1720 } gchist[NGCHIST];
1722 unsigned gchpos = 0;
1723 #endif
1725 #ifdef JS_THREADSAFE
1727 const JSGCFreeListSet js_GCEmptyFreeListSet = {
1728 { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }, NULL
1731 static void
1732 TrimGCFreeListsPool(JSRuntime *rt, uintN keepCount)
1734 JSGCFreeListSet **cursor, *freeLists, *link;
1736 cursor = &rt->gcFreeListsPool;
1737 while (keepCount != 0) {
1738 --keepCount;
1739 freeLists = *cursor;
1740 if (!freeLists)
1741 return;
1742 memset(freeLists->array, 0, sizeof freeLists->array);
1743 cursor = &freeLists->link;
1745 freeLists = *cursor;
1746 if (freeLists) {
1747 *cursor = NULL;
1748 do {
1749 link = freeLists->link;
1750 free(freeLists);
1751 } while ((freeLists = link) != NULL);
1755 void
1756 js_RevokeGCLocalFreeLists(JSContext *cx)
1758 JS_ASSERT(!cx->gcLocalFreeLists->link);
1759 if (cx->gcLocalFreeLists != &js_GCEmptyFreeListSet) {
1760 cx->gcLocalFreeLists->link = cx->runtime->gcFreeListsPool;
1761 cx->runtime->gcFreeListsPool = cx->gcLocalFreeLists;
1762 cx->gcLocalFreeLists = (JSGCFreeListSet *) &js_GCEmptyFreeListSet;
1766 static JSGCFreeListSet *
1767 EnsureLocalFreeList(JSContext *cx)
1769 JSGCFreeListSet *freeLists;
1771 freeLists = cx->gcLocalFreeLists;
1772 if (freeLists != &js_GCEmptyFreeListSet) {
1773 JS_ASSERT(freeLists);
1774 return freeLists;
1777 freeLists = cx->runtime->gcFreeListsPool;
1778 if (freeLists) {
1779 cx->runtime->gcFreeListsPool = freeLists->link;
1780 freeLists->link = NULL;
1781 } else {
1782 /* JS_malloc is not used as the caller reports out-of-memory itself. */
1783 freeLists = (JSGCFreeListSet *) calloc(1, sizeof *freeLists);
1784 if (!freeLists)
1785 return NULL;
1787 cx->gcLocalFreeLists = freeLists;
1788 return freeLists;
1791 #endif
1793 static JS_INLINE bool
1794 IsGCThresholdReached(JSRuntime *rt)
1796 #ifdef JS_GC_ZEAL
1797 if (rt->gcZeal >= 1)
1798 return true;
1799 #endif
1802 * Since the initial value of the gcLastBytes parameter is not equal to
1803 * zero (see the js_InitGC function) the return value is false when
1804 * the gcBytes value is close to zero at the JS engine start.
1806 return rt->gcMallocBytes >= rt->gcMaxMallocBytes ||
1807 rt->gcBytes / rt->gcTriggerFactor >= rt->gcLastBytes / 100;
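/*
 * For example, with a hypothetical rt->gcTriggerFactor of 300 the second
 * condition above becomes
 *
 *   rt->gcBytes / 300 >= rt->gcLastBytes / 100,
 *
 * i.e. a GC is triggered once the heap grows to roughly three times its size
 * after the last GC, while the default factor of (uint32) -1 set in
 * js_InitGC effectively disables this trigger.
 */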
1810 void *
1811 js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
1813 JSRuntime *rt;
1814 uintN flindex;
1815 bool doGC;
1816 JSGCThing *thing;
1817 uint8 *flagp;
1818 JSGCArenaList *arenaList;
1819 JSGCArenaInfo *a;
1820 uintN thingsLimit;
1821 JSLocalRootStack *lrs;
1822 #ifdef JS_GCMETER
1823 JSGCArenaStats *astats;
1824 #endif
1825 #ifdef JS_THREADSAFE
1826 JSBool gcLocked;
1827 uintN localMallocBytes;
1828 JSGCFreeListSet *freeLists;
1829 JSGCThing **lastptr;
1830 JSGCThing *tmpthing;
1831 uint8 *tmpflagp;
1832 uintN maxFreeThings; /* max to take from the global free list */
1833 #endif
1835 JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE);
1836 rt = cx->runtime;
1837 nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
1838 flindex = GC_FREELIST_INDEX(nbytes);
1840 /* Updates of metering counters here may not be thread-safe. */
1841 METER(astats = &cx->runtime->gcStats.arenaStats[flindex]);
1842 METER(astats->alloc++);
1844 #ifdef JS_THREADSAFE
1845 gcLocked = JS_FALSE;
1846 JS_ASSERT(cx->thread);
1847 freeLists = cx->gcLocalFreeLists;
1848 thing = freeLists->array[flindex];
1849 localMallocBytes = cx->thread->gcMallocBytes;
1850 if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
1851 flagp = thing->flagp;
1852 freeLists->array[flindex] = thing->next;
1853 METER(astats->localalloc++);
1854 goto success;
1857 JS_LOCK_GC(rt);
1858 gcLocked = JS_TRUE;
1860 /* Transfer thread-local counter to global one. */
1861 if (localMallocBytes != 0) {
1862 cx->thread->gcMallocBytes = 0;
1863 if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
1864 rt->gcMallocBytes = rt->gcMaxMallocBytes;
1865 else
1866 rt->gcMallocBytes += localMallocBytes;
1868 #endif
1869 JS_ASSERT(!rt->gcRunning);
1870 if (rt->gcRunning) {
1871 METER(rt->gcStats.finalfail++);
1872 JS_UNLOCK_GC(rt);
1873 return NULL;
1876 #if defined JS_GC_ZEAL && defined JS_TRACER
1877 if (rt->gcZeal >= 1 && JS_TRACE_MONITOR(cx).useReservedObjects)
1878 goto testReservedObjects;
1879 #endif
1881 arenaList = &rt->gcArenaList[flindex];
1882 doGC = IsGCThresholdReached(rt);
1883 for (;;) {
1884 if (doGC
1885 #ifdef JS_TRACER
1886 && !JS_ON_TRACE(cx) && !JS_TRACE_MONITOR(cx).useReservedObjects
1887 #endif
1890 * Keep rt->gcLock across the call into js_GC so we don't starve
1891 * and lose to racing threads who deplete the heap just after
1892 * js_GC has replenished it (or has synchronized with a racing
1893 * GC that collected a bunch of garbage). This unfair scheduling
1894 * can happen on certain operating systems. For the gory details,
1895 * see bug 162779 at https://bugzilla.mozilla.org/.
1897 js_GC(cx, GC_LAST_DITCH);
1898 METER(astats->retry++);
1901 /* Try to get thing from the free list. */
1902 thing = arenaList->freeList;
1903 if (thing) {
1904 arenaList->freeList = thing->next;
1905 flagp = thing->flagp;
1906 JS_ASSERT(*flagp & GCF_FINAL);
1908 #ifdef JS_THREADSAFE
1910 * Refill the local free list by taking several things from the
1911 * global free list unless we are still at the rt->gcMaxMallocBytes
1912 * barrier or the local free list is already populated. The former
1913 * happens when the GC is canceled because gcCallback(cx, JSGC_BEGIN)
1914 * returned false. The latter is caused by allocating new things in
1915 * gcCallback(cx, JSGC_END).
1917 if (rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1918 break;
1920 freeLists = EnsureLocalFreeList(cx);
1921 if (!freeLists)
1922 goto fail;
1923 if (freeLists->array[flindex])
1924 break;
1926 tmpthing = arenaList->freeList;
1927 if (tmpthing) {
1928 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1929 do {
1930 if (!tmpthing->next)
1931 break;
1932 tmpthing = tmpthing->next;
1933 } while (--maxFreeThings != 0);
1935 freeLists->array[flindex] = arenaList->freeList;
1936 arenaList->freeList = tmpthing->next;
1937 tmpthing->next = NULL;
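/*
 * Sketch of the splice above (illustrative): if the global free list is
 * t1 -> t2 -> ... and MAX_THREAD_LOCAL_THINGS were, say, 8, the walk
 * stops at t8 (or at the last thing when the list is shorter), t1..t8
 * become the thread-local free list and the global list continues at t9.
 */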
1939 #endif
1940 break;
1944 * Try to allocate things from the last arena. If it is fully used,
1945 * check if we can allocate a new one and, if we cannot, consider
1946 * doing a "last ditch" GC unless already tried.
1948 thingsLimit = THINGS_PER_ARENA(nbytes);
1949 if (arenaList->lastCount != thingsLimit) {
1950 JS_ASSERT(arenaList->lastCount < thingsLimit);
1951 a = arenaList->last;
1952 } else {
1953 #ifdef JS_TRACER
1954 if (JS_TRACE_MONITOR(cx).useReservedObjects) {
1955 #ifdef JS_GC_ZEAL
1956 testReservedObjects:
1957 #endif
1958 JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
1960 thing = (JSGCThing *) tm->reservedObjects;
1961 flagp = GetGCThingFlags(thing);
1962 JS_ASSERT(thing);
1963 tm->reservedObjects = JSVAL_TO_OBJECT(tm->reservedObjects->fslots[0]);
1964 break;
1966 #endif
1968 a = NewGCArena(rt);
1969 if (!a) {
1970 if (doGC || JS_ON_TRACE(cx))
1971 goto fail;
1972 doGC = true;
1973 continue;
1975 a->list = arenaList;
1976 a->prev = arenaList->last;
1977 a->prevUntracedPage = 0;
1978 a->u.untracedThings = 0;
1979 arenaList->last = a;
1980 arenaList->lastCount = 0;
1983 flagp = THING_FLAGP(a, arenaList->lastCount);
1984 thing = FLAGP_TO_THING(flagp, nbytes);
1985 arenaList->lastCount++;
1987 #ifdef JS_THREADSAFE
1989 * Refill the local free list by taking free things from the last
1990 * arena. Prefer to order free things by ascending address in the
1991 * (unscientific) hope of better cache locality.
1993 if (rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1994 break;
1996 freeLists = EnsureLocalFreeList(cx);
1997 if (!freeLists)
1998 goto fail;
1999 if (freeLists->array[flindex])
2000 break;
2001 lastptr = &freeLists->array[flindex];
2002 maxFreeThings = thingsLimit - arenaList->lastCount;
2003 if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
2004 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
2005 while (maxFreeThings != 0) {
2006 --maxFreeThings;
2008 tmpflagp = THING_FLAGP(a, arenaList->lastCount);
2009 tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
2010 arenaList->lastCount++;
2011 tmpthing->flagp = tmpflagp;
2012 *tmpflagp = GCF_FINAL; /* signifying that thing is free */
2014 *lastptr = tmpthing;
2015 lastptr = &tmpthing->next;
2017 *lastptr = NULL;
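/*
 * After the loop above the thread-local free list holds up to
 * MAX_THREAD_LOCAL_THINGS consecutive things carved from the tail of the
 * last arena, linked in ascending address order, each with its flag byte
 * set to GCF_FINAL to mark it as free until it is actually handed out.
 */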
2018 #endif
2019 break;
2022 /* We successfully allocated the thing. */
2023 #ifdef JS_THREADSAFE
2024 success:
2025 #endif
2026 lrs = cx->localRootStack;
2027 if (lrs) {
2029 * If we're in a local root scope, don't set newborn[type] at all, to
2030 * avoid entraining garbage from it for an unbounded amount of time
2031 * on this context. A caller will leave the local root scope and pop
2032 * this reference, allowing thing to be GC'd if it has no other refs.
2033 * See JS_EnterLocalRootScope and related APIs.
2035 if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
2037 * When we fail for a thing allocated through the tail of the last
2038 * arena, the thing's flag byte is not initialized. So to prevent the
2039 * GC from accessing the uninitialized flags during finalization, we
2040 * always mark the thing as final. See bug 337407.
2042 *flagp = GCF_FINAL;
2043 goto fail;
2045 } else {
2047 * No local root scope, so we're stuck with the old, fragile model of
2048 * depending on a pigeon-hole newborn per type per context.
2050 cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
2053 /* We can't fail now, so update flags. */
2054 *flagp = (uint8)flags;
2056 #ifdef DEBUG_gchist
2057 gchist[gchpos].lastDitch = doGC;
2058 gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
2059 if (++gchpos == NGCHIST)
2060 gchpos = 0;
2061 #endif
2063 /* This is not thread-safe for thread-local allocations. */
2064 METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++);
2066 #ifdef JS_THREADSAFE
2067 if (gcLocked)
2068 JS_UNLOCK_GC(rt);
2069 #endif
2070 return thing;
2072 fail:
2073 #ifdef JS_THREADSAFE
2074 if (gcLocked)
2075 JS_UNLOCK_GC(rt);
2076 #endif
2077 METER(astats->fail++);
2078 if (!JS_ON_TRACE(cx))
2079 JS_ReportOutOfMemory(cx);
2080 return NULL;
2083 static JSGCDoubleCell *
2084 RefillDoubleFreeList(JSContext *cx)
2086 JSRuntime *rt;
2087 jsbitmap *doubleFlags, usedBits;
2088 JSBool didGC = JS_FALSE;
2089 JSGCArenaInfo *a;
2090 uintN bit, index;
2091 JSGCDoubleCell *cell, *list, *lastcell;
2093 JS_ASSERT(!cx->doubleFreeList);
2095 rt = cx->runtime;
2096 JS_LOCK_GC(rt);
2098 JS_ASSERT(!rt->gcRunning);
2099 if (rt->gcRunning) {
2100 METER(rt->gcStats.finalfail++);
2101 JS_UNLOCK_GC(rt);
2102 return NULL;
2105 if (IsGCThresholdReached(rt))
2106 goto do_gc;
2109 * Loop until we find a bitmap word with unset bits indicating free
2110 * double cells, then mark all of its bits as used and put the cells on
2111 * the free list for the current context.
2113 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2114 for (;;) {
2115 if (((jsuword) doubleFlags & GC_ARENA_MASK) ==
2116 ARENA_INFO_OFFSET) {
2117 if (doubleFlags == DOUBLE_BITMAP_SENTINEL ||
2118 !((JSGCArenaInfo *) doubleFlags)->prev) {
2119 a = NewGCArena(rt);
2120 if (!a) {
2121 do_gc:
2122 if (didGC || JS_ON_TRACE(cx)) {
2123 METER(rt->gcStats.doubleArenaStats.fail++);
2124 JS_UNLOCK_GC(rt);
2125 if (!JS_ON_TRACE(cx))
2126 JS_ReportOutOfMemory(cx);
2127 return NULL;
2129 js_GC(cx, GC_LAST_DITCH);
2130 METER(rt->gcStats.doubleArenaStats.retry++);
2131 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2132 didGC = JS_TRUE;
2133 continue;
2135 a->list = NULL;
2136 a->prev = NULL;
2137 if (doubleFlags == DOUBLE_BITMAP_SENTINEL) {
2138 JS_ASSERT(!rt->gcDoubleArenaList.first);
2139 rt->gcDoubleArenaList.first = a;
2140 } else {
2141 JS_ASSERT(rt->gcDoubleArenaList.first);
2142 ((JSGCArenaInfo *) doubleFlags)->prev = a;
2144 ClearDoubleArenaFlags(a);
2145 doubleFlags = DOUBLE_ARENA_BITMAP(a);
2146 break;
2148 doubleFlags =
2149 DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev);
2153 * When doubleFlags points to the last bitmap word in the arena, its
2154 * high bits correspond to non-existing cells. ClearDoubleArenaFlags
2155 * sets such bits to 1. Thus even for this last word a bit is unset
2156 * iff the corresponding cell exists and is free.
2158 if (*doubleFlags != (jsbitmap) -1)
2159 break;
2160 ++doubleFlags;
2163 rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1;
2164 usedBits = *doubleFlags;
2165 JS_ASSERT(usedBits != (jsbitmap) -1);
2166 *doubleFlags = (jsbitmap) -1;
2167 JS_UNLOCK_GC(rt);
2170 * Find the index corresponding to the first bit in *doubleFlags. The last
2171 * bit will have "index + JS_BITS_PER_WORD - 1".
2173 index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) -
2174 DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE;
2175 cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index;
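/*
 * Worked example (assuming 32-bit jsbitmap words): the bitmap starts at
 * DOUBLES_ARENA_BITMAP_OFFSET inside the arena and each bitmap byte
 * covers JS_BITS_PER_BYTE == 8 cells. If doubleFlags points at the second
 * bitmap word, its byte offset within the bitmap is 4, so index == 32 and
 * this word describes cells 32..63 of the arena.
 */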
2177 if (usedBits == 0) {
2178 /* The common case when all doubles from *doubleFlags are free. */
2179 JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA);
2180 list = cell;
2181 for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell)
2182 cell->link = cell + 1;
2183 lastcell->link = NULL;
2184 } else {
2186 * Assemble the free list from free cells from *doubleFlags starting
2187 * from the tail. In the loop
2189 * index + bit >= DOUBLES_PER_ARENA
2191 * when bit is one of the unused bits. We do not check for such bits
2192 * explicitly as they must be set and the "if" check filters them out.
2194 JS_ASSERT(index + JS_BITS_PER_WORD <=
2195 DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS);
2196 bit = JS_BITS_PER_WORD;
2197 cell += bit;
2198 list = NULL;
2199 do {
2200 --bit;
2201 --cell;
2202 if (!(((jsbitmap) 1 << bit) & usedBits)) {
2203 JS_ASSERT(index + bit < DOUBLES_PER_ARENA);
2204 JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list);
2205 cell->link = list;
2206 list = cell;
2208 } while (bit != 0);
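/*
 * Example (illustrative, 8-bit word for brevity): usedBits == 10010110 in
 * binary leaves bits 0, 3, 5 and 6 clear, so cells index+0, index+3,
 * index+5 and index+6 are linked together, lowest address first, and
 * become the list returned below.
 */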
2210 JS_ASSERT(list);
2213 * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue as
2214 * it immediately consumes the head of the list.
2216 return list;
2219 JSBool
2220 js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
2222 #ifdef JS_GCMETER
2223 JSGCArenaStats *astats;
2224 #endif
2225 JSGCDoubleCell *cell;
2227 /* Updates of metering counters here are not thread-safe. */
2228 METER(astats = &cx->runtime->gcStats.doubleArenaStats);
2229 METER(astats->alloc++);
2230 cell = cx->doubleFreeList;
2231 if (!cell) {
2232 cell = RefillDoubleFreeList(cx);
2233 if (!cell) {
2234 METER(astats->fail++);
2235 return JS_FALSE;
2237 } else {
2238 METER(astats->localalloc++);
2240 cx->doubleFreeList = cell->link;
2241 cell->number = d;
2242 *vp = DOUBLE_TO_JSVAL(&cell->number);
2243 return JS_TRUE;
2246 jsdouble *
2247 js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
2249 jsval v;
2250 jsdouble *dp;
2252 if (!js_NewDoubleInRootedValue(cx, d, &v))
2253 return NULL;
2255 JS_ASSERT(JSVAL_IS_DOUBLE(v));
2256 dp = JSVAL_TO_DOUBLE(v);
2257 if (cx->localRootStack) {
2258 if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0)
2259 return NULL;
2260 } else {
2261 cx->weakRoots.newborn[GCX_DOUBLE] = dp;
2263 return dp;
2266 #ifdef JS_TRACER
2267 JSBool
2268 js_ReserveObjects(JSContext *cx, size_t nobjects)
2271 * Ensure at least nobjects objects are in the list. fslots[1] of each
2272 * object on the reservedObjects list is the length of the list from there.
2274 JSObject *&head = JS_TRACE_MONITOR(cx).reservedObjects;
2275 size_t i = head ? JSVAL_TO_INT(head->fslots[1]) : 0;
2276 while (i < nobjects) {
2277 JSObject *obj = (JSObject *) js_NewGCThing(cx, GCX_OBJECT, sizeof(JSObject));
2278 if (!obj)
2279 return JS_FALSE;
2280 memset(obj, 0, sizeof(JSObject));
2281 /* The class must be set to something for finalization. */
2282 obj->classword = (jsuword) &js_ObjectClass;
2283 obj->fslots[0] = OBJECT_TO_JSVAL(head);
2284 i++;
2285 obj->fslots[1] = INT_TO_JSVAL(i);
2286 head = obj;
2289 return JS_TRUE;
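/*
 * Shape of the reserved list built above (illustrative): after
 * js_ReserveObjects(cx, 3) starting from an empty list, head->fslots[1]
 * holds INT_TO_JSVAL(3), head->fslots[0] links to the object whose
 * fslots[1] is INT_TO_JSVAL(2), and the first reserved object ends the
 * chain with a null object reference in fslots[0].
 */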
2291 #endif
2293 JSBool
2294 js_AddAsGCBytes(JSContext *cx, size_t sz)
2296 JSRuntime *rt;
2298 rt = cx->runtime;
2299 if (rt->gcBytes >= rt->gcMaxBytes ||
2300 sz > (size_t) (rt->gcMaxBytes - rt->gcBytes) ||
2301 IsGCThresholdReached(rt)) {
2302 if (JS_ON_TRACE(cx)) {
2304 * If we can't leave the trace, signal OOM condition, otherwise
2305 * exit from trace and proceed with GC.
2307 if (!js_CanLeaveTrace(cx)) {
2308 JS_UNLOCK_GC(rt);
2309 return JS_FALSE;
2311 js_LeaveTrace(cx);
2313 js_GC(cx, GC_LAST_DITCH);
2314 if (rt->gcBytes >= rt->gcMaxBytes ||
2315 sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)) {
2316 JS_UNLOCK_GC(rt);
2317 JS_ReportOutOfMemory(cx);
2318 return JS_FALSE;
2321 rt->gcBytes += (uint32) sz;
2322 return JS_TRUE;
2325 void
2326 js_RemoveAsGCBytes(JSRuntime *rt, size_t sz)
2328 JS_ASSERT((size_t) rt->gcBytes >= sz);
2329 rt->gcBytes -= (uint32) sz;
2333 * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
2334 * they have no descendants to mark during the GC. Currently the optimization
2335 * is only used for non-dependent strings.
2337 #define GC_THING_IS_SHALLOW(flagp, thing) \
2338 ((flagp) && \
2339 ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING || \
2340 ((*(flagp) & GCF_TYPEMASK) == GCX_STRING && \
2341 !JSSTRING_IS_DEPENDENT((JSString *) (thing)))))
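/*
 * For example, a flat (non-dependent) JSString or an external string is
 * "shallow" here and can be locked with just the GCF_LOCK bit, while a
 * JSObject never is, so locking an object always goes through
 * rt->gcLocksHash below.
 */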
2343 /* This is compatible with JSDHashEntryStub. */
2344 typedef struct JSGCLockHashEntry {
2345 JSDHashEntryHdr hdr;
2346 const void *thing;
2347 uint32 count;
2348 } JSGCLockHashEntry;
2350 JSBool
2351 js_LockGCThingRT(JSRuntime *rt, void *thing)
2353 JSBool shallow, ok;
2354 uint8 *flagp;
2355 JSGCLockHashEntry *lhe;
2357 if (!thing)
2358 return JS_TRUE;
2360 flagp = GetGCThingFlagsOrNull(thing);
2361 JS_LOCK_GC(rt);
2362 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2365 * Avoid adding a rt->gcLocksHash entry for shallow things until someone
2366 * nests a lock.
2368 if (shallow && !(*flagp & GCF_LOCK)) {
2369 *flagp |= GCF_LOCK;
2370 METER(rt->gcStats.lock++);
2371 ok = JS_TRUE;
2372 goto out;
2375 if (!rt->gcLocksHash) {
2376 rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
2377 sizeof(JSGCLockHashEntry),
2378 GC_ROOTS_SIZE);
2379 if (!rt->gcLocksHash) {
2380 ok = JS_FALSE;
2381 goto out;
2385 lhe = (JSGCLockHashEntry *)
2386 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
2387 if (!lhe) {
2388 ok = JS_FALSE;
2389 goto out;
2391 if (!lhe->thing) {
2392 lhe->thing = thing;
2393 lhe->count = 1;
2394 } else {
2395 JS_ASSERT(lhe->count >= 1);
2396 lhe->count++;
2399 METER(rt->gcStats.lock++);
2400 ok = JS_TRUE;
2401 out:
2402 JS_UNLOCK_GC(rt);
2403 return ok;
2406 JSBool
2407 js_UnlockGCThingRT(JSRuntime *rt, void *thing)
2409 uint8 *flagp;
2410 JSBool shallow;
2411 JSGCLockHashEntry *lhe;
2413 if (!thing)
2414 return JS_TRUE;
2416 flagp = GetGCThingFlagsOrNull(thing);
2417 JS_LOCK_GC(rt);
2418 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2420 if (shallow && !(*flagp & GCF_LOCK))
2421 goto out;
2422 if (!rt->gcLocksHash ||
2423 (lhe = (JSGCLockHashEntry *)
2424 JS_DHashTableOperate(rt->gcLocksHash, thing,
2425 JS_DHASH_LOOKUP),
2426 JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
2427 /* Shallow entry is not in the hash -> clear its lock bit. */
2428 if (shallow)
2429 *flagp &= ~GCF_LOCK;
2430 else
2431 goto out;
2432 } else {
2433 if (--lhe->count != 0)
2434 goto out;
2435 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
2438 rt->gcPoke = JS_TRUE;
2439 METER(rt->gcStats.unlock++);
2440 out:
2441 JS_UNLOCK_GC(rt);
2442 return JS_TRUE;
2445 JS_PUBLIC_API(void)
2446 JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
2448 JSObject *obj;
2449 size_t nslots, i;
2450 jsval v;
2451 JSString *str;
2453 switch (kind) {
2454 case JSTRACE_OBJECT:
2455 /* If obj has no map, it must be a newborn. */
2456 obj = (JSObject *) thing;
2457 if (!obj->map)
2458 break;
2459 if (obj->map->ops->trace) {
2460 obj->map->ops->trace(trc, obj);
2461 } else {
2462 nslots = STOBJ_NSLOTS(obj);
2463 for (i = 0; i != nslots; ++i) {
2464 v = STOBJ_GET_SLOT(obj, i);
2465 if (JSVAL_IS_TRACEABLE(v)) {
2466 JS_SET_TRACING_INDEX(trc, "slot", i);
2467 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(v),
2468 JSVAL_TRACE_KIND(v));
2472 break;
2474 case JSTRACE_STRING:
2475 str = (JSString *)thing;
2476 if (JSSTRING_IS_DEPENDENT(str))
2477 JS_CALL_STRING_TRACER(trc, JSSTRDEP_BASE(str), "base");
2478 break;
2480 #if JS_HAS_XML_SUPPORT
2481 case JSTRACE_XML:
2482 js_TraceXML(trc, (JSXML *)thing);
2483 break;
2484 #endif
2489 * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings.
2491 #define THINGS_PER_UNTRACED_BIT(thingSize) \
2492 JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
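/*
 * Worked example (hypothetical sizes): if an arena holds 100 things of a
 * given size and JS_BITS_PER_WORD == 32, each untraced bit covers
 * JS_HOWMANY(100, 32) == 4 consecutive things, so bit k stands for things
 * 4*k .. 4*k+3.
 */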
2494 static void
2495 DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
2497 JSGCArenaInfo *a;
2498 uint32 untracedBitIndex;
2499 jsuword bit;
2502 * Things with children to be traced later are marked with
2503 * GCF_MARK | GCF_FINAL flags.
2505 JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
2506 *flagp |= GCF_FINAL;
2508 METER(rt->gcStats.untraced++);
2509 #ifdef DEBUG
2510 ++rt->gcTraceLaterCount;
2511 METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount);
2512 #endif
2514 a = FLAGP_TO_ARENA(flagp);
2515 untracedBitIndex = FLAGP_TO_INDEX(flagp) /
2516 THINGS_PER_UNTRACED_BIT(a->list->thingSize);
2517 JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
2518 bit = (jsuword)1 << untracedBitIndex;
2519 if (a->u.untracedThings != 0) {
2520 JS_ASSERT(rt->gcUntracedArenaStackTop);
2521 if (a->u.untracedThings & bit) {
2522 /* bit already covers things with children to trace later. */
2523 return;
2525 a->u.untracedThings |= bit;
2526 } else {
2528 * This is the first thing in the whole arena whose children are not
2529 * yet traced, so push the arena onto the stack of arenas with things
2530 * to be traced later, unless the arena has already been pushed. We
2531 * detect the latter by checking prevUntracedPage, as the field is 0
2532 * only for arenas that have not been pushed yet. To ensure that
2533 * prevUntracedPage != 0
2534 * even when the stack contains a single element, we make the bottom
2535 * arena's prevUntracedPage point to itself.
2537 * See comments in TraceDelayedChildren.
2539 a->u.untracedThings = bit;
2540 if (a->prevUntracedPage == 0) {
2541 if (!rt->gcUntracedArenaStackTop) {
2542 /* Stack was empty, mark the arena as the bottom element. */
2543 a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
2544 } else {
2545 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2546 a->prevUntracedPage =
2547 ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
2549 rt->gcUntracedArenaStackTop = a;
2552 JS_ASSERT(rt->gcUntracedArenaStackTop);
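/*
 * Stack shape sketch (illustrative): with arenas A (bottom) and B (top)
 * pushed, A->prevUntracedPage refers to A itself while B->prevUntracedPage
 * refers to A, so prevUntracedPage != 0 distinguishes "on the stack" from
 * "never pushed" for every stacked arena, including the bottom one.
 */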
2555 static void
2556 TraceDelayedChildren(JSTracer *trc)
2558 JSRuntime *rt;
2559 JSGCArenaInfo *a, *aprev;
2560 uint32 thingSize;
2561 uint32 thingsPerUntracedBit;
2562 uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;
2563 JSGCThing *thing;
2564 uint8 *flagp;
2566 rt = trc->context->runtime;
2567 a = rt->gcUntracedArenaStackTop;
2568 if (!a) {
2569 JS_ASSERT(rt->gcTraceLaterCount == 0);
2570 return;
2573 for (;;) {
2575 * The following assert verifies that the current arena belongs to the
2576 * untraced stack, since DelayTracingChildren ensures that even for the
2577 * stack's bottom arena prevUntracedPage is not 0 but points to itself.
2579 JS_ASSERT(a->prevUntracedPage != 0);
2580 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2581 thingSize = a->list->thingSize;
2582 indexLimit = (a == a->list->last)
2583 ? a->list->lastCount
2584 : THINGS_PER_ARENA(thingSize);
2585 thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);
2588 * We cannot use a do-while loop here because a->u.untracedThings can
2589 * already be zero before the loop as a leftover from previous
2590 * iterations. See the comments after the loop.
2592 while (a->u.untracedThings != 0) {
2593 untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings);
2594 a->u.untracedThings &= ~((jsuword)1 << untracedBitIndex);
2595 thingIndex = untracedBitIndex * thingsPerUntracedBit;
2596 endIndex = thingIndex + thingsPerUntracedBit;
2599 * endIndex can go beyond the last allocated thing as the real
2600 * limit can be "inside" the bit.
2602 if (endIndex > indexLimit)
2603 endIndex = indexLimit;
2604 JS_ASSERT(thingIndex < indexLimit);
2606 do {
2608 * Skip free or already traced things that share the bit
2609 * with untraced ones.
2611 flagp = THING_FLAGP(a, thingIndex);
2612 if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
2613 continue;
2614 *flagp &= ~GCF_FINAL;
2615 #ifdef DEBUG
2616 JS_ASSERT(rt->gcTraceLaterCount != 0);
2617 --rt->gcTraceLaterCount;
2618 #endif
2619 thing = FLAGP_TO_THING(flagp, thingSize);
2620 JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp));
2621 } while (++thingIndex != endIndex);
2625 * We have finished tracing all things in the arena, but we can only
2626 * pop it from the stack if the arena is the stack's top.
2628 * When JS_TraceChildren above calls JS_CallTracer, which in turn, on a
2629 * low C stack, calls DelayTracingChildren, and the latter pushes new
2630 * arenas onto the untraced stack, we have to skip popping this arena
2631 * until it becomes the top of the stack again.
2633 if (a == rt->gcUntracedArenaStackTop) {
2634 aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
2635 a->prevUntracedPage = 0;
2636 if (a == aprev) {
2638 * prevUntracedPage points to itself and we reached the
2639 * bottom of the stack.
2641 break;
2643 rt->gcUntracedArenaStackTop = a = aprev;
2644 } else {
2645 a = rt->gcUntracedArenaStackTop;
2648 JS_ASSERT(rt->gcUntracedArenaStackTop);
2649 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
2650 rt->gcUntracedArenaStackTop = NULL;
2651 JS_ASSERT(rt->gcTraceLaterCount == 0);
2654 JS_PUBLIC_API(void)
2655 JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
2657 JSContext *cx;
2658 JSRuntime *rt;
2659 JSGCArenaInfo *a;
2660 uintN index;
2661 uint8 *flagp;
2663 JS_ASSERT(thing);
2664 JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
2665 JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
2667 if (!IS_GC_MARKING_TRACER(trc)) {
2668 trc->callback(trc, thing, kind);
2669 goto out;
2672 cx = trc->context;
2673 rt = cx->runtime;
2674 JS_ASSERT(rt->gcMarkingTracer == trc);
2675 JS_ASSERT(rt->gcLevel > 0);
2678 * Optimize for string and double as their size is known and their tracing
2679 * is not recursive.
2681 switch (kind) {
2682 case JSTRACE_DOUBLE:
2683 a = THING_TO_ARENA(thing);
2684 JS_ASSERT(!a->list);
2685 if (!a->u.hasMarkedDoubles) {
2686 ClearDoubleArenaFlags(a);
2687 a->u.hasMarkedDoubles = JS_TRUE;
2689 index = DOUBLE_THING_TO_INDEX(thing);
2690 JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);
2691 goto out;
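/*
 * Note on the double case above: marking a double is just setting its bit
 * in the per-arena bitmap; hasMarkedDoubles lazily resets that bitmap the
 * first time any double in the arena is marked during this GC.
 */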
2693 case JSTRACE_STRING:
2694 for (;;) {
2695 flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
2696 JS_ASSERT((*flagp & GCF_FINAL) == 0);
2697 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2698 if (!JSSTRING_IS_DEPENDENT((JSString *) thing)) {
2699 *flagp |= GCF_MARK;
2700 goto out;
2702 if (*flagp & GCF_MARK)
2703 goto out;
2704 *flagp |= GCF_MARK;
2705 thing = JSSTRDEP_BASE((JSString *) thing);
2707 /* NOTREACHED */
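/*
 * The string case above walks base chains iteratively: marking a dependent
 * string also marks JSSTRDEP_BASE(str), then its base, and so on, stopping
 * as soon as an already-marked link is found, so strings never need
 * recursion or delayed tracing.
 */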
2710 flagp = GetGCThingFlags(thing);
2711 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2712 if (*flagp & GCF_MARK)
2713 goto out;
2716 * We check for non-final flag only if mark is unset as
2717 * DelayTracingChildren uses the flag. See comments in the function.
2719 JS_ASSERT(*flagp != GCF_FINAL);
2720 *flagp |= GCF_MARK;
2721 if (!cx->insideGCMarkCallback) {
2723 * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
2724 * uses the non-recursive code that otherwise would be called only on
2725 * a low C stack condition.
2727 #ifdef JS_GC_ASSUME_LOW_C_STACK
2728 # define RECURSION_TOO_DEEP() JS_TRUE
2729 #else
2730 int stackDummy;
2731 # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
2732 #endif
2733 if (RECURSION_TOO_DEEP())
2734 DelayTracingChildren(rt, flagp);
2735 else
2736 JS_TraceChildren(trc, thing, kind);
2737 } else {
2739 * For API compatibility we allow for the callback to assume that
2740 * after it calls JS_MarkGCThing for the last time, the callback can
2741 * start to finalize its own objects that are only referenced by
2742 * unmarked GC things.
2744 * Since we do not know which call from inside the callback is the
2745 * last, we ensure that children of all marked things are traced and
2746 * call TraceDelayedChildren(trc) after tracing the thing.
2748 * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
2749 * for the things with untraced children, calling DelayTracingChildren
2750 * is useless here. Hence we always trace thing's children even with a
2751 * low native stack.
2753 cx->insideGCMarkCallback = JS_FALSE;
2754 JS_TraceChildren(trc, thing, kind);
2755 TraceDelayedChildren(trc);
2756 cx->insideGCMarkCallback = JS_TRUE;
2759 out:
2760 #ifdef DEBUG
2761 trc->debugPrinter = NULL;
2762 trc->debugPrintArg = NULL;
2763 #endif
2764 return; /* to avoid out: right_curl when DEBUG is not defined */
2767 void
2768 js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
2770 void *thing;
2771 uint32 kind;
2773 if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
2774 thing = JSVAL_TO_TRACEABLE(v);
2775 kind = JSVAL_TRACE_KIND(v);
2776 JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v)));
2777 } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
2778 /* v can be an arbitrary GC thing reinterpreted as an object. */
2779 thing = JSVAL_TO_OBJECT(v);
2780 kind = js_GetGCThingTraceKind(thing);
2781 } else {
2782 return;
2784 JS_CallTracer(trc, thing, kind);
2787 static JSDHashOperator
2788 gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2789 void *arg)
2791 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
2792 JSTracer *trc = (JSTracer *)arg;
2793 jsval *rp = (jsval *)rhe->root;
2794 jsval v = *rp;
2796 /* Ignore null object and scalar values. */
2797 if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
2798 #ifdef DEBUG
2799 JSBool root_points_to_gcArenaList = JS_FALSE;
2800 jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
2801 JSRuntime *rt;
2802 uintN i;
2803 JSGCArenaList *arenaList;
2804 uint32 thingSize;
2805 JSGCArenaInfo *a;
2806 size_t limit;
2808 rt = trc->context->runtime;
2809 for (i = 0; i < GC_NUM_FREELISTS; i++) {
2810 arenaList = &rt->gcArenaList[i];
2811 thingSize = arenaList->thingSize;
2812 limit = (size_t) arenaList->lastCount * thingSize;
2813 for (a = arenaList->last; a; a = a->prev) {
2814 if (thing - ARENA_INFO_TO_START(a) < limit) {
2815 root_points_to_gcArenaList = JS_TRUE;
2816 break;
2818 limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
2821 if (!root_points_to_gcArenaList) {
2822 for (a = rt->gcDoubleArenaList.first; a; a = a->prev) {
2823 if (thing - ARENA_INFO_TO_START(a) <
2824 DOUBLES_PER_ARENA * sizeof(jsdouble)) {
2825 root_points_to_gcArenaList = JS_TRUE;
2826 break;
2830 if (!root_points_to_gcArenaList && rhe->name) {
2831 fprintf(stderr,
2832 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
2833 "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
2834 "The root's name is \"%s\".\n",
2835 rhe->name);
2837 JS_ASSERT(root_points_to_gcArenaList);
2838 #endif
2839 JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
2840 js_CallValueTracerIfGCThing(trc, v);
2843 return JS_DHASH_NEXT;
2846 static JSDHashOperator
2847 gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2848 void *arg)
2850 JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
2851 void *thing = (void *)lhe->thing;
2852 JSTracer *trc = (JSTracer *)arg;
2853 uint32 traceKind;
2855 JS_ASSERT(lhe->count >= 1);
2856 traceKind = js_GetGCThingTraceKind(thing);
2857 JS_CALL_TRACER(trc, thing, traceKind, "locked object");
2858 return JS_DHASH_NEXT;
2861 #define TRACE_JSVALS(trc, len, vec, name) \
2862 JS_BEGIN_MACRO \
2863 jsval _v, *_vp, *_end; \
2865 for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
2866 _v = *_vp; \
2867 if (JSVAL_IS_TRACEABLE(_v)) { \
2868 JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \
2869 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v), \
2870 JSVAL_TRACE_KIND(_v)); \
2873 JS_END_MACRO
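/*
 * Usage note: TRACE_JSVALS is invoked below as, e.g.,
 * TRACE_JSVALS(trc, nslots, fp->slots, "slot"); only jsvals that refer to
 * GC things (objects, strings, doubles) are passed to the tracer, with the
 * element index recorded for debug naming.
 */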
2875 void
2876 js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
2878 uintN nslots, minargs, skip;
2880 if (fp->callobj)
2881 JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
2882 if (fp->argsobj)
2883 JS_CALL_OBJECT_TRACER(trc, fp->argsobj, "arguments");
2884 if (fp->varobj)
2885 JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
2886 if (fp->script) {
2887 js_TraceScript(trc, fp->script);
2889 /* fp->slots is null for watch pseudo-frames, see js_watch_set. */
2890 if (fp->slots) {
2892 * Don't mark what has not been pushed yet, or what has been
2893 * popped already.
2895 if (fp->regs) {
2896 nslots = (uintN) (fp->regs->sp - fp->slots);
2897 JS_ASSERT(nslots >= fp->script->nfixed);
2898 } else {
2899 nslots = fp->script->nfixed;
2901 TRACE_JSVALS(trc, nslots, fp->slots, "slot");
2903 } else {
2904 JS_ASSERT(!fp->slots);
2905 JS_ASSERT(!fp->regs);
2908 /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
2909 JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
2910 (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
2911 JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this");
2913 if (fp->callee)
2914 JS_CALL_OBJECT_TRACER(trc, fp->callee, "callee");
2916 if (fp->argv) {
2917 nslots = fp->argc;
2918 skip = 0;
2919 if (fp->fun) {
2920 minargs = FUN_MINARGS(fp->fun);
2921 if (minargs > nslots)
2922 nslots = minargs;
2923 if (!FUN_INTERPRETED(fp->fun)) {
2924 JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
2925 nslots += fp->fun->u.n.extra;
2927 if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
2928 skip = 2 + fp->argc;
2930 TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
2933 JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
2934 if (fp->scopeChain)
2935 JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
2936 if (fp->sharpArray)
2937 JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array");
2939 if (fp->xmlNamespace)
2940 JS_CALL_OBJECT_TRACER(trc, fp->xmlNamespace, "xmlNamespace");
2943 static void
2944 TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
2946 uint32 i;
2947 void *thing;
2949 #ifdef DEBUG
2950 static const char *weakRootNames[JSTRACE_LIMIT] = {
2951 "newborn object",
2952 "newborn double",
2953 "newborn string",
2954 "newborn xml"
2956 #endif
2958 for (i = 0; i != JSTRACE_LIMIT; i++) {
2959 thing = wr->newborn[i];
2960 if (thing)
2961 JS_CALL_TRACER(trc, thing, i, weakRootNames[i]);
2963 JS_ASSERT(i == GCX_EXTERNAL_STRING);
2964 for (; i != GCX_NTYPES; ++i) {
2965 thing = wr->newborn[i];
2966 if (thing) {
2967 JS_SET_TRACING_INDEX(trc, "newborn external string",
2968 i - GCX_EXTERNAL_STRING);
2969 JS_CallTracer(trc, thing, JSTRACE_STRING);
2973 JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
2974 JS_SET_TRACING_NAME(trc, "lastInternalResult");
2975 js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
2978 JS_REQUIRES_STACK JS_FRIEND_API(void)
2979 js_TraceContext(JSTracer *trc, JSContext *acx)
2981 JSStackFrame *fp, *nextChain;
2982 JSStackHeader *sh;
2983 JSTempValueRooter *tvr;
2985 if (IS_GC_MARKING_TRACER(trc)) {
2987 #define FREE_OLD_ARENAS(pool) \
2988 JS_BEGIN_MACRO \
2989 int64 _age; \
2990 JSArena * _a = (pool).current; \
2991 if (_a == (pool).first.next && \
2992 _a->avail == _a->base + sizeof(int64)) { \
2993 _age = JS_Now() - *(int64 *) _a->base; \
2994 if (_age > (int64) acx->runtime->gcEmptyArenaPoolLifespan * \
2995 1000) \
2996 JS_FreeArenaPool(&(pool)); \
2998 JS_END_MACRO
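/*
 * The macro above frees a pool only when its single remaining arena holds
 * nothing but the int64 timestamp stored at its base and that timestamp is
 * older than gcEmptyArenaPoolLifespan (apparently in milliseconds, since
 * JS_Now() reports microseconds and the limit is scaled by 1000).
 */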
3000 #ifdef JS_THREADSAFE
3001 js_RevokeGCLocalFreeLists(acx);
3002 #endif
3005 * Release the stackPool's arenas if the stackPool has existed for
3006 * longer than the limit specified by gcEmptyArenaPoolLifespan.
3008 FREE_OLD_ARENAS(acx->stackPool);
3011 * Release the regexpPool's arenas based on the same criterion as for
3012 * the stackPool.
3014 FREE_OLD_ARENAS(acx->regexpPool);
3017 * Clear the double free list to release all the pre-allocated doubles.
3019 acx->doubleFreeList = NULL;
3023 * Iterate frame chain and dormant chains.
3025 * (NB: see comment on this whole "dormant" thing in js_Execute.)
3027 * Since js_GetTopStackFrame needs to dereference cx->thread to check for
3028 * JIT frames, we check for non-null thread here and avoid null checks
3029 * there. See bug 471197.
3031 #ifdef JS_THREADSAFE
3032 if (acx->thread)
3033 #endif
3035 fp = js_GetTopStackFrame(acx);
3036 nextChain = acx->dormantFrameChain;
3037 if (!fp)
3038 goto next_chain;
3040 /* The top frame must not be dormant. */
3041 JS_ASSERT(!fp->dormantNext);
3042 for (;;) {
3043 do {
3044 js_TraceStackFrame(trc, fp);
3045 } while ((fp = fp->down) != NULL);
3047 next_chain:
3048 if (!nextChain)
3049 break;
3050 fp = nextChain;
3051 nextChain = nextChain->dormantNext;
3055 /* Mark other roots-by-definition in acx. */
3056 if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
3057 JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
3058 TraceWeakRoots(trc, &acx->weakRoots);
3059 if (acx->throwing) {
3060 JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
3061 } else {
3062 /* Avoid keeping GC-ed junk stored in JSContext.exception. */
3063 acx->exception = JSVAL_NULL;
3065 #if JS_HAS_LVALUE_RETURN
3066 if (acx->rval2set)
3067 JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2");
3068 #endif
3070 for (sh = acx->stackHeaders; sh; sh = sh->down) {
3071 METER(trc->context->runtime->gcStats.stackseg++);
3072 METER(trc->context->runtime->gcStats.segslots += sh->nslots);
3073 TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
3076 if (acx->localRootStack)
3077 js_TraceLocalRoots(trc, acx->localRootStack);
3079 for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
3080 switch (tvr->count) {
3081 case JSTVU_SINGLE:
3082 JS_SET_TRACING_NAME(trc, "tvr->u.value");
3083 js_CallValueTracerIfGCThing(trc, tvr->u.value);
3084 break;
3085 case JSTVU_TRACE:
3086 tvr->u.trace(trc, tvr);
3087 break;
3088 case JSTVU_SPROP:
3089 TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop);
3090 break;
3091 case JSTVU_WEAK_ROOTS:
3092 TraceWeakRoots(trc, tvr->u.weakRoots);
3093 break;
3094 case JSTVU_PARSE_CONTEXT:
3095 js_TraceParseContext(trc, tvr->u.parseContext);
3096 break;
3097 case JSTVU_SCRIPT:
3098 js_TraceScript(trc, tvr->u.script);
3099 break;
3100 default:
3101 JS_ASSERT(tvr->count >= 0);
3102 TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
3106 if (acx->sharpObjectMap.depth > 0)
3107 js_TraceSharpMap(trc, &acx->sharpObjectMap);
3109 js_TraceRegExpStatics(trc, acx);
3112 #ifdef JS_TRACER
3113 void
3114 js_PurgeTraceMonitor(JSContext *cx, JSTraceMonitor *tm)
3116 tm->reservedDoublePoolPtr = tm->reservedDoublePool;
3118 tm->needFlush = JS_TRUE;
3120 /* Keep the reserved objects. */
3121 for (JSObject *obj = tm->reservedObjects; obj; obj = JSVAL_TO_OBJECT(obj->fslots[0])) {
3122 uint8 *flagp = GetGCThingFlags(obj);
3123 JS_ASSERT((*flagp & GCF_TYPEMASK) == GCX_OBJECT);
3124 JS_ASSERT(*flagp != GCF_FINAL);
3125 *flagp |= GCF_MARK;
3128 #endif
3130 JS_REQUIRES_STACK void
3131 js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
3133 JSRuntime *rt = trc->context->runtime;
3134 JSContext *iter, *acx;
3136 JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
3137 if (rt->gcLocksHash)
3138 JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
3139 js_TraceAtomState(trc, allAtoms);
3140 js_TraceNativeEnumerators(trc);
3141 js_TraceRuntimeNumberState(trc);
3143 iter = NULL;
3144 while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
3145 js_TraceContext(trc, acx);
3147 if (rt->gcExtraRootsTraceOp)
3148 rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
3150 #ifdef JS_TRACER
3151 for (int i = 0; i < JSBUILTIN_LIMIT; i++) {
3152 if (rt->builtinFunctions[i])
3153 JS_CALL_OBJECT_TRACER(trc, rt->builtinFunctions[i], "builtin function");
3155 #endif
3158 static void
3159 ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
3161 JSObject *obj, *pobj;
3162 uint32 slot;
3164 obj = ssr->obj;
3165 pobj = ssr->pobj;
3166 slot = ssr->slot;
3168 while (pobj) {
3169 pobj = js_GetWrappedObject(cx, pobj);
3170 if (pobj == obj) {
3171 ssr->errnum = JSMSG_CYCLIC_VALUE;
3172 return;
3174 pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
3177 pobj = ssr->pobj;
3179 if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
3180 JSScope *scope, *newscope;
3181 JSObject *oldproto;
3183 /* Check to see whether obj shares its prototype's scope. */
3184 scope = OBJ_SCOPE(obj);
3185 oldproto = STOBJ_GET_PROTO(obj);
3186 if (oldproto && OBJ_SCOPE(oldproto) == scope) {
3187 /* Either obj needs a new empty scope, or it should share pobj's. */
3188 if (!pobj ||
3189 !OBJ_IS_NATIVE(pobj) ||
3190 OBJ_GET_CLASS(cx, pobj) != STOBJ_GET_CLASS(oldproto)) {
3192 * With no proto and no scope of its own, obj is truly empty.
3194 * If pobj is not native, obj needs its own empty scope -- it
3195 * should not continue to share oldproto's scope once oldproto
3196 * is not on obj's prototype chain. That would put properties
3197 * from oldproto's scope ahead of properties defined by pobj,
3198 * in lookup order.
3200 * If pobj's class differs from oldproto's, we may need a new
3201 * scope to handle differences in private and reserved slots,
3202 * so we suboptimally but safely make one.
3204 if (!js_GetMutableScope(cx, obj)) {
3205 ssr->errnum = JSMSG_OUT_OF_MEMORY;
3206 return;
3208 } else if (OBJ_SCOPE(pobj) != scope) {
3209 newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
3210 obj->map = &newscope->map;
3211 js_DropObjectMap(cx, &scope->map, obj);
3212 JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
3217 * Regenerate property cache shape ids for all of the scopes along the
3218 * old prototype chain, in case any property cache entries were filled
3219 * by looking up starting from obj.
3221 while (oldproto && OBJ_IS_NATIVE(oldproto)) {
3222 scope = OBJ_SCOPE(oldproto);
3223 SCOPE_MAKE_UNIQUE_SHAPE(cx, scope);
3224 oldproto = STOBJ_GET_PROTO(scope->object);
3228 /* Finally, do the deed. */
3229 STOBJ_SET_SLOT(obj, slot, OBJECT_TO_JSVAL(pobj));
3230 STOBJ_SET_DELEGATE(pobj);
3233 void
3234 js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data)
3236 JSScript **listp, *script;
3238 for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) {
3239 listp = &data->scriptsToGC[i];
3240 while ((script = *listp) != NULL) {
3241 *listp = script->u.nextToGC;
3242 script->u.nextToGC = NULL;
3243 js_DestroyScript(cx, script);
3249 * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
3250 * rt->gcLock already held, so the lock should be kept on return.
3252 void
3253 js_GC(JSContext *cx, JSGCInvocationKind gckind)
3255 JSRuntime *rt;
3256 JSBool keepAtoms;
3257 JSGCCallback callback;
3258 uintN i, type;
3259 JSTracer trc;
3260 uint32 thingSize, indexLimit;
3261 JSGCArenaInfo *a, **ap, *emptyArenas;
3262 uint8 flags, *flagp;
3263 JSGCThing *thing, *freeList;
3264 JSGCArenaList *arenaList;
3265 JSBool allClear;
3266 #ifdef JS_THREADSAFE
3267 uint32 requestDebit;
3268 JSContext *acx, *iter;
3269 #endif
3270 #ifdef JS_GCMETER
3271 uint32 nlivearenas, nkilledarenas, nthings;
3272 #endif
3274 JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx));
3275 rt = cx->runtime;
3277 #ifdef JS_THREADSAFE
3279 * We allow js_GC calls outside a request but the context must be bound
3280 * to the current thread.
3282 JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
3284 /* Avoid deadlock. */
3285 JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
3286 #endif
3288 if (gckind & GC_KEEP_ATOMS) {
3290 * The set slot request and last ditch GC kinds preserve all atoms and
3291 * weak roots.
3293 keepAtoms = JS_TRUE;
3294 } else {
3295 /* Keep atoms when a suspended compile is running on another context. */
3296 keepAtoms = (rt->gcKeepAtoms != 0);
3297 JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
3301 * Don't collect garbage if the runtime isn't up, and cx is not the last
3302 * context in the runtime. The last context must force a GC, and nothing
3303 * should suppress that final collection or there may be shutdown leaks,
3304 * or runtime bloat until the next context is created.
3306 if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
3307 return;
3309 restart_at_beginning:
3311 * Let the API user decide to defer a GC if it wants to (unless this
3312 * is the last context). Invoke the callback regardless. Sample the
3313 * callback in case we are freely racing with a JS_SetGCCallback{,RT} on
3314 * another thread.
3316 if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
3317 JSBool ok;
3319 if (gckind & GC_LOCK_HELD)
3320 JS_UNLOCK_GC(rt);
3321 ok = callback(cx, JSGC_BEGIN);
3322 if (gckind & GC_LOCK_HELD)
3323 JS_LOCK_GC(rt);
3324 if (!ok && gckind != GC_LAST_CONTEXT) {
3326 * It's possible that we've looped back to this code from the 'goto
3327 * restart_at_beginning' below in the GC_SET_SLOT_REQUEST code and
3328 * that rt->gcLevel is now 0. Don't return without notifying!
3330 if (rt->gcLevel == 0 && (gckind & GC_LOCK_HELD))
3331 JS_NOTIFY_GC_DONE(rt);
3332 return;
3336 /* Lock out other GC allocator and collector invocations. */
3337 if (!(gckind & GC_LOCK_HELD))
3338 JS_LOCK_GC(rt);
3340 METER(rt->gcStats.poke++);
3341 rt->gcPoke = JS_FALSE;
3343 #ifdef JS_THREADSAFE
3344 JS_ASSERT(cx->thread->id == js_CurrentThreadId());
3346 /* Bump gcLevel and return rather than nest on this thread. */
3347 if (rt->gcThread == cx->thread) {
3348 JS_ASSERT(rt->gcLevel > 0);
3349 rt->gcLevel++;
3350 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3351 if (!(gckind & GC_LOCK_HELD))
3352 JS_UNLOCK_GC(rt);
3353 return;
3357 * If we're in one or more requests (possibly on more than one context)
3358 * running on the current thread, indicate, temporarily, that all these
3359 * requests are inactive.
3361 requestDebit = 0;
3363 JSCList *head, *link;
3366 * Check all contexts on cx->thread->contextList for active requests,
3367 * counting each such context against requestDebit.
3369 head = &cx->thread->contextList;
3370 for (link = head->next; link != head; link = link->next) {
3371 acx = CX_FROM_THREAD_LINKS(link);
3372 JS_ASSERT(acx->thread == cx->thread);
3373 if (acx->requestDepth)
3374 requestDebit++;
3377 if (requestDebit) {
3378 JS_ASSERT(requestDebit <= rt->requestCount);
3379 rt->requestCount -= requestDebit;
3380 if (rt->requestCount == 0)
3381 JS_NOTIFY_REQUEST_DONE(rt);
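/*
 * Example (illustrative): if this thread owns three contexts and two of
 * them are inside requests, requestDebit == 2 and rt->requestCount is
 * temporarily reduced by 2 here; the debit is paid back before js_GC
 * returns (see the requestDebit additions later in this function).
 */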
3384 /* If another thread is already in GC, don't attempt GC; wait instead. */
3385 if (rt->gcLevel > 0) {
3386 /* Bump gcLevel to restart the current GC, so it finds new garbage. */
3387 rt->gcLevel++;
3388 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3390 /* Wait for the other thread to finish, then resume our request. */
3391 while (rt->gcLevel > 0)
3392 JS_AWAIT_GC_DONE(rt);
3393 if (requestDebit)
3394 rt->requestCount += requestDebit;
3395 if (!(gckind & GC_LOCK_HELD))
3396 JS_UNLOCK_GC(rt);
3397 return;
3400 /* No other thread is in GC, so indicate that we're now in GC. */
3401 rt->gcLevel = 1;
3402 rt->gcThread = cx->thread;
3405 * Notify all operation callbacks, which will give them a chance to
3406 * yield their current request. Contexts that are not currently
3407 * executing will perform their callback at some later point,
3408 * which then will be unnecessary, but harmless.
3410 js_NudgeOtherContexts(cx);
3412 /* Wait for all other requests to finish. */
3413 while (rt->requestCount > 0)
3414 JS_AWAIT_REQUEST_DONE(rt);
3416 #else /* !JS_THREADSAFE */
3418 /* Bump gcLevel and return rather than nest; the outer gc will restart. */
3419 rt->gcLevel++;
3420 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3421 if (rt->gcLevel > 1)
3422 return;
3424 #endif /* !JS_THREADSAFE */
3427 * Set rt->gcRunning here within the GC lock, and after waiting for any
3428 * active requests to end, so that new requests that try to JS_AddRoot,
3429 * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
3430 * rt->gcLevel to drop to zero, while request-less calls to the *Root*
3431 * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
3432 * waiting for GC to finish.
3434 rt->gcRunning = JS_TRUE;
3436 if (gckind == GC_SET_SLOT_REQUEST) {
3437 JSSetSlotRequest *ssr;
3439 while ((ssr = rt->setSlotRequests) != NULL) {
3440 rt->setSlotRequests = ssr->next;
3441 JS_UNLOCK_GC(rt);
3442 ssr->next = NULL;
3443 ProcessSetSlotRequest(cx, ssr);
3444 JS_LOCK_GC(rt);
3448 * We assume here that killing links to parent and prototype objects
3449 * does not create garbage (such objects typically are long-lived and
3450 * widely shared, e.g. global objects, Function.prototype, etc.). We
3451 * collect garbage only if a racing thread attempted GC and is waiting
3452 * for us to finish (gcLevel > 1) or if someone already poked us.
3454 if (rt->gcLevel == 1 && !rt->gcPoke)
3455 goto done_running;
3457 rt->gcLevel = 0;
3458 rt->gcPoke = JS_FALSE;
3459 rt->gcRunning = JS_FALSE;
3460 #ifdef JS_THREADSAFE
3461 rt->gcThread = NULL;
3462 rt->requestCount += requestDebit;
3463 #endif
3464 gckind = GC_LOCK_HELD;
3465 goto restart_at_beginning;
3468 JS_UNLOCK_GC(rt);
3470 #ifdef JS_TRACER
3471 if (JS_ON_TRACE(cx))
3472 goto out;
3473 #endif
3474 VOUCH_HAVE_STACK();
3476 /* Reset malloc counter. */
3477 rt->gcMallocBytes = 0;
3479 #ifdef JS_DUMP_SCOPE_METERS
3480 { extern void js_DumpScopeMeters(JSRuntime *rt);
3481 js_DumpScopeMeters(rt);
3483 #endif
3485 #ifdef JS_TRACER
3486 js_PurgeJITOracle();
3487 #endif
3488 js_PurgeThreads(cx);
3490 restart:
3491 rt->gcNumber++;
3492 JS_ASSERT(!rt->gcUntracedArenaStackTop);
3493 JS_ASSERT(rt->gcTraceLaterCount == 0);
3495 /* Reset the property cache's type id generator so we can compress ids. */
3496 rt->shapeGen = 0;
3499 * Mark phase.
3501 JS_TRACER_INIT(&trc, cx, NULL);
3502 rt->gcMarkingTracer = &trc;
3503 JS_ASSERT(IS_GC_MARKING_TRACER(&trc));
3505 for (a = rt->gcDoubleArenaList.first; a; a = a->prev)
3506 a->u.hasMarkedDoubles = JS_FALSE;
3508 js_TraceRuntime(&trc, keepAtoms);
3509 js_MarkScriptFilenames(rt, keepAtoms);
3512 * Mark children of things that caused too deep recursion during the above
3513 * tracing.
3515 TraceDelayedChildren(&trc);
3517 JS_ASSERT(!cx->insideGCMarkCallback);
3518 if (rt->gcCallback) {
3519 cx->insideGCMarkCallback = JS_TRUE;
3520 (void) rt->gcCallback(cx, JSGC_MARK_END);
3521 JS_ASSERT(cx->insideGCMarkCallback);
3522 cx->insideGCMarkCallback = JS_FALSE;
3524 JS_ASSERT(rt->gcTraceLaterCount == 0);
3526 rt->gcMarkingTracer = NULL;
3529 * Sweep phase.
3531 * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
3532 * so that any attempt to allocate a GC-thing from a finalizer will fail,
3533 * rather than nest badly and leave the unmarked newborn to be swept.
3535 * We first sweep atom state so we can use js_IsAboutToBeFinalized on
3536 * JSString or jsdouble held in a hashtable to check if the hashtable
3537 * entry can be freed. Note that even after the entry is freed, JSObject
3538 * finalizers can continue to access the corresponding jsdouble* and
3539 * JSString* assuming that they are unique. This works since the
3540 * atomization API must not be called during GC.
3542 js_SweepAtomState(cx);
3544 /* Finalize iterator states before the objects they iterate over. */
3545 CloseNativeIterators(cx);
3547 /* Finalize watch points associated with unreachable objects. */
3548 js_SweepWatchPoints(cx);
3550 #ifdef DEBUG
3551 /* Save the pre-sweep count of scope-mapped properties. */
3552 rt->liveScopePropsPreSweep = rt->liveScopeProps;
3553 #endif
3556 * Here we need to ensure that JSObject instances are finalized before GC-
3557 * allocated JSString and jsdouble instances, so an object's finalizer can
3558 * still access them even if they are about to be freed. For that we simply
3559 * finalize the list containing JSObject instances first, since the static
3560 * assert at the beginning of the file guarantees that JSString and jsdouble
3561 * instances are allocated from different lists.
3563 emptyArenas = NULL;
3564 for (i = 0; i < GC_NUM_FREELISTS; i++) {
3565 arenaList = &rt->gcArenaList[i == 0
3566 ? GC_FREELIST_INDEX(sizeof(JSObject))
3567 : i == GC_FREELIST_INDEX(sizeof(JSObject))
3568 ? 0
3569 : i];
3570 ap = &arenaList->last;
3571 if (!(a = *ap))
3572 continue;
3574 JS_ASSERT(arenaList->lastCount > 0);
3575 arenaList->freeList = NULL;
3576 freeList = NULL;
3577 thingSize = arenaList->thingSize;
3578 indexLimit = THINGS_PER_ARENA(thingSize);
3579 flagp = THING_FLAGP(a, arenaList->lastCount - 1);
3580 METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
3581 for (;;) {
3582 JS_ASSERT(a->prevUntracedPage == 0);
3583 JS_ASSERT(a->u.untracedThings == 0);
3584 allClear = JS_TRUE;
3585 do {
3586 flags = *flagp;
3587 if (flags & (GCF_MARK | GCF_LOCK)) {
3588 *flagp &= ~GCF_MARK;
3589 allClear = JS_FALSE;
3590 METER(nthings++);
3591 } else {
3592 thing = FLAGP_TO_THING(flagp, thingSize);
3593 if (!(flags & GCF_FINAL)) {
3595 * Call the finalizer with GCF_FINAL ORed into flags.
3597 *flagp = (uint8)(flags | GCF_FINAL);
3598 type = flags & GCF_TYPEMASK;
3599 switch (type) {
3600 case GCX_OBJECT:
3601 js_FinalizeObject(cx, (JSObject *) thing);
3602 break;
3603 case GCX_DOUBLE:
3604 /* Do nothing. */
3605 break;
3606 #if JS_HAS_XML_SUPPORT
3607 case GCX_XML:
3608 js_FinalizeXML(cx, (JSXML *) thing);
3609 break;
3610 #endif
3611 default:
3612 JS_ASSERT(type == GCX_STRING ||
3613 type - GCX_EXTERNAL_STRING <
3614 GCX_NTYPES - GCX_EXTERNAL_STRING);
3615 js_FinalizeStringRT(rt, (JSString *) thing,
3616 (intN) (type -
3617 GCX_EXTERNAL_STRING),
3618 cx);
3619 break;
3621 #ifdef DEBUG
3622 memset(thing, JS_FREE_PATTERN, thingSize);
3623 #endif
3625 thing->flagp = flagp;
3626 thing->next = freeList;
3627 freeList = thing;
3629 } while (++flagp != THING_FLAGS_END(a));
3631 if (allClear) {
3633 * Forget the free list head just assembled for this arena and
3634 * add the arena itself to the destroy list.
3636 freeList = arenaList->freeList;
3637 if (a == arenaList->last)
3638 arenaList->lastCount = (uint16) indexLimit;
3639 *ap = a->prev;
3640 a->prev = emptyArenas;
3641 emptyArenas = a;
3642 METER(nkilledarenas++);
3643 } else {
3644 arenaList->freeList = freeList;
3645 ap = &a->prev;
3646 METER(nlivearenas++);
3648 if (!(a = *ap))
3649 break;
3650 flagp = THING_FLAGP(a, indexLimit - 1);
3654 * We use arenaList - &rt->gcArenaList[0], not i, as the stat index
3655 * due to the enumeration reorder at the beginning of the loop.
3657 METER(UpdateArenaStats(&rt->gcStats.arenaStats[arenaList -
3658 &rt->gcArenaList[0]],
3659 nlivearenas, nkilledarenas, nthings));
3662 #ifdef JS_THREADSAFE
3664 * Release all but two free list sets to avoid allocating a new set in
3665 * js_NewGCThing.
3667 TrimGCFreeListsPool(rt, 2);
3668 #endif
3670 ap = &rt->gcDoubleArenaList.first;
3671 METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
3672 while ((a = *ap) != NULL) {
3673 if (!a->u.hasMarkedDoubles) {
3674 /* No marked double values in the arena. */
3675 *ap = a->prev;
3676 a->prev = emptyArenas;
3677 emptyArenas = a;
3678 METER(nkilledarenas++);
3679 } else {
3680 ap = &a->prev;
3681 #ifdef JS_GCMETER
3682 for (i = 0; i != DOUBLES_PER_ARENA; ++i) {
3683 if (IsMarkedDouble(a, i))
3684 METER(nthings++);
3686 METER(nlivearenas++);
3687 #endif
3690 METER(UpdateArenaStats(&rt->gcStats.doubleArenaStats,
3691 nlivearenas, nkilledarenas, nthings));
3692 rt->gcDoubleArenaList.nextDoubleFlags =
3693 rt->gcDoubleArenaList.first
3694 ? DOUBLE_ARENA_BITMAP(rt->gcDoubleArenaList.first)
3695 : DOUBLE_BITMAP_SENTINEL;
3698 * Sweep the runtime's property tree after finalizing objects, in case any
3699 * had watchpoints referencing tree nodes.
3701 js_SweepScopeProperties(cx);
3704 * Sweep script filenames after sweeping functions in the generic loop
3705 * above. In this way when a scripted function's finalizer destroys the
3706 * script and calls rt->destroyScriptHook, the hook can still access the
3707 * script's filename. See bug 323267.
3709 js_SweepScriptFilenames(rt);
3712 * Destroy arenas only after we have finished sweeping, so finalizers can
3713 * safely use js_IsAboutToBeFinalized().
3715 DestroyGCArenas(rt, emptyArenas);
3717 if (rt->gcCallback)
3718 (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
3719 #ifdef DEBUG_srcnotesize
3720 { extern void DumpSrcNoteSizeHist();
3721 DumpSrcNoteSizeHist();
3722 printf("GC HEAP SIZE %lu\n", (unsigned long)rt->gcBytes);
3724 #endif
3726 #ifdef JS_SCOPE_DEPTH_METER
3727 { static FILE *fp;
3728 if (!fp)
3729 fp = fopen("/tmp/scopedepth.stats", "w");
3731 if (fp) {
3732 JS_DumpBasicStats(&rt->protoLookupDepthStats, "proto-lookup depth", fp);
3733 JS_DumpBasicStats(&rt->scopeSearchDepthStats, "scope-search depth", fp);
3734 JS_DumpBasicStats(&rt->hostenvScopeDepthStats, "hostenv scope depth", fp);
3735 JS_DumpBasicStats(&rt->lexicalScopeDepthStats, "lexical scope depth", fp);
3737 putc('\n', fp);
3738 fflush(fp);
3741 #endif /* JS_SCOPE_DEPTH_METER */
3743 #ifdef JS_DUMP_LOOP_STATS
3744 { static FILE *lsfp;
3745 if (!lsfp)
3746 lsfp = fopen("/tmp/loopstats", "w");
3747 if (lsfp) {
3748 JS_DumpBasicStats(&rt->loopStats, "loops", lsfp);
3749 fflush(lsfp);
3752 #endif /* JS_DUMP_LOOP_STATS */
3754 #ifdef JS_TRACER
3755 out:
3756 #endif
3757 JS_LOCK_GC(rt);
3760 * We want to restart GC if js_GC was called recursively or if any of the
3761 * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
3763 if (!JS_ON_TRACE(cx) && (rt->gcLevel > 1 || rt->gcPoke)) {
3764 VOUCH_HAVE_STACK();
3765 rt->gcLevel = 1;
3766 rt->gcPoke = JS_FALSE;
3767 JS_UNLOCK_GC(rt);
3768 goto restart;
3771 if (rt->shapeGen >= SHAPE_OVERFLOW_BIT - 1) {
3773 * FIXME bug 440834: The shape id space has overflowed. Currently we
3774 * cope badly with this. Every call to js_GenerateShape does GC, and
3775 * we never re-enable the property cache.
3777 js_DisablePropertyCache(cx);
3778 #ifdef JS_THREADSAFE
3779 iter = NULL;
3780 while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
3781 if (!acx->thread || acx->thread == cx->thread)
3782 continue;
3783 js_DisablePropertyCache(acx);
3785 #endif
3788 rt->gcLastBytes = rt->gcBytes;
3789 done_running:
3790 rt->gcLevel = 0;
3791 rt->gcRunning = JS_FALSE;
3793 #ifdef JS_THREADSAFE
3794 /* If we were invoked during a request, pay back the temporary debit. */
3795 if (requestDebit)
3796 rt->requestCount += requestDebit;
3797 rt->gcThread = NULL;
3798 JS_NOTIFY_GC_DONE(rt);
3801 * Unlock unless we have GC_LOCK_HELD which requires locked GC on return.
3803 if (!(gckind & GC_LOCK_HELD))
3804 JS_UNLOCK_GC(rt);
3805 #endif
3808 * Execute JSGC_END callback outside the lock. Again, sample the callback
3809 * pointer in case it changes, since we are outside of the GC vs. requests
3810 * interlock mechanism here.
3812 if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
3813 JSWeakRoots savedWeakRoots;
3814 JSTempValueRooter tvr;
3816 if (gckind & GC_KEEP_ATOMS) {
3818 * We allow JSGC_END implementation to force a full GC or allocate
3819 * new GC things. Thus we must protect the weak roots from garbage
3820 * collection and overwrites.
3822 savedWeakRoots = cx->weakRoots;
3823 JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
3824 JS_KEEP_ATOMS(rt);
3825 JS_UNLOCK_GC(rt);
3828 (void) callback(cx, JSGC_END);
3830 if (gckind & GC_KEEP_ATOMS) {
3831 JS_LOCK_GC(rt);
3832 JS_UNKEEP_ATOMS(rt);
3833 JS_POP_TEMP_ROOT(cx, &tvr);
3834 } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
3836 * On shutdown iterate until JSGC_END callback stops creating
3837 * garbage.
3839 goto restart_at_beginning;
3844 #ifdef JS_THREADSAFE
3847 * If the GC is running and we're called on another thread, wait for this GC
3848 * activation to finish. We can safely wait here without fear of deadlock (in
3849 * the case where we are called within a request on another thread's context)
3850 * because the GC doesn't set rt->gcRunning until after it has waited for all
3851 * active requests to end.
3853 * We call js_CurrentThreadId() here only after checking rt->gcRunning to
3854 * avoid the expensive call when the GC is not running.
3856 void
3857 js_WaitForGC(JSRuntime *rt)
3859 JS_ASSERT_IF(rt->gcRunning, rt->gcLevel > 0);
3860 if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
3861 do {
3862 JS_AWAIT_GC_DONE(rt);
3863 } while (rt->gcRunning);
3867 #endif
3869 void
3870 js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
3872 uint32 *pbytes, bytes;
3874 #ifdef JS_THREADSAFE
3875 pbytes = &cx->thread->gcMallocBytes;
3876 #else
3877 pbytes = &cx->runtime->gcMallocBytes;
3878 #endif
3879 bytes = *pbytes;
3880 *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
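/*
 * The update above is a saturating add: if bytes + nbytes would exceed the
 * uint32 range, the counter sticks at (uint32)-1 rather than wrapping.
 * E.g. (illustrative) bytes == 0xFFFFFF00 and nbytes == 0x200 yields
 * 0xFFFFFFFF, not 0x100.
 */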