js/src/jsgc.cpp
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=78:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
17 * The Original Code is Mozilla Communicator client code, released
18 * March 31, 1998.
20 * The Initial Developer of the Original Code is
21 * Netscape Communications Corporation.
22 * Portions created by the Initial Developer are Copyright (C) 1998
23 * the Initial Developer. All Rights Reserved.
25 * Contributor(s):
27 * Alternatively, the contents of this file may be used under the terms of
28 * either of the GNU General Public License Version 2 or later (the "GPL"),
29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
42 * JS Mark-and-Sweep Garbage Collector.
44 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
45 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
46 * using malloc. It uses an ideally parallel array of flag bytes to hold the
47 * mark bit, finalizer type index, etc.
49 * XXX swizzle page to freelist for better locality of reference
51 #include <stdlib.h> /* for free */
52 #include <math.h>
53 #include <string.h> /* for memset used when DEBUG */
54 #include "jstypes.h"
55 #include "jsstdint.h"
56 #include "jsutil.h" /* Added by JSIFY */
57 #include "jshash.h" /* Added by JSIFY */
58 #include "jsbit.h"
59 #include "jsclist.h"
60 #include "jsprf.h"
61 #include "jsapi.h"
62 #include "jsatom.h"
63 #include "jscntxt.h"
64 #include "jsversion.h"
65 #include "jsdbgapi.h"
66 #include "jsexn.h"
67 #include "jsfun.h"
68 #include "jsgc.h"
69 #include "jsinterp.h"
70 #include "jsiter.h"
71 #include "jslock.h"
72 #include "jsnum.h"
73 #include "jsobj.h"
74 #include "jsparse.h"
75 #include "jsscope.h"
76 #include "jsscript.h"
77 #include "jsstaticcheck.h"
78 #include "jsstr.h"
79 #include "jstask.h"
80 #include "jstracer.h"
82 #if JS_HAS_XML_SUPPORT
83 #include "jsxml.h"
84 #endif
86 #ifdef INCLUDE_MOZILLA_DTRACE
87 #include "jsdtracef.h"
88 #endif
91 * Check if posix_memalign is available.
93 #if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
94 # define HAS_POSIX_MEMALIGN 1
95 #else
96 # define HAS_POSIX_MEMALIGN 0
97 #endif
100 * jemalloc provides posix_memalign.
102 #ifdef MOZ_MEMORY
103 extern "C" {
104 #include "../../memory/jemalloc/jemalloc.h"
106 #endif
109 * Include the headers for mmap unless we have posix_memalign and do not
110 * insist on mmap.
112 #if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
113 # if defined(XP_WIN)
114 # ifndef JS_GC_USE_MMAP
115 # define JS_GC_USE_MMAP 1
116 # endif
117 # include <windows.h>
118 # elif defined(__SYMBIAN32__)
119 // Symbian's OpenC has mmap (and #defines _POSIX_MAPPED_FILES), but
120 // doesn't implement MAP_ANON. If we have MOZ_MEMORY, then we can use
121 // posix_memalign; we've defined HAS_POSIX_MEMALIGN above. Otherwise,
122 // we overallocate.
123 # else
124 # if defined(XP_UNIX) || defined(XP_BEOS)
125 # include <unistd.h>
126 # endif
127 # if _POSIX_MAPPED_FILES > 0
128 # ifndef JS_GC_USE_MMAP
129 # define JS_GC_USE_MMAP 1
130 # endif
131 # include <sys/mman.h>
133 /* On Mac OS X MAP_ANONYMOUS is not defined. */
134 # if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
135 # define MAP_ANONYMOUS MAP_ANON
136 # endif
137 # if !defined(MAP_ANONYMOUS)
138 # define MAP_ANONYMOUS 0
139 # endif
140 # else
141 # if JS_GC_USE_MMAP
142 # error "JS_GC_USE_MMAP is set when mmap is not available"
143 # endif
144 # endif
145 # endif
146 #endif
149 * Check JSTempValueUnion has the size of jsval and void * so we can
150 * reinterpret jsval as void* GC-thing pointer and use JSTVU_SINGLE for
151 * different GC-things.
153 JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval));
154 JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(void *));
157 * Check that JSTRACE_XML follows JSTRACE_OBJECT, JSTRACE_DOUBLE and
158 * JSTRACE_STRING.
160 JS_STATIC_ASSERT(JSTRACE_OBJECT == 0);
161 JS_STATIC_ASSERT(JSTRACE_DOUBLE == 1);
162 JS_STATIC_ASSERT(JSTRACE_STRING == 2);
163 JS_STATIC_ASSERT(JSTRACE_XML == 3);
166 * JS_IS_VALID_TRACE_KIND assumes that JSTRACE_STRING is the last non-xml
167 * trace kind when JS_HAS_XML_SUPPORT is false.
169 JS_STATIC_ASSERT(JSTRACE_STRING + 1 == JSTRACE_XML);
172 * The number of used GCX-types must stay within GCX_LIMIT.
174 JS_STATIC_ASSERT(GCX_NTYPES <= GCX_LIMIT);
178 * Check that we can reinterpret double as JSGCDoubleCell.
180 JS_STATIC_ASSERT(sizeof(JSGCDoubleCell) == sizeof(double));
183 * Check that we can use memset(p, 0, ...) to implement JS_CLEAR_WEAK_ROOTS.
185 JS_STATIC_ASSERT(JSVAL_NULL == 0);
189 * A GC arena contains a fixed number of flag bits for each thing in its heap,
190 * and supports O(1) lookup of a flag given its thing's address.
192 * To implement this, we allocate things of the same size from a GC arena
193 * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
194 * following picture shows arena's layout:
196 * +------------------------------+--------------------+---------------+
197 * | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
198 * +------------------------------+--------------------+---------------+
200 * To find the flag bits for the thing we calculate the thing index counting
201 * from arena's start using:
203 * thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
205 * The details of flag's lookup depend on thing's kind. For all GC things
206 * except doubles we use one byte of flags where the 4 bits determine thing's
207 * type and the rest is used to implement GC marking, finalization and
208 * locking. We calculate the address of flag's byte using:
210 * flagByteAddress =
211 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
213 * where
215 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
217 * is the last byte of flags' area.
219 * This implies that the things are allocated from the start of their area and
220 * flags are allocated from the end. This arrangement avoids a relatively
221 * expensive calculation of the location of the boundary separating things and
222 * flags. The boundary's offset from the start of the arena is given by:
224 * thingsPerArena * thingSize
226 * where thingsPerArena is the number of things that the arena can hold:
228 * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
230 * To allocate doubles we use a specialized arena. It can contain only numbers
231 * so we do not need the type bits. Moreover, since the doubles do not require
232 * a finalizer and very few of them are locked via js_LockGCThing API, we use
233 * just one bit of flags per double to denote if it was marked during the
234 * marking phase of the GC. The locking is implemented via a hash table. Thus
235 * for doubles the flag area becomes a bitmap.
237 * JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
238 * When it is true, a platform-dependent function like mmap is used to get
239 * memory aligned on CPU page boundaries. If the macro is false or undefined,
240 * posix_memalign is used when available. Otherwise the code uses malloc to
241 * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
242 * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
243 * see NewGCChunk/DestroyGCChunk below.
245 * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
246 * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
247 * not be a compile-time constant as the system page size is not known until
248 * runtime.
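/*
 * Worked example, assuming a 32-bit build with JS_GC_ARENA_PAD == 0:
 * GC_ARENA_SIZE is 4096 and sizeof(JSGCArenaInfo) is 16, so an arena
 * holding 16-byte things has
 *
 * thingsPerArena = (4096 - 16) / (16 + 1) = 240
 *
 * things, followed by 240 flag bytes and the JSGCArenaInfo at the end.
 */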
250 #if JS_GC_USE_MMAP
251 static uint32 js_gcArenasPerChunk = 0;
252 static JSBool js_gcUseMmap = JS_FALSE;
253 #elif HAS_POSIX_MEMALIGN
254 # define js_gcArenasPerChunk 1
255 #else
256 # define js_gcArenasPerChunk 7
257 #endif
259 #if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
260 # define CHUNKED_ARENA_ALLOCATION 0
261 #else
262 # define CHUNKED_ARENA_ALLOCATION 1
263 #endif
265 #define GC_ARENA_SHIFT 12
266 #define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
267 #define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
270 * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
271 * It is used to improve allocation efficiency when using posix_memalign. If
272 * malloc's implementation uses internal headers, then calling
274 * posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
276 * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
277 * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
278 * calls
280 * posix_memalign(&p, GC_ARENA_SIZE,
281 * GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
283 * When JS_GC_ARENA_PAD is equal to or greater than the number of words in the
284 * system header, the system can pack all allocations together without holes.
286 * With JS_GC_USE_MEMALIGN we want at least a 2-word pad unless posix_memalign
287 * comes from jemalloc, which does not use any headers/trailers.
289 #ifndef JS_GC_ARENA_PAD
290 # if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
291 # define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
292 # else
293 # define JS_GC_ARENA_PAD 0
294 # endif
295 #endif
297 struct JSGCArenaInfo {
299 * Allocation list for the arena or NULL if the arena holds double values.
301 JSGCArenaList *list;
304 * Pointer to the previous arena in a linked list. The arena can either
305 * belong to one of JSContext.gcArenaList lists or, when it does not have
306 * any allocated GC things, to the list of free arenas in the chunk with
307 * head stored in JSGCChunkInfo.lastFreeArena.
309 JSGCArenaInfo *prev;
311 #if !CHUNKED_ARENA_ALLOCATION
312 jsuword prevUntracedPage;
313 #else
315 * A link field for the list of arenas with marked but not yet traced
316 * things. The field is encoded as arena's page to share the space with
317 * firstArena and arenaIndex fields.
319 jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
322 * When firstArena is false, the index of the arena in the chunk. When
323 * firstArena is true, the index of a free arena holding JSGCChunkInfo or
324 * NO_FREE_ARENAS if there are no free arenas in the chunk.
326 * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
327 * access either of these indexes.
329 jsuword arenaIndex : GC_ARENA_SHIFT - 1;
331 /* Flag indicating if the arena is the first in the chunk. */
332 jsuword firstArena : 1;
333 #endif
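/*
 * For example, on a 32-bit build with GC_ARENA_SHIFT == 12 the three bit
 * fields above pack into a single word: prevUntracedPage takes
 * 32 - 12 == 20 bits, arenaIndex takes 12 - 1 == 11 bits and firstArena
 * takes the remaining bit.
 */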
335 union {
336 jsuword untracedThings; /* bitset for fast search of marked
337 but not yet traced things */
338 JSBool hasMarkedDoubles; /* the arena has marked doubles */
339 } u;
341 #if JS_GC_ARENA_PAD != 0
342 uint8 pad[JS_GC_ARENA_PAD];
343 #endif
347 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
348 * as possible. The code does not rely on this check so if on a particular
349 * platform this does not compile, then, as a workaround, comment the assert
350 * out and submit a bug report.
352 JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
355 * Macros to convert between JSGCArenaInfo, the start address of the arena and
356 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
358 #define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
360 #define IS_ARENA_INFO_ADDRESS(arena) \
361 (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
363 #define ARENA_START_TO_INFO(arenaStart) \
364 (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
365 (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))
367 #define ARENA_INFO_TO_START(arena) \
368 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
369 (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
371 #define ARENA_PAGE_TO_INFO(arenaPage) \
372 (JS_ASSERT(arenaPage != 0), \
373 JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
374 ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
376 #define ARENA_INFO_TO_PAGE(arena) \
377 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
378 ((jsuword) (arena) >> GC_ARENA_SHIFT))
380 #define GET_ARENA_INFO(chunk, index) \
381 (JS_ASSERT((index) < js_gcArenasPerChunk), \
382 ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
384 #if CHUNKED_ARENA_ALLOCATION
386 * Definitions for allocating arenas in chunks.
388 * All chunks that have at least one free arena are put on the doubly-linked
389 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
390 * the head of the chunk's free arena list together with the link fields for
391 * gcChunkList.
393 * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
394 * the index of this arena. When all arenas in the chunk are used, it is
395 * removed from the list and the index is set to NO_FREE_ARENAS indicating
396 * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
399 struct JSGCChunkInfo {
400 JSGCChunkInfo **prevp;
401 JSGCChunkInfo *next;
402 JSGCArenaInfo *lastFreeArena;
403 uint32 numFreeArenas;
406 #define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
408 #ifdef js_gcArenasPerChunk
409 JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
410 js_gcArenasPerChunk <= NO_FREE_ARENAS);
411 #endif
413 #define GET_ARENA_CHUNK(arena, index) \
414 (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
415 ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
417 #define GET_ARENA_INDEX(arena) \
418 ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
420 #define GET_CHUNK_INFO_INDEX(chunk) \
421 ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
423 #define SET_CHUNK_INFO_INDEX(chunk, index) \
424 (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
425 (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
427 #define GET_CHUNK_INFO(chunk, infoIndex) \
428 (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
429 JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
430 (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
432 #define CHUNK_INFO_TO_INDEX(ci) \
433 GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
435 #endif
438 * Macros for GC-thing operations.
440 #define THINGS_PER_ARENA(thingSize) \
441 ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
443 #define THING_TO_ARENA(thing) \
444 (JS_ASSERT(!JSString::isStatic(thing)), \
445 (JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) \
446 + 1 - sizeof(JSGCArenaInfo)))
448 #define THING_TO_INDEX(thing, thingSize) \
449 ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
451 #define THING_FLAGS_END(arena) ((uint8 *)(arena))
453 #define THING_FLAGP(arena, thingIndex) \
454 (JS_ASSERT((jsuword) (thingIndex) \
455 < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
456 (uint8 *)(arena) - 1 - (thingIndex))
458 #define THING_TO_FLAGP(thing, thingSize) \
459 THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
461 #define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
463 #define FLAGP_TO_INDEX(flagp) \
464 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
465 (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
467 #define FLAGP_TO_THING(flagp, thingSize) \
468 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
469 (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
470 (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
471 (thingSize) * FLAGP_TO_INDEX(flagp)))
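/*
 * Worked example of the flag lookup, assuming a 32-bit build with
 * GC_ARENA_SIZE == 4096, sizeof(JSGCArenaInfo) == 16 and thingSize == 16,
 * for a hypothetical thing at address 0x80342a0 (arena start 0x8034000):
 *
 * thingIndex = (0x80342a0 & 0xfff) / 16 = 0x2a0 / 16 = 42
 * flagp = (0x80342a0 | 0xfff) - 16 - 42 = 0x8034fff - 58 = 0x8034fc5
 *
 * i.e. the flag byte sits 43 bytes below the arena's JSGCArenaInfo, which
 * starts at 0x8034ff0.
 */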
474 * Macros for the specialized arena for doubles.
476 * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
477 * hold. We find it as follows. Let n be the number of doubles in the
478 * arena. Together with the bitmap of flags and JSGCArenaInfo they should fit
479 * the arena. Hence DOUBLES_PER_ARENA, or n_max, is the maximum value of n for
480 * which the following holds:
482 * n*s + ceil(n/B) <= M (1)
484 * where "/" denotes normal real division,
485 * ceil(r) gives the least integer not smaller than the number r,
486 * s is the number of words in jsdouble,
487 * B is number of bits per word or B == JS_BITS_PER_WORD
488 * M is the number of words in the arena before JSGCArenaInfo or
489 * M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword)
490 * == ARENA_INFO_OFFSET / sizeof(jsuword).
492 * We rewrite the inequality as
494 * n*B*s/B + ceil(n/B) <= M,
495 * ceil(n*B*s/B + n/B) <= M,
496 * ceil(n*(B*s + 1)/B) <= M (2)
498 * We define a helper function e(n, s, B),
500 * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
502 * It gives:
504 * n*(B*s + 1)/B + e(n, s, B) <= M,
505 * n + e*B/(B*s + 1) <= M*B/(B*s + 1)
507 * We apply the floor function to both sides of the last inequality, where
508 * floor(r) gives the biggest integer not greater than r. As a consequence we
509 * have:
511 * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
512 * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
513 * n <= floor(M*B/(B*s + 1)), (3)
515 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
516 * Thus any n that satisfies the original constraint (1) or its equivalent (2),
517 * must also satisfy (3). That is, we got an upper estimate for the maximum
518 * value of n. Let's show that this upper estimate,
520 * floor(M*B/(B*s + 1)), (4)
522 * also satisfies (1) and, as such, gives the required maximum value.
523 * Substituting it into (2) gives:
525 * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
527 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
529 * ceil(floor(M/X)*X) <= ceil(M) == M.
531 * Thus the value of (4) gives the maximum n satisfying (1).
533 * For the final result we observe that in (4)
535 * M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
536 * == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
538 * and
540 * B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
541 * == JS_BITS_PER_DOUBLE.
543 #define DOUBLES_PER_ARENA \
544 ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
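/*
 * Worked example, assuming a 32-bit build with GC_ARENA_SIZE == 4096,
 * JS_GC_ARENA_PAD == 0 and sizeof(JSGCArenaInfo) == 16, so that
 * ARENA_INFO_OFFSET == 4080:
 *
 * DOUBLES_PER_ARENA == 4080 * 8 / (64 + 1) == 502
 *
 * 502 doubles take 4016 bytes plus ceil(502/32) == 16 bitmap words
 * (64 bytes), and 4016 + 64 == 4080 <= 4080, while 503 doubles would need
 * 4024 + 64 == 4088 bytes and no longer fit.
 */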
547 * Check that sizeof(jsuword) divides both ARENA_INFO_OFFSET and sizeof(jsdouble).
549 JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
550 JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
551 JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
553 #define DOUBLES_ARENA_BITMAP_WORDS \
554 (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
556 /* Check that DOUBLES_PER_ARENA indeed maximises (1). */
557 JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
558 DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
559 ARENA_INFO_OFFSET);
561 JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
562 sizeof(jsuword) *
563 JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
564 ARENA_INFO_OFFSET);
567 * When DOUBLES_PER_ARENA % JS_BITS_PER_WORD != 0, some bits in the
568 * last word of the occupation bitmap are unused.
570 #define UNUSED_DOUBLE_BITMAP_BITS \
571 (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)
573 JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);
575 #define DOUBLES_ARENA_BITMAP_OFFSET \
576 (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
578 #define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \
579 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \
580 JS_ASSERT(!(arenaInfo)->list)) \
583 * Get the start of the bitmap area containing double mark flags in the arena.
584 * To access the flag the code uses
586 * JS_TEST_BIT(bitmapStart, index)
588 * That is, compared with the case of arenas with non-double things, we count
589 * flags from the start of the bitmap area, not from the end.
591 #define DOUBLE_ARENA_BITMAP(arenaInfo) \
592 (CHECK_DOUBLE_ARENA_INFO(arenaInfo), \
593 (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)
595 #define DOUBLE_THING_TO_INDEX(thing) \
596 (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \
597 JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \
598 DOUBLES_ARENA_BITMAP_OFFSET), \
599 ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
601 static void
602 ClearDoubleArenaFlags(JSGCArenaInfo *a)
604 jsbitmap *bitmap, mask;
605 uintN nused;
608 * When some high bits in the last word of the double occupation bitmap
609 * are unused, we must set them. Otherwise RefillDoubleFreeList will
610 * assume that they correspond to free cells and try to allocate
611 * them.
613 * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
615 bitmap = DOUBLE_ARENA_BITMAP(a);
616 memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
617 mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
618 nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
619 bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
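/*
 * Continuing the 32-bit example above: with DOUBLES_PER_ARENA == 502 and
 * 16 bitmap words there are 16 * 32 - 502 == 10 unused bits, so the last
 * word gets a mask with its 10 high bits set while the 22 low bits stay
 * zero, marking free cells.
 */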
622 static JS_ALWAYS_INLINE JSBool
623 IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
625 jsbitmap *bitmap;
627 JS_ASSERT(a->u.hasMarkedDoubles);
628 bitmap = DOUBLE_ARENA_BITMAP(a);
629 return JS_TEST_BIT(bitmap, index);
633 * JSRuntime.gcDoubleArenaList.nextDoubleFlags points to one of:
635 * 1. The next byte in the bitmap area for doubles to check for unmarked
636 * (or free) doubles.
637 * 2. Or to the end of the bitmap area when all GC cells of the arena are
638 * allocated.
639 * 3. Or to a special sentinel value indicating that there are no arenas
640 * to check for unmarked doubles.
642 * We set the sentinel to ARENA_INFO_OFFSET so the single check
644 * ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
646 * will cover both the second and the third cases.
648 #define DOUBLE_BITMAP_SENTINEL ((jsbitmap *) ARENA_INFO_OFFSET)
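/*
 * Continuing the 32-bit example, ARENA_INFO_OFFSET == 4080: both a pointer
 * one past the end of an arena's bitmap area (arenaStart + 4080) and the
 * sentinel value 4080 itself satisfy the test quoted above, so one check
 * covers cases 2 and 3.
 */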
650 #ifdef JS_THREADSAFE
652 * The maximum number of things to take at once from the global free list
653 * or from the tail of the last allocated arena and put on the thread-local
654 * free list, amortizing the cost of acquiring rt->gcLock.
656 #define MAX_THREAD_LOCAL_THINGS 64
658 #endif
660 JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
662 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
663 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
665 /* We want to use all the available GC thing space for an object's slots. */
666 JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
669 * Ensure that JSObject is allocated from a different GC list than
670 * jsdouble and JSString so we can easily finalize JSObject before these two
671 * types of GC things. See comments in js_GC.
673 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
674 GC_FREELIST_INDEX(sizeof(JSObject)));
675 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
676 GC_FREELIST_INDEX(sizeof(JSObject)));
679 * JSPtrTable capacity growth descriptor. The table grows by powers of two
680 * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
681 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
683 typedef struct JSPtrTableInfo {
684 uint16 minCapacity;
685 uint16 linearGrowthThreshold;
686 } JSPtrTableInfo;
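/*
 * For example, with the iterator table parameters below (minCapacity 4,
 * linearGrowthThreshold 1024) the capacity grows 4, 8, 16, ..., 512, 1024,
 * 2048, 3072, ...: it doubles until it reaches the threshold and then
 * grows by 1024 entries at a time.
 */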
688 #define GC_ITERATOR_TABLE_MIN 4
689 #define GC_ITERATOR_TABLE_LINEAR 1024
691 static const JSPtrTableInfo iteratorTableInfo = {
692 GC_ITERATOR_TABLE_MIN,
693 GC_ITERATOR_TABLE_LINEAR
696 /* Calculate table capacity based on the current value of JSPtrTable.count. */
697 static size_t
698 PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
700 size_t linear, log, capacity;
702 linear = info->linearGrowthThreshold;
703 JS_ASSERT(info->minCapacity <= linear);
705 if (count == 0) {
706 capacity = 0;
707 } else if (count < linear) {
708 log = JS_CEILING_LOG2W(count);
709 JS_ASSERT(log != JS_BITS_PER_WORD);
710 capacity = (size_t)1 << log;
711 if (capacity < info->minCapacity)
712 capacity = info->minCapacity;
713 } else {
714 capacity = JS_ROUNDUP(count, linear);
717 JS_ASSERT(capacity >= count);
718 return capacity;
721 static void
722 FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
724 if (table->array) {
725 JS_ASSERT(table->count > 0);
726 js_free(table->array);
727 table->array = NULL;
728 table->count = 0;
730 JS_ASSERT(table->count == 0);
733 static JSBool
734 AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
735 void *ptr)
737 size_t count, capacity;
738 void **array;
740 count = table->count;
741 capacity = PtrTableCapacity(count, info);
743 if (count == capacity) {
744 if (capacity < info->minCapacity) {
745 JS_ASSERT(capacity == 0);
746 JS_ASSERT(!table->array);
747 capacity = info->minCapacity;
748 } else {
750 * Simplify the overflow detection by assuming a pointer is bigger
751 * than a byte.
753 JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
754 capacity = (capacity < info->linearGrowthThreshold)
755 ? 2 * capacity
756 : capacity + info->linearGrowthThreshold;
757 if (capacity > (size_t)-1 / sizeof table->array[0])
758 goto bad;
760 array = (void **) js_realloc(table->array,
761 capacity * sizeof table->array[0]);
762 if (!array)
763 goto bad;
764 #ifdef DEBUG
765 memset(array + count, JS_FREE_PATTERN,
766 (capacity - count) * sizeof table->array[0]);
767 #endif
768 table->array = array;
771 table->array[count] = ptr;
772 table->count = count + 1;
774 return JS_TRUE;
776 bad:
777 JS_ReportOutOfMemory(cx);
778 return JS_FALSE;
781 static void
782 ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
783 size_t newCount)
785 size_t oldCapacity, capacity;
786 void **array;
788 JS_ASSERT(newCount <= table->count);
789 if (newCount == table->count)
790 return;
792 oldCapacity = PtrTableCapacity(table->count, info);
793 table->count = newCount;
794 capacity = PtrTableCapacity(newCount, info);
796 if (oldCapacity != capacity) {
797 array = table->array;
798 JS_ASSERT(array);
799 if (capacity == 0) {
800 js_free(array);
801 table->array = NULL;
802 return;
804 array = (void **) js_realloc(array, capacity * sizeof array[0]);
805 if (array)
806 table->array = array;
808 #ifdef DEBUG
809 memset(table->array + newCount, JS_FREE_PATTERN,
810 (capacity - newCount) * sizeof table->array[0]);
811 #endif
814 #ifdef JS_GCMETER
815 # define METER(x) ((void) (x))
816 # define METER_IF(condition, x) ((void) ((condition) && (x)))
817 #else
818 # define METER(x) ((void) 0)
819 # define METER_IF(condition, x) ((void) 0)
820 #endif
822 #define METER_UPDATE_MAX(maxLval, rval) \
823 METER_IF((maxLval) < (rval), (maxLval) = (rval))
825 #if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
828 * For chunks allocated via over-sized malloc, get a pointer to store the gap
829 * between the malloc's result and the first arena in the chunk.
831 static uint32 *
832 GetMallocedChunkGapPtr(jsuword chunk)
834 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
836 /* Use the memory after the chunk, see NewGCChunk for details. */
837 return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
840 #endif
842 static jsuword
843 NewGCChunk(void)
845 void *p;
847 #if JS_GC_USE_MMAP
848 if (js_gcUseMmap) {
849 # if defined(XP_WIN)
850 p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
851 MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
852 return (jsuword) p;
853 # else
854 p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
855 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
856 return (p == MAP_FAILED) ? 0 : (jsuword) p;
857 # endif
859 #endif
861 #if HAS_POSIX_MEMALIGN
862 if (0 != posix_memalign(&p, GC_ARENA_SIZE,
863 GC_ARENA_SIZE * js_gcArenasPerChunk -
864 JS_GC_ARENA_PAD)) {
865 return 0;
867 return (jsuword) p;
868 #else
870 * Implement chunk allocation using oversized malloc if mmap and
871 * posix_memalign are not available.
873 * Since malloc allocates pointers aligned on the word boundary, to get
874 * js_gcArenasPerChunk aligned arenas, we need to malloc only
876 * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
878 * bytes. But since we store the gap between the malloced pointer and the
879 * first arena in the space just after the chunk, we need to ask for
881 * ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
883 * bytes to ensure that we always have room to store the gap.
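/*
 * Hypothetical example with GC_ARENA_SHIFT == 12 and js_gcArenasPerChunk
 * == 7: if malloc returns p == 0x804b008, the first arena is placed at
 * chunk == (p + 0xfff) & ~0xfff == 0x804c000 and the gap
 * 0x804c000 - 0x804b008 == 0xff8 is stored in the word just past the
 * seventh arena, where DestroyGCChunk reads it back.
 */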
885 p = js_malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
886 if (!p)
887 return 0;
890 jsuword chunk;
892 chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
893 *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
894 return chunk;
896 #endif
899 static void
900 DestroyGCChunk(jsuword chunk)
902 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
903 #if JS_GC_USE_MMAP
904 if (js_gcUseMmap) {
905 # if defined(XP_WIN)
906 VirtualFree((void *) chunk, 0, MEM_RELEASE);
907 # elif defined(SOLARIS)
908 munmap((char *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
909 # else
910 munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
911 # endif
912 return;
914 #endif
916 #if HAS_POSIX_MEMALIGN
917 js_free((void *) chunk);
918 #else
919 /* See comments in NewGCChunk. */
920 JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
921 js_free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
922 #endif
925 #if CHUNKED_ARENA_ALLOCATION
927 static void
928 AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
930 ci->prevp = &rt->gcChunkList;
931 ci->next = rt->gcChunkList;
932 if (rt->gcChunkList) {
933 JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
934 rt->gcChunkList->prevp = &ci->next;
936 rt->gcChunkList = ci;
939 static void
940 RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
942 *ci->prevp = ci->next;
943 if (ci->next) {
944 JS_ASSERT(ci->next->prevp == &ci->next);
945 ci->next->prevp = ci->prevp;
949 #endif
951 static JSGCArenaInfo *
952 NewGCArena(JSRuntime *rt)
954 jsuword chunk;
955 JSGCArenaInfo *a;
957 if (rt->gcBytes >= rt->gcMaxBytes)
958 return NULL;
960 #if CHUNKED_ARENA_ALLOCATION
961 if (js_gcArenasPerChunk == 1) {
962 #endif
963 chunk = NewGCChunk();
964 if (chunk == 0)
965 return NULL;
966 a = ARENA_START_TO_INFO(chunk);
967 #if CHUNKED_ARENA_ALLOCATION
968 } else {
969 JSGCChunkInfo *ci;
970 uint32 i;
971 JSGCArenaInfo *aprev;
973 ci = rt->gcChunkList;
974 if (!ci) {
975 chunk = NewGCChunk();
976 if (chunk == 0)
977 return NULL;
978 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
979 a = GET_ARENA_INFO(chunk, 0);
980 a->firstArena = JS_TRUE;
981 a->arenaIndex = 0;
982 aprev = NULL;
983 i = 0;
984 do {
985 a->prev = aprev;
986 aprev = a;
987 ++i;
988 a = GET_ARENA_INFO(chunk, i);
989 a->firstArena = JS_FALSE;
990 a->arenaIndex = i;
991 } while (i != js_gcArenasPerChunk - 1);
992 ci = GET_CHUNK_INFO(chunk, 0);
993 ci->lastFreeArena = aprev;
994 ci->numFreeArenas = js_gcArenasPerChunk - 1;
995 AddChunkToList(rt, ci);
996 } else {
997 JS_ASSERT(ci->prevp == &rt->gcChunkList);
998 a = ci->lastFreeArena;
999 aprev = a->prev;
1000 if (!aprev) {
1001 JS_ASSERT(ci->numFreeArenas == 1);
1002 JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
1003 RemoveChunkFromList(rt, ci);
1004 chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
1005 SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
1006 } else {
1007 JS_ASSERT(ci->numFreeArenas >= 2);
1008 JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
1009 ci->lastFreeArena = aprev;
1010 ci->numFreeArenas--;
1014 #endif
1016 rt->gcBytes += GC_ARENA_SIZE;
1017 a->prevUntracedPage = 0;
1018 memset(&a->u, 0, sizeof(a->u));
1020 return a;
1023 static void
1024 DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
1026 JSGCArenaInfo *a;
1028 while (last) {
1029 a = last;
1030 last = last->prev;
1032 METER(rt->gcStats.afree++);
1033 JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
1034 rt->gcBytes -= GC_ARENA_SIZE;
1036 #if CHUNKED_ARENA_ALLOCATION
1037 if (js_gcArenasPerChunk == 1) {
1038 #endif
1039 DestroyGCChunk(ARENA_INFO_TO_START(a));
1040 #if CHUNKED_ARENA_ALLOCATION
1041 } else {
1042 uint32 arenaIndex;
1043 jsuword chunk;
1044 uint32 chunkInfoIndex;
1045 JSGCChunkInfo *ci;
1046 # ifdef DEBUG
1047 jsuword firstArena;
1049 firstArena = a->firstArena;
1050 arenaIndex = a->arenaIndex;
1051 memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
1052 GC_ARENA_SIZE - JS_GC_ARENA_PAD);
1053 a->firstArena = firstArena;
1054 a->arenaIndex = arenaIndex;
1055 # endif
1056 arenaIndex = GET_ARENA_INDEX(a);
1057 chunk = GET_ARENA_CHUNK(a, arenaIndex);
1058 chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
1059 if (chunkInfoIndex == NO_FREE_ARENAS) {
1060 chunkInfoIndex = arenaIndex;
1061 SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
1062 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
1063 a->prev = NULL;
1064 ci->lastFreeArena = a;
1065 ci->numFreeArenas = 1;
1066 AddChunkToList(rt, ci);
1067 } else {
1068 JS_ASSERT(chunkInfoIndex != arenaIndex);
1069 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
1070 JS_ASSERT(ci->numFreeArenas != 0);
1071 JS_ASSERT(ci->lastFreeArena);
1072 JS_ASSERT(a != ci->lastFreeArena);
1073 if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
1074 RemoveChunkFromList(rt, ci);
1075 DestroyGCChunk(chunk);
1076 } else {
1077 ++ci->numFreeArenas;
1078 a->prev = ci->lastFreeArena;
1079 ci->lastFreeArena = a;
1083 # endif
1087 static void
1088 InitGCArenaLists(JSRuntime *rt)
1090 uintN i, thingSize;
1091 JSGCArenaList *arenaList;
1093 for (i = 0; i < GC_NUM_FREELISTS; i++) {
1094 arenaList = &rt->gcArenaList[i];
1095 thingSize = GC_FREELIST_NBYTES(i);
1096 arenaList->last = NULL;
1097 arenaList->lastCount = THINGS_PER_ARENA(thingSize);
1098 arenaList->thingSize = thingSize;
1099 arenaList->freeList = NULL;
1101 rt->gcDoubleArenaList.first = NULL;
1102 rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1105 static void
1106 FinishGCArenaLists(JSRuntime *rt)
1108 uintN i;
1109 JSGCArenaList *arenaList;
1111 for (i = 0; i < GC_NUM_FREELISTS; i++) {
1112 arenaList = &rt->gcArenaList[i];
1113 DestroyGCArenas(rt, arenaList->last);
1114 arenaList->last = NULL;
1115 arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
1116 arenaList->freeList = NULL;
1118 DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
1119 rt->gcDoubleArenaList.first = NULL;
1120 rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
1122 rt->gcBytes = 0;
1123 JS_ASSERT(rt->gcChunkList == 0);
1127 * This function must not be called when thing is jsdouble.
1129 static uint8 *
1130 GetGCThingFlags(void *thing)
1132 JSGCArenaInfo *a;
1133 uint32 index;
1135 a = THING_TO_ARENA(thing);
1136 index = THING_TO_INDEX(thing, a->list->thingSize);
1137 return THING_FLAGP(a, index);
1141 * This function returns null when thing is jsdouble.
1143 static uint8 *
1144 GetGCThingFlagsOrNull(void *thing)
1146 JSGCArenaInfo *a;
1147 uint32 index;
1149 if (JSString::isStatic(thing))
1150 return NULL;
1151 a = THING_TO_ARENA(thing);
1152 if (!a->list)
1153 return NULL;
1154 index = THING_TO_INDEX(thing, a->list->thingSize);
1155 return THING_FLAGP(a, index);
1158 intN
1159 js_GetExternalStringGCType(JSString *str)
1161 JS_ASSERT(!JSString::isStatic(str));
1163 uintN type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
1164 JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
1165 return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
1168 static uint32
1169 MapGCFlagsToTraceKind(uintN flags)
1171 uint32 type;
1173 type = flags & GCF_TYPEMASK;
1174 JS_ASSERT(type != GCX_DOUBLE);
1175 JS_ASSERT(type < GCX_NTYPES);
1176 return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
1179 JS_FRIEND_API(uint32)
1180 js_GetGCThingTraceKind(void *thing)
1182 JSGCArenaInfo *a;
1183 uint32 index;
1185 if (JSString::isStatic(thing))
1186 return JSTRACE_STRING;
1188 a = THING_TO_ARENA(thing);
1189 if (!a->list)
1190 return JSTRACE_DOUBLE;
1192 index = THING_TO_INDEX(thing, a->list->thingSize);
1193 return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
1196 JSRuntime*
1197 js_GetGCStringRuntime(JSString *str)
1199 JSGCArenaList *list;
1201 list = THING_TO_ARENA(str)->list;
1203 JS_ASSERT(list->thingSize == sizeof(JSGCThing));
1204 JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
1206 return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
1209 JSBool
1210 js_IsAboutToBeFinalized(JSContext *cx, void *thing)
1212 JSGCArenaInfo *a;
1213 uint32 index, flags;
1215 a = THING_TO_ARENA(thing);
1216 if (!a->list) {
1218 * Check if the arena has no marked doubles. In that case the bitmap with
1219 * the mark flags is all garbage, as it is initialized only when
1220 * marking the first double in the arena.
1222 if (!a->u.hasMarkedDoubles)
1223 return JS_TRUE;
1224 index = DOUBLE_THING_TO_INDEX(thing);
1225 return !IsMarkedDouble(a, index);
1227 index = THING_TO_INDEX(thing, a->list->thingSize);
1228 flags = *THING_FLAGP(a, index);
1229 return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
1232 /* This is compatible with JSDHashEntryStub. */
1233 typedef struct JSGCRootHashEntry {
1234 JSDHashEntryHdr hdr;
1235 void *root;
1236 const char *name;
1237 } JSGCRootHashEntry;
1239 /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
1240 #define GC_ROOTS_SIZE 256
1242 #if CHUNKED_ARENA_ALLOCATION
1245 * For a CPU with extremely large pages, using them for GC things wastes
1246 * too much memory.
1248 # define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
1250 JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
1252 #endif
1254 JSBool
1255 js_InitGC(JSRuntime *rt, uint32 maxbytes)
1257 #if JS_GC_USE_MMAP
1258 if (js_gcArenasPerChunk == 0) {
1259 size_t cpuPageSize, arenasPerPage;
1260 # if defined(XP_WIN)
1261 SYSTEM_INFO si;
1263 GetSystemInfo(&si);
1264 cpuPageSize = si.dwPageSize;
1266 # elif defined(XP_UNIX) || defined(XP_BEOS)
1267 cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
1268 # else
1269 # error "Not implemented"
1270 # endif
1271 /* cpuPageSize is a power of 2. */
1272 JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
1273 arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
1274 #ifdef DEBUG
1275 if (arenasPerPage == 0) {
1276 fprintf(stderr,
1277 "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
1278 "paged allocation for the garbage collector. Please report this.\n",
1279 (unsigned) cpuPageSize);
1281 #endif
1282 if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
1284 * Use at least 4 GC arenas per paged allocation chunk to minimize
1285 * the overhead of mmap/VirtualAlloc.
1287 js_gcUseMmap = JS_TRUE;
1288 js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
1289 } else {
1290 js_gcUseMmap = JS_FALSE;
1291 js_gcArenasPerChunk = 7;
1294 JS_ASSERT(1 <= js_gcArenasPerChunk &&
1295 js_gcArenasPerChunk <= NO_FREE_ARENAS);
1296 #endif
1298 InitGCArenaLists(rt);
1299 if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
1300 sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
1301 rt->gcRootsHash.ops = NULL;
1302 return JS_FALSE;
1304 rt->gcLocksHash = NULL; /* create lazily */
1307 * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
1308 * for default backward API compatibility.
1310 rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
1311 rt->gcEmptyArenaPoolLifespan = 30000;
1314 * By default the trigger factor gets the maximum possible value. This
1315 * means that GC will not be triggered by growth of GC memory (gcBytes).
1317 rt->setGCTriggerFactor((uint32) -1);
1320 * The assigned value prevents GC from running when GC memory is too low
1321 * (during JS engine start).
1323 rt->setGCLastBytes(8192);
1325 METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
1326 return JS_TRUE;
1329 #ifdef JS_GCMETER
1331 static void
1332 UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
1333 uint32 nthings)
1335 size_t narenas;
1337 narenas = nlivearenas + nkilledArenas;
1338 JS_ASSERT(narenas >= st->livearenas);
1340 st->newarenas = narenas - st->livearenas;
1341 st->narenas = narenas;
1342 st->livearenas = nlivearenas;
1343 if (st->maxarenas < narenas)
1344 st->maxarenas = narenas;
1345 st->totalarenas += narenas;
1347 st->nthings = nthings;
1348 if (st->maxthings < nthings)
1349 st->maxthings = nthings;
1350 st->totalthings += nthings;
1353 JS_FRIEND_API(void)
1354 js_DumpGCStats(JSRuntime *rt, FILE *fp)
1356 int i;
1357 size_t sumArenas, sumTotalArenas;
1358 size_t sumThings, sumMaxThings;
1359 size_t sumThingSize, sumTotalThingSize;
1360 size_t sumArenaCapacity, sumTotalArenaCapacity;
1361 JSGCArenaStats *st;
1362 size_t thingSize, thingsPerArena;
1363 size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;
1365 fprintf(fp, "\nGC allocation statistics:\n");
1367 #define UL(x) ((unsigned long)(x))
1368 #define ULSTAT(x) UL(rt->gcStats.x)
1369 #define PERCENT(x,y) (100.0 * (double) (x) / (double) (y))
1371 sumArenas = 0;
1372 sumTotalArenas = 0;
1373 sumThings = 0;
1374 sumMaxThings = 0;
1375 sumThingSize = 0;
1376 sumTotalThingSize = 0;
1377 sumArenaCapacity = 0;
1378 sumTotalArenaCapacity = 0;
1379 sumAlloc = 0;
1380 sumLocalAlloc = 0;
1381 sumFail = 0;
1382 sumRetry = 0;
1383 for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
1384 if (i == -1) {
1385 thingSize = sizeof(jsdouble);
1386 thingsPerArena = DOUBLES_PER_ARENA;
1387 st = &rt->gcStats.doubleArenaStats;
1388 fprintf(fp,
1389 "Arena list for double values (%lu doubles per arena):",
1390 UL(thingsPerArena));
1391 } else {
1392 thingSize = rt->gcArenaList[i].thingSize;
1393 thingsPerArena = THINGS_PER_ARENA(thingSize);
1394 st = &rt->gcStats.arenaStats[i];
1395 fprintf(fp,
1396 "Arena list %d (thing size %lu, %lu things per arena):",
1397 i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
1399 if (st->maxarenas == 0) {
1400 fputs(" NEVER USED\n", fp);
1401 continue;
1403 putc('\n', fp);
1404 fprintf(fp, " arenas before GC: %lu\n", UL(st->narenas));
1405 fprintf(fp, " new arenas before GC: %lu (%.1f%%)\n",
1406 UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
1407 fprintf(fp, " arenas after GC: %lu (%.1f%%)\n",
1408 UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
1409 fprintf(fp, " max arenas: %lu\n", UL(st->maxarenas));
1410 fprintf(fp, " things: %lu\n", UL(st->nthings));
1411 fprintf(fp, " GC cell utilization: %.1f%%\n",
1412 PERCENT(st->nthings, thingsPerArena * st->narenas));
1413 fprintf(fp, " average cell utilization: %.1f%%\n",
1414 PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
1415 fprintf(fp, " max things: %lu\n", UL(st->maxthings));
1416 fprintf(fp, " alloc attempts: %lu\n", UL(st->alloc));
1417 fprintf(fp, " alloc without locks: %1u (%.1f%%)\n",
1418 UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
1419 sumArenas += st->narenas;
1420 sumTotalArenas += st->totalarenas;
1421 sumThings += st->nthings;
1422 sumMaxThings += st->maxthings;
1423 sumThingSize += thingSize * st->nthings;
1424 sumTotalThingSize += thingSize * st->totalthings;
1425 sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
1426 sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
1427 sumAlloc += st->alloc;
1428 sumLocalAlloc += st->localalloc;
1429 sumFail += st->fail;
1430 sumRetry += st->retry;
1432 fprintf(fp, "TOTAL STATS:\n");
1433 fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes));
1434 fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas));
1435 fprintf(fp, " total GC things: %lu\n", UL(sumThings));
1436 fprintf(fp, " max total GC things: %lu\n", UL(sumMaxThings));
1437 fprintf(fp, " GC cell utilization: %.1f%%\n",
1438 PERCENT(sumThingSize, sumArenaCapacity));
1439 fprintf(fp, " average cell utilization: %.1f%%\n",
1440 PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
1441 fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
1442 fprintf(fp, " alloc attempts: %lu\n", UL(sumAlloc));
1443 fprintf(fp, " alloc without locks: %1u (%.1f%%)\n",
1444 UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
1445 fprintf(fp, " allocation failures: %lu\n", UL(sumFail));
1446 fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
1447 fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
1448 fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
1449 fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
1450 fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
1451 fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
1452 fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
1453 fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced));
1454 #ifdef DEBUG
1455 fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced));
1456 #endif
1457 fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
1458 fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
1459 fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
1460 fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
1461 fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
1462 fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
1463 fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
1464 fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
1465 fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
1467 #undef UL
1468 #undef ULSTAT
1469 #undef PERCENT
1471 #ifdef JS_ARENAMETER
1472 JS_DumpArenaStats(fp);
1473 #endif
1475 #endif
1477 #ifdef DEBUG
1478 static void
1479 CheckLeakedRoots(JSRuntime *rt);
1480 #endif
1482 void
1483 js_FinishGC(JSRuntime *rt)
1485 #ifdef JS_ARENAMETER
1486 JS_DumpArenaStats(stdout);
1487 #endif
1488 #ifdef JS_GCMETER
1489 js_DumpGCStats(rt, stdout);
1490 #endif
1492 FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
1493 FinishGCArenaLists(rt);
1495 if (rt->gcRootsHash.ops) {
1496 #ifdef DEBUG
1497 CheckLeakedRoots(rt);
1498 #endif
1499 JS_DHashTableFinish(&rt->gcRootsHash);
1500 rt->gcRootsHash.ops = NULL;
1502 if (rt->gcLocksHash) {
1503 JS_DHashTableDestroy(rt->gcLocksHash);
1504 rt->gcLocksHash = NULL;
1508 JSBool
1509 js_AddRoot(JSContext *cx, void *rp, const char *name)
1511 JSBool ok = js_AddRootRT(cx->runtime, rp, name);
1512 if (!ok)
1513 JS_ReportOutOfMemory(cx);
1514 return ok;
1517 JSBool
1518 js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
1520 JSBool ok;
1521 JSGCRootHashEntry *rhe;
1524 * Due to the long-standing, but now removed, use of rt->gcLock across the
1525 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1526 * properly with a racing GC, without calling JS_AddRoot from a request.
1527 * We have to preserve API compatibility here, now that we avoid holding
1528 * rt->gcLock across the mark phase (including the root hashtable mark).
1530 JS_LOCK_GC(rt);
1531 js_WaitForGC(rt);
1532 rhe = (JSGCRootHashEntry *)
1533 JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
1534 if (rhe) {
1535 rhe->root = rp;
1536 rhe->name = name;
1537 ok = JS_TRUE;
1538 } else {
1539 ok = JS_FALSE;
1541 JS_UNLOCK_GC(rt);
1542 return ok;
1545 JSBool
1546 js_RemoveRoot(JSRuntime *rt, void *rp)
1549 * Due to the JS_RemoveRootRT API, we may be called outside of a request.
1550 * Same synchronization drill as above in js_AddRoot.
1552 JS_LOCK_GC(rt);
1553 js_WaitForGC(rt);
1554 (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
1555 rt->gcPoke = JS_TRUE;
1556 JS_UNLOCK_GC(rt);
1557 return JS_TRUE;
1560 #ifdef DEBUG
1562 static JSDHashOperator
1563 js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
1565 uint32 *leakedroots = (uint32 *)arg;
1566 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1568 (*leakedroots)++;
1569 fprintf(stderr,
1570 "JS engine warning: leaking GC root \'%s\' at %p\n",
1571 rhe->name ? (char *)rhe->name : "", rhe->root);
1573 return JS_DHASH_NEXT;
1576 static void
1577 CheckLeakedRoots(JSRuntime *rt)
1579 uint32 leakedroots = 0;
1581 /* Warn (but don't assert) debug builds of any remaining roots. */
1582 JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
1583 &leakedroots);
1584 if (leakedroots > 0) {
1585 if (leakedroots == 1) {
1586 fprintf(stderr,
1587 "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
1588 " This root may point to freed memory. Objects reachable\n"
1589 " through it have not been finalized.\n",
1590 (void *) rt);
1591 } else {
1592 fprintf(stderr,
1593 "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
1594 " These roots may point to freed memory. Objects reachable\n"
1595 " through them have not been finalized.\n",
1596 (unsigned long) leakedroots, (void *) rt);
1601 typedef struct NamedRootDumpArgs {
1602 void (*dump)(const char *name, void *rp, void *data);
1603 void *data;
1604 } NamedRootDumpArgs;
1606 static JSDHashOperator
1607 js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1608 void *arg)
1610 NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
1611 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1613 if (rhe->name)
1614 args->dump(rhe->name, rhe->root, args->data);
1615 return JS_DHASH_NEXT;
1618 JS_BEGIN_EXTERN_C
1619 void
1620 js_DumpNamedRoots(JSRuntime *rt,
1621 void (*dump)(const char *name, void *rp, void *data),
1622 void *data)
1624 NamedRootDumpArgs args;
1626 args.dump = dump;
1627 args.data = data;
1628 JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
1630 JS_END_EXTERN_C
1632 #endif /* DEBUG */
1634 typedef struct GCRootMapArgs {
1635 JSGCRootMapFun map;
1636 void *data;
1637 } GCRootMapArgs;
1639 static JSDHashOperator
1640 js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1641 void *arg)
1643 GCRootMapArgs *args = (GCRootMapArgs *) arg;
1644 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1645 intN mapflags;
1646 int op;
1648 mapflags = args->map(rhe->root, rhe->name, args->data);
1650 #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
1651 JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
1652 JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
1653 op = (JSDHashOperator)mapflags;
1654 #else
1655 op = JS_DHASH_NEXT;
1656 if (mapflags & JS_MAP_GCROOT_STOP)
1657 op |= JS_DHASH_STOP;
1658 if (mapflags & JS_MAP_GCROOT_REMOVE)
1659 op |= JS_DHASH_REMOVE;
1660 #endif
1662 return (JSDHashOperator) op;
1665 uint32
1666 js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
1668 GCRootMapArgs args;
1669 uint32 rv;
1671 args.map = map;
1672 args.data = data;
1673 JS_LOCK_GC(rt);
1674 rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
1675 JS_UNLOCK_GC(rt);
1676 return rv;
1679 JSBool
1680 js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
1682 JSRuntime *rt;
1683 JSBool ok;
1685 rt = cx->runtime;
1686 JS_ASSERT(!rt->gcRunning);
1688 JS_LOCK_GC(rt);
1689 ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
1690 JS_UNLOCK_GC(rt);
1691 return ok;
1694 static void
1695 CloseNativeIterators(JSContext *cx)
1697 JSRuntime *rt;
1698 size_t count, newCount, i;
1699 void **array;
1700 JSObject *obj;
1702 rt = cx->runtime;
1703 count = rt->gcIteratorTable.count;
1704 array = rt->gcIteratorTable.array;
1706 newCount = 0;
1707 for (i = 0; i != count; ++i) {
1708 obj = (JSObject *)array[i];
1709 if (js_IsAboutToBeFinalized(cx, obj))
1710 js_CloseNativeIterator(cx, obj);
1711 else
1712 array[newCount++] = obj;
1714 ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
1717 #if defined(DEBUG_brendan) || defined(DEBUG_timeless)
1718 #define DEBUG_gchist
1719 #endif
1721 #ifdef DEBUG_gchist
1722 #define NGCHIST 64
1724 static struct GCHist {
1725 bool lastDitch;
1726 JSGCThing *freeList;
1727 } gchist[NGCHIST];
1729 unsigned gchpos = 0;
1730 #endif
1732 void
1733 JSRuntime::setGCTriggerFactor(uint32 factor)
1735 JS_ASSERT(factor >= 100);
1737 gcTriggerFactor = factor;
1738 setGCLastBytes(gcLastBytes);
1741 void
1742 JSRuntime::setGCLastBytes(size_t lastBytes)
1744 gcLastBytes = lastBytes;
1745 uint64 triggerBytes = uint64(lastBytes) * uint64(gcTriggerFactor / 100);
1746 if (triggerBytes != size_t(triggerBytes))
1747 triggerBytes = size_t(-1);
1748 gcTriggerBytes = size_t(triggerBytes);
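/*
 * Note that the factor becomes a multiplier via integer division: for
 * example, gcTriggerFactor == 300 with lastBytes == 8192 gives
 * gcTriggerBytes == 8192 * (300 / 100) == 24576, so the allocation path
 * treats the GC threshold as reached once gcBytes grows to three times
 * its value after the last GC.
 */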
1751 static JS_INLINE bool
1752 IsGCThresholdReached(JSRuntime *rt)
1754 #ifdef JS_GC_ZEAL
1755 if (rt->gcZeal >= 1)
1756 return true;
1757 #endif
1760 * Since the initial value of the gcLastBytes parameter is not equal to
1761 * zero (see the js_InitGC function) the return value is false when
1762 * the gcBytes value is close to zero at the JS engine start.
1764 return rt->gcMallocBytes >= rt->gcMaxMallocBytes ||
1765 rt->gcBytes >= rt->gcTriggerBytes;
1768 template <typename T, typename NewbornType>
1769 static JS_INLINE T*
1770 NewGCThing(JSContext *cx, uintN flags, NewbornType** newbornRoot)
1772 JSRuntime *rt;
1773 bool doGC;
1774 JSGCThing *thing;
1775 uint8 *flagp;
1776 JSGCArenaList *arenaList;
1777 JSGCArenaInfo *a;
1778 uintN thingsLimit;
1779 JSLocalRootStack *lrs;
1780 #ifdef JS_GCMETER
1781 JSGCArenaStats *astats;
1782 #endif
1783 #ifdef JS_THREADSAFE
1784 JSBool gcLocked;
1785 uintN localMallocBytes;
1786 JSGCThing **lastptr;
1787 JSGCThing *tmpthing;
1788 uint8 *tmpflagp;
1789 uintN maxFreeThings; /* max to take from the global free list */
1790 #endif
1792 JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE);
1793 rt = cx->runtime;
1794 size_t nbytes = sizeof(T);
1795 JS_ASSERT(JS_ROUNDUP(nbytes, sizeof(JSGCThing)) == nbytes);
1796 uintN flindex = GC_FREELIST_INDEX(nbytes);
1798 /* Updates of metering counters here may not be thread-safe. */
1799 METER(astats = &cx->runtime->gcStats.arenaStats[flindex]);
1800 METER(astats->alloc++);
1802 #ifdef JS_THREADSAFE
1803 gcLocked = JS_FALSE;
1804 JS_ASSERT(cx->thread);
1806 JSGCThing *&freeList = cx->thread->gcFreeLists[flindex];
1807 thing = freeList;
1808 localMallocBytes = JS_THREAD_DATA(cx)->gcMallocBytes;
1809 if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
1810 flagp = thing->flagp;
1811 freeList = thing->next;
1812 METER(astats->localalloc++);
1813 goto success;
1816 JS_LOCK_GC(rt);
1817 gcLocked = JS_TRUE;
1819 /* Transfer thread-local counter to global one. */
1820 if (localMallocBytes != 0) {
1821 JS_THREAD_DATA(cx)->gcMallocBytes = 0;
1822 if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
1823 rt->gcMallocBytes = rt->gcMaxMallocBytes;
1824 else
1825 rt->gcMallocBytes += localMallocBytes;
1827 #endif
1828 JS_ASSERT(!rt->gcRunning);
1829 if (rt->gcRunning) {
1830 METER(rt->gcStats.finalfail++);
1831 JS_UNLOCK_GC(rt);
1832 return NULL;
1835 #if defined JS_GC_ZEAL && defined JS_TRACER
1836 if (rt->gcZeal >= 1 && JS_TRACE_MONITOR(cx).useReservedObjects)
1837 goto testReservedObjects;
1838 #endif
1840 arenaList = &rt->gcArenaList[flindex];
1841 doGC = IsGCThresholdReached(rt);
1842 for (;;) {
1843 if (doGC
1844 #ifdef JS_TRACER
1845 && !JS_ON_TRACE(cx) && !JS_TRACE_MONITOR(cx).useReservedObjects
1846 #endif
1849 * Keep rt->gcLock across the call into js_GC so we don't starve
1850 * and lose to racing threads who deplete the heap just after
1851 * js_GC has replenished it (or has synchronized with a racing
1852 * GC that collected a bunch of garbage). This unfair scheduling
1853 * can happen on certain operating systems. For the gory details,
1854 * see bug 162779 at https://bugzilla.mozilla.org/.
1856 js_GC(cx, GC_LAST_DITCH);
1857 METER(astats->retry++);
1860 /* Try to get thing from the free list. */
1861 thing = arenaList->freeList;
1862 if (thing) {
1863 arenaList->freeList = thing->next;
1864 flagp = thing->flagp;
1865 JS_ASSERT(*flagp & GCF_FINAL);
1867 #ifdef JS_THREADSAFE
1869 * Refill the local free list by taking several things from the
1870 * global free list unless the free list is already populated or
1871 * we are still at the rt->gcMaxMallocBytes barrier. The former is
1872 * caused by allocating new things in gcCallback(cx, JSGC_END).
1873 * The latter happens when GC is canceled due to
1874 * gcCallback(cx, JSGC_BEGIN) returning false.
1876 if (freeList || rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1877 break;
1879 tmpthing = arenaList->freeList;
1880 if (tmpthing) {
1881 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1882 do {
1883 if (!tmpthing->next)
1884 break;
1885 tmpthing = tmpthing->next;
1886 } while (--maxFreeThings != 0);
1888 freeList = arenaList->freeList;
1889 arenaList->freeList = tmpthing->next;
1890 tmpthing->next = NULL;
1892 #endif
1893 break;
1897 * Try to allocate things from the last arena. If it is fully used,
1898 * check if we can allocate a new one and, if we cannot, consider
1899 * doing a "last ditch" GC unless one has already been tried.
1901 thingsLimit = THINGS_PER_ARENA(nbytes);
1902 if (arenaList->lastCount != thingsLimit) {
1903 JS_ASSERT(arenaList->lastCount < thingsLimit);
1904 a = arenaList->last;
1905 } else {
1906 #ifdef JS_TRACER
1907 if (JS_TRACE_MONITOR(cx).useReservedObjects) {
1908 #ifdef JS_GC_ZEAL
1909 testReservedObjects:
1910 #endif
1911 JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
1913 thing = (JSGCThing *) tm->reservedObjects;
1914 flagp = GetGCThingFlags(thing);
1915 JS_ASSERT(thing);
1916 tm->reservedObjects = JSVAL_TO_OBJECT(tm->reservedObjects->fslots[0]);
1917 break;
1919 #endif
1921 a = NewGCArena(rt);
1922 if (!a) {
1923 if (doGC || JS_ON_TRACE(cx))
1924 goto fail;
1925 doGC = true;
1926 continue;
1928 a->list = arenaList;
1929 a->prev = arenaList->last;
1930 a->prevUntracedPage = 0;
1931 a->u.untracedThings = 0;
1932 arenaList->last = a;
1933 arenaList->lastCount = 0;
1936 flagp = THING_FLAGP(a, arenaList->lastCount);
1937 thing = FLAGP_TO_THING(flagp, nbytes);
1938 arenaList->lastCount++;
1940 #ifdef JS_THREADSAFE
1942 * Refill the local free list by taking free things from the last
1943 * arena. Prefer to order free things by ascending address in the
1944 * (unscientific) hope of better cache locality.
1946 if (freeList || rt->gcMallocBytes >= rt->gcMaxMallocBytes)
1947 break;
1948 lastptr = &freeList;
1949 maxFreeThings = thingsLimit - arenaList->lastCount;
1950 if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
1951 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1952 uint32 lastCount = arenaList->lastCount;
1953 while (maxFreeThings != 0) {
1954 --maxFreeThings;
1956 tmpflagp = THING_FLAGP(a, lastCount);
1957 tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
1958 lastCount++;
1959 tmpthing->flagp = tmpflagp;
1960 *tmpflagp = GCF_FINAL; /* signifying that thing is free */
1962 *lastptr = tmpthing;
1963 lastptr = &tmpthing->next;
1965 *lastptr = NULL;
1966 arenaList->lastCount = lastCount;
1967 #endif
1968 break;
1971 /* We successfully allocated the thing. */
1972 #ifdef JS_THREADSAFE
1973 success:
1974 #endif
1975 lrs = cx->localRootStack;
1976 if (lrs) {
1978 * If we're in a local root scope, don't set newborn[type] at all, to
1979 * avoid entraining garbage from it for an unbounded amount of time
1980 * on this context. A caller will leave the local root scope and pop
1981 * this reference, allowing thing to be GC'd if it has no other refs.
1982 * See JS_EnterLocalRootScope and related APIs.
1984 if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
1986 * When we fail for a thing allocated through the tail of the last
1987 * arena, the thing's flag byte is not initialized. So, to prevent the
1988 * GC from accessing the uninitialized flags during finalization, we
1989 * always mark the thing as final. See bug 337407.
1991 *flagp = GCF_FINAL;
1992 goto fail;
1994 } else {
1996 * No local root scope, so we're stuck with the old, fragile model of
1997 * depending on a pigeon-hole newborn per type per context.
1999 *newbornRoot = (T *) thing;
2002 /* We can't fail now, so update flags. */
2003 *flagp = (uint8)flags;
2005 #ifdef DEBUG_gchist
2006 gchist[gchpos].lastDitch = doGC;
2007 gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
2008 if (++gchpos == NGCHIST)
2009 gchpos = 0;
2010 #endif
2012 /* This is not thread-safe for thread-local allocations. */
2013 METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++);
2015 #ifdef JS_THREADSAFE
2016 if (gcLocked)
2017 JS_UNLOCK_GC(rt);
2018 #endif
2019 return (T*)thing;
2021 fail:
2022 #ifdef JS_THREADSAFE
2023 if (gcLocked)
2024 JS_UNLOCK_GC(rt);
2025 #endif
2026 METER(astats->fail++);
2027 js_ReportOutOfMemory(cx);
2028 return NULL;
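/*
 * Typed wrappers around NewGCThing. Each passes the GC flag byte for the
 * thing's type and the per-context newborn weak root that keeps the new
 * thing alive until the caller roots it.
 */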
2031 JSObject *
2032 js_NewGCObject(JSContext *cx)
2034 return NewGCThing<JSObject>(cx, GCX_OBJECT, &cx->weakRoots.newbornObject);
2037 JSString *
2038 js_NewGCString(JSContext *cx)
2040 return NewGCThing<JSString>(cx, GCX_STRING, &cx->weakRoots.newbornString);
2043 JSString *
2044 js_NewGCExternalString(JSContext *cx, uintN type)
2046 JS_ASSERT(type < JS_EXTERNAL_STRING_LIMIT);
2047 return NewGCThing<JSString>(cx, GCX_EXTERNAL_STRING + type,
2048 &cx->weakRoots.newbornExternalString[type]);
2051 JSFunction *
2052 js_NewGCFunction(JSContext *cx)
2054 return NewGCThing<JSFunction>(cx, GCX_OBJECT, &cx->weakRoots.newbornObject);
2057 #if JS_HAS_XML_SUPPORT
2058 JSXML *
2059 js_NewGCXML(JSContext *cx)
2061 return NewGCThing<JSXML>(cx, GCX_XML, &cx->weakRoots.newbornXML);
2063 #endif
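/*
 * Doubles live in dedicated arenas (a->list is null) that use a bitmap
 * instead of per-thing flag bytes. RefillDoubleFreeList scans the bitmap one
 * word at a time for free cells, marks the whole word as used under the GC
 * lock, and links the corresponding cells into a list that the caller
 * installs as the context's doubleFreeList.
 */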
2065 static JSGCDoubleCell *
2066 RefillDoubleFreeList(JSContext *cx)
2068 JSRuntime *rt;
2069 jsbitmap *doubleFlags, usedBits;
2070 JSBool didGC = JS_FALSE;
2071 JSGCArenaInfo *a;
2072 uintN bit, index;
2073 JSGCDoubleCell *cell, *list, *lastcell;
2075 JS_ASSERT(!cx->doubleFreeList);
2077 rt = cx->runtime;
2078 JS_LOCK_GC(rt);
2080 JS_ASSERT(!rt->gcRunning);
2081 if (rt->gcRunning) {
2082 METER(rt->gcStats.finalfail++);
2083 JS_UNLOCK_GC(rt);
2084 return NULL;
2087 if (IsGCThresholdReached(rt))
2088 goto do_gc;
2091 * Loop until we find a flag bitmap word with unset bits indicating free
2092 * double cells, then set all its bits as used and put the cells on the
2093 * free list for the current context.
2095 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2096 for (;;) {
2097 if (((jsuword) doubleFlags & GC_ARENA_MASK) ==
2098 ARENA_INFO_OFFSET) {
2099 if (doubleFlags == DOUBLE_BITMAP_SENTINEL ||
2100 !((JSGCArenaInfo *) doubleFlags)->prev) {
2101 a = NewGCArena(rt);
2102 if (!a) {
2103 do_gc:
2104 if (didGC || JS_ON_TRACE(cx)) {
2105 METER(rt->gcStats.doubleArenaStats.fail++);
2106 JS_UNLOCK_GC(rt);
2107 js_ReportOutOfMemory(cx);
2108 return NULL;
2110 js_GC(cx, GC_LAST_DITCH);
2111 METER(rt->gcStats.doubleArenaStats.retry++);
2112 doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
2113 didGC = JS_TRUE;
2114 continue;
2116 a->list = NULL;
2117 a->prev = NULL;
2118 if (doubleFlags == DOUBLE_BITMAP_SENTINEL) {
2119 JS_ASSERT(!rt->gcDoubleArenaList.first);
2120 rt->gcDoubleArenaList.first = a;
2121 } else {
2122 JS_ASSERT(rt->gcDoubleArenaList.first);
2123 ((JSGCArenaInfo *) doubleFlags)->prev = a;
2125 ClearDoubleArenaFlags(a);
2126 doubleFlags = DOUBLE_ARENA_BITMAP(a);
2127 break;
2129 doubleFlags =
2130 DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev);
2134 * When doubleFlags points to the last bitmap word in the arena, its
2135 * high bits correspond to non-existent cells. ClearDoubleArenaFlags
2136 * sets such bits to 1. Thus even for this last word a bit is unset
2137 * iff the corresponding cell exists and is free.
2139 if (*doubleFlags != (jsbitmap) -1)
2140 break;
2141 ++doubleFlags;
2144 rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1;
2145 usedBits = *doubleFlags;
2146 JS_ASSERT(usedBits != (jsbitmap) -1);
2147 *doubleFlags = (jsbitmap) -1;
2148 JS_UNLOCK_GC(rt);
2151 * Find the index corresponding to the first bit in *doubleFlags. The last
2152 * bit will have "index + JS_BITS_PER_WORD - 1".
2154 index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) -
2155 DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE;
2156 cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index;
2158 if (usedBits == 0) {
2159 /* The common case when all doubles from *doubleFlags are free. */
2160 JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA);
2161 list = cell;
2162 for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell)
2163 cell->link = cell + 1;
2164 lastcell->link = NULL;
2165 } else {
2167 * Assemble the free list from free cells from *doubleFlags starting
2168 * from the tail. In the loop
2170 * index + bit >= DOUBLES_PER_ARENA
2172 * when bit is one of the unused bits. We do not check for such bits
2173 * explicitly as they must be set and the "if" check filters them out.
2175 JS_ASSERT(index + JS_BITS_PER_WORD <=
2176 DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS);
2177 bit = JS_BITS_PER_WORD;
2178 cell += bit;
2179 list = NULL;
2180 do {
2181 --bit;
2182 --cell;
2183 if (!(((jsbitmap) 1 << bit) & usedBits)) {
2184 JS_ASSERT(index + bit < DOUBLES_PER_ARENA);
2185 JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list);
2186 cell->link = list;
2187 list = cell;
2189 } while (bit != 0);
2191 JS_ASSERT(list);
2194 * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue as
2195 * it immediately consumes the head of the list.
2197 return list;
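/*
 * Allocate a GC double cell for d and store DOUBLE_TO_JSVAL(&cell->number)
 * in *vp. Takes the head of the context's double free list, refilling the
 * list from the double arenas when it is empty.
 */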
2200 JSBool
2201 js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
2203 #ifdef JS_GCMETER
2204 JSGCArenaStats *astats;
2205 #endif
2206 JSGCDoubleCell *cell;
2208 /* Updates of metering counters here are not thread-safe. */
2209 METER(astats = &cx->runtime->gcStats.doubleArenaStats);
2210 METER(astats->alloc++);
2211 cell = cx->doubleFreeList;
2212 if (!cell) {
2213 cell = RefillDoubleFreeList(cx);
2214 if (!cell) {
2215 METER(astats->fail++);
2216 return JS_FALSE;
2218 } else {
2219 METER(astats->localalloc++);
2221 cx->doubleFreeList = cell->link;
2222 cell->number = d;
2223 *vp = DOUBLE_TO_JSVAL(&cell->number);
2224 return JS_TRUE;
2227 jsdouble *
2228 js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
2230 jsval v;
2231 jsdouble *dp;
2233 if (!js_NewDoubleInRootedValue(cx, d, &v))
2234 return NULL;
2236 JS_ASSERT(JSVAL_IS_DOUBLE(v));
2237 dp = JSVAL_TO_DOUBLE(v);
2238 if (cx->localRootStack) {
2239 if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0)
2240 return NULL;
2241 } else {
2242 cx->weakRoots.newbornDouble = dp;
2244 return dp;
2247 #ifdef JS_TRACER
2248 JSBool
2249 js_ReserveObjects(JSContext *cx, size_t nobjects)
2252 * Ensure at least nobjects objects are in the list. fslots[1] of each
2253 * object on the reservedObjects list is the length of the list from there.
2255 JSObject *&head = JS_TRACE_MONITOR(cx).reservedObjects;
2256 size_t i = head ? JSVAL_TO_INT(head->fslots[1]) : 0;
2257 while (i < nobjects) {
2258 JSObject *obj = js_NewGCObject(cx);
2259 if (!obj)
2260 return JS_FALSE;
2261 memset(obj, 0, sizeof(JSObject));
2262 /* The class must be set to something for finalization. */
2263 obj->classword = (jsuword) &js_ObjectClass;
2264 obj->fslots[0] = OBJECT_TO_JSVAL(head);
2265 i++;
2266 obj->fslots[1] = INT_TO_JSVAL(i);
2267 head = obj;
2270 return JS_TRUE;
2272 #endif
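/*
 * Account sz bytes of externally held memory as GC heap bytes. Called with
 * rt->gcLock held; if the addition would exceed rt->gcMaxBytes or the GC
 * threshold has been reached, this leaves trace if possible and runs a
 * last-ditch GC before giving up and returning false.
 */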
2274 JSBool
2275 js_AddAsGCBytes(JSContext *cx, size_t sz)
2277 JSRuntime *rt;
2279 rt = cx->runtime;
2280 if (rt->gcBytes >= rt->gcMaxBytes ||
2281 sz > (size_t) (rt->gcMaxBytes - rt->gcBytes) ||
2282 IsGCThresholdReached(rt)) {
2283 if (JS_ON_TRACE(cx)) {
2285 * If we can't leave the trace, signal OOM condition, otherwise
2286 * exit from trace and proceed with GC.
2288 if (!js_CanLeaveTrace(cx)) {
2289 JS_UNLOCK_GC(rt);
2290 return JS_FALSE;
2292 js_LeaveTrace(cx);
2294 js_GC(cx, GC_LAST_DITCH);
2295 if (rt->gcBytes >= rt->gcMaxBytes ||
2296 sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)) {
2297 JS_UNLOCK_GC(rt);
2298 JS_ReportOutOfMemory(cx);
2299 return JS_FALSE;
2302 rt->gcBytes += (uint32) sz;
2303 return JS_TRUE;
2306 void
2307 js_RemoveAsGCBytes(JSRuntime *rt, size_t sz)
2309 JS_ASSERT((size_t) rt->gcBytes >= sz);
2310 rt->gcBytes -= (uint32) sz;
2314 * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
2315 * they have no descendants to mark during the GC. Currently the optimization
2316 * is only used for non-dependent strings.
2318 #define GC_THING_IS_SHALLOW(flagp, thing) \
2319 ((flagp) && \
2320 ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING || \
2321 ((*(flagp) & GCF_TYPEMASK) == GCX_STRING && \
2322 !((JSString *) (thing))->isDependent())))
2324 /* This is compatible with JSDHashEntryStub. */
2325 typedef struct JSGCLockHashEntry {
2326 JSDHashEntryHdr hdr;
2327 const void *thing;
2328 uint32 count;
2329 } JSGCLockHashEntry;
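/*
 * Lock a GC thing so the collector treats it as a root. Shallow things are
 * locked by setting GCF_LOCK in their flag byte; everything else, and any
 * nested lock of a shallow thing, gets a counted entry in rt->gcLocksHash.
 */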
2331 JSBool
2332 js_LockGCThingRT(JSRuntime *rt, void *thing)
2334 JSBool shallow, ok;
2335 uint8 *flagp;
2336 JSGCLockHashEntry *lhe;
2338 if (!thing)
2339 return JS_TRUE;
2341 flagp = GetGCThingFlagsOrNull(thing);
2342 JS_LOCK_GC(rt);
2343 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2346 * Avoid adding a rt->gcLocksHash entry for shallow things until someone
2347 * nests a lock.
2349 if (shallow && !(*flagp & GCF_LOCK)) {
2350 *flagp |= GCF_LOCK;
2351 METER(rt->gcStats.lock++);
2352 ok = JS_TRUE;
2353 goto out;
2356 if (!rt->gcLocksHash) {
2357 rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
2358 sizeof(JSGCLockHashEntry),
2359 GC_ROOTS_SIZE);
2360 if (!rt->gcLocksHash) {
2361 ok = JS_FALSE;
2362 goto out;
2366 lhe = (JSGCLockHashEntry *)
2367 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
2368 if (!lhe) {
2369 ok = JS_FALSE;
2370 goto out;
2372 if (!lhe->thing) {
2373 lhe->thing = thing;
2374 lhe->count = 1;
2375 } else {
2376 JS_ASSERT(lhe->count >= 1);
2377 lhe->count++;
2380 METER(rt->gcStats.lock++);
2381 ok = JS_TRUE;
2382 out:
2383 JS_UNLOCK_GC(rt);
2384 return ok;
2387 JSBool
2388 js_UnlockGCThingRT(JSRuntime *rt, void *thing)
2390 uint8 *flagp;
2391 JSBool shallow;
2392 JSGCLockHashEntry *lhe;
2394 if (!thing)
2395 return JS_TRUE;
2397 flagp = GetGCThingFlagsOrNull(thing);
2398 JS_LOCK_GC(rt);
2399 shallow = GC_THING_IS_SHALLOW(flagp, thing);
2401 if (shallow && !(*flagp & GCF_LOCK))
2402 goto out;
2403 if (!rt->gcLocksHash ||
2404 (lhe = (JSGCLockHashEntry *)
2405 JS_DHashTableOperate(rt->gcLocksHash, thing,
2406 JS_DHASH_LOOKUP),
2407 JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
2408 /* Shallow entry is not in the hash -> clear its lock bit. */
2409 if (shallow)
2410 *flagp &= ~GCF_LOCK;
2411 else
2412 goto out;
2413 } else {
2414 if (--lhe->count != 0)
2415 goto out;
2416 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
2419 rt->gcPoke = JS_TRUE;
2420 METER(rt->gcStats.unlock++);
2421 out:
2422 JS_UNLOCK_GC(rt);
2423 return JS_TRUE;
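/*
 * Trace the direct children of a GC thing: an object's children via its
 * map's trace hook, a dependent string's base, and an XML node's kids.
 * Doubles have no children and need no case here.
 */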
2426 JS_PUBLIC_API(void)
2427 JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
2429 switch (kind) {
2430 case JSTRACE_OBJECT: {
2431 /* If obj has no map, it must be a newborn. */
2432 JSObject *obj = (JSObject *) thing;
2433 if (!obj->map)
2434 break;
2435 obj->map->ops->trace(trc, obj);
2436 break;
2439 case JSTRACE_STRING: {
2440 JSString *str = (JSString *) thing;
2441 if (str->isDependent())
2442 JS_CALL_STRING_TRACER(trc, str->dependentBase(), "base");
2443 break;
2446 #if JS_HAS_XML_SUPPORT
2447 case JSTRACE_XML:
2448 js_TraceXML(trc, (JSXML *)thing);
2449 break;
2450 #endif
2455 * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings.
2457 #define THINGS_PER_UNTRACED_BIT(thingSize) \
2458 JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
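/*
 * When marking would recurse too deeply, tracing of a thing's children is
 * postponed: the thing keeps GCF_MARK | GCF_FINAL and its arena records it
 * in the u.untracedThings bitmask, one bit per group of
 * THINGS_PER_UNTRACED_BIT(thingSize) things. Arenas with such things form a
 * stack linked through prevUntracedPage, drained by TraceDelayedChildren.
 */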
2460 static void
2461 DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
2463 JSGCArenaInfo *a;
2464 uint32 untracedBitIndex;
2465 jsuword bit;
2468 * Things with children to be traced later are marked with
2469 * GCF_MARK | GCF_FINAL flags.
2471 JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
2472 *flagp |= GCF_FINAL;
2474 METER(rt->gcStats.untraced++);
2475 #ifdef DEBUG
2476 ++rt->gcTraceLaterCount;
2477 METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount);
2478 #endif
2480 a = FLAGP_TO_ARENA(flagp);
2481 untracedBitIndex = FLAGP_TO_INDEX(flagp) /
2482 THINGS_PER_UNTRACED_BIT(a->list->thingSize);
2483 JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
2484 bit = (jsuword)1 << untracedBitIndex;
2485 if (a->u.untracedThings != 0) {
2486 JS_ASSERT(rt->gcUntracedArenaStackTop);
2487 if (a->u.untracedThings & bit) {
2488 /* bit already covers things with children to trace later. */
2489 return;
2491 a->u.untracedThings |= bit;
2492 } else {
2494 * The thing is the first one in the whole arena whose children have
2495 * not yet been traced, so push the arena on the stack of arenas with
2496 * things to be traced later, unless the arena has already been pushed.
2497 * We detect that by checking prevUntracedPage, as the field is 0 only
2498 * for arenas that have not yet been pushed. To ensure that
2499 * prevUntracedPage != 0
2500 * even when the stack contains one element, we make prevUntracedPage
2501 * for the arena at the bottom point to itself.
2503 * See comments in TraceDelayedChildren.
2505 a->u.untracedThings = bit;
2506 if (a->prevUntracedPage == 0) {
2507 if (!rt->gcUntracedArenaStackTop) {
2508 /* Stack was empty, mark the arena as the bottom element. */
2509 a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
2510 } else {
2511 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2512 a->prevUntracedPage =
2513 ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
2515 rt->gcUntracedArenaStackTop = a;
2518 JS_ASSERT(rt->gcUntracedArenaStackTop);
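/*
 * Pop arenas off the untraced stack and trace the children of every thing
 * still flagged GCF_MARK | GCF_FINAL, clearing GCF_FINAL as we go. Tracing
 * may push more arenas, so an arena is only popped once it is again the top
 * of the stack.
 */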
2521 static void
2522 TraceDelayedChildren(JSTracer *trc)
2524 JSRuntime *rt;
2525 JSGCArenaInfo *a, *aprev;
2526 uint32 thingSize;
2527 uint32 thingsPerUntracedBit;
2528 uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;
2529 JSGCThing *thing;
2530 uint8 *flagp;
2532 rt = trc->context->runtime;
2533 a = rt->gcUntracedArenaStackTop;
2534 if (!a) {
2535 JS_ASSERT(rt->gcTraceLaterCount == 0);
2536 return;
2539 for (;;) {
2541 * The following assert verifies that the current arena belongs to the
2542 * untraced stack, since DelayTracingChildren ensures that even for the
2543 * stack's bottom prevUntracedPage != 0; rather, it points to itself.
2545 JS_ASSERT(a->prevUntracedPage != 0);
2546 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
2547 thingSize = a->list->thingSize;
2548 indexLimit = (a == a->list->last)
2549 ? a->list->lastCount
2550 : THINGS_PER_ARENA(thingSize);
2551 thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);
2554 * We cannot use a do-while loop here, as a->u.untracedThings can be
2555 * zero before the loop starts, left over from previous iterations.
2556 * See comments after the loop.
2558 while (a->u.untracedThings != 0) {
2559 untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings);
2560 a->u.untracedThings &= ~((jsuword)1 << untracedBitIndex);
2561 thingIndex = untracedBitIndex * thingsPerUntracedBit;
2562 endIndex = thingIndex + thingsPerUntracedBit;
2565 * endIndex can go beyond the last allocated thing as the real
2566 * limit can be "inside" the bit.
2568 if (endIndex > indexLimit)
2569 endIndex = indexLimit;
2570 JS_ASSERT(thingIndex < indexLimit);
2572 do {
2574 * Skip free or already traced things that share the bit
2575 * with untraced ones.
2577 flagp = THING_FLAGP(a, thingIndex);
2578 if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
2579 continue;
2580 *flagp &= ~GCF_FINAL;
2581 #ifdef DEBUG
2582 JS_ASSERT(rt->gcTraceLaterCount != 0);
2583 --rt->gcTraceLaterCount;
2584 #endif
2585 thing = FLAGP_TO_THING(flagp, thingSize);
2586 JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp));
2587 } while (++thingIndex != endIndex);
2591 * We have finished tracing all things in the arena, but we can only
2592 * pop it from the stack if the arena is the stack's top.
2594 * When the JS_TraceChildren call above reaches JS_CallTracer, which in
2595 * turn (on a low C stack) calls DelayTracingChildren, and the latter
2596 * pushes new arenas onto the untraced stack, we have to skip popping
2597 * this arena until it becomes the top of the stack again.
2599 if (a == rt->gcUntracedArenaStackTop) {
2600 aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
2601 a->prevUntracedPage = 0;
2602 if (a == aprev) {
2604 * prevUntracedPage points to itself and we reached the
2605 * bottom of the stack.
2607 break;
2609 rt->gcUntracedArenaStackTop = a = aprev;
2610 } else {
2611 a = rt->gcUntracedArenaStackTop;
2614 JS_ASSERT(rt->gcUntracedArenaStackTop);
2615 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
2616 rt->gcUntracedArenaStackTop = NULL;
2617 JS_ASSERT(rt->gcTraceLaterCount == 0);
2620 JS_PUBLIC_API(void)
2621 JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
2623 JSContext *cx;
2624 JSRuntime *rt;
2625 JSGCArenaInfo *a;
2626 uintN index;
2627 uint8 *flagp;
2629 JS_ASSERT(thing);
2630 JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
2631 JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
2633 if (!IS_GC_MARKING_TRACER(trc)) {
2634 trc->callback(trc, thing, kind);
2635 goto out;
2638 cx = trc->context;
2639 rt = cx->runtime;
2640 JS_ASSERT(rt->gcMarkingTracer == trc);
2641 JS_ASSERT(rt->gcLevel > 0);
2644 * Optimize for strings and doubles, as their sizes are known and their
2645 * tracing is not recursive.
2647 switch (kind) {
2648 case JSTRACE_DOUBLE:
2649 a = THING_TO_ARENA(thing);
2650 JS_ASSERT(!a->list);
2651 if (!a->u.hasMarkedDoubles) {
2652 ClearDoubleArenaFlags(a);
2653 a->u.hasMarkedDoubles = JS_TRUE;
2655 index = DOUBLE_THING_TO_INDEX(thing);
2656 JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);
2657 goto out;
2659 case JSTRACE_STRING:
2660 for (;;) {
2661 if (JSString::isStatic(thing))
2662 goto out;
2663 flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
2664 JS_ASSERT((*flagp & GCF_FINAL) == 0);
2665 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2666 if (!((JSString *) thing)->isDependent()) {
2667 *flagp |= GCF_MARK;
2668 goto out;
2670 if (*flagp & GCF_MARK)
2671 goto out;
2672 *flagp |= GCF_MARK;
2673 thing = ((JSString *) thing)->dependentBase();
2675 /* NOTREACHED */
2678 flagp = GetGCThingFlags(thing);
2679 JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
2680 if (*flagp & GCF_MARK)
2681 goto out;
2684 * We check for the non-final flag only if the mark is unset, as
2685 * DelayTracingChildren uses that flag. See comments in the function.
2687 JS_ASSERT(*flagp != GCF_FINAL);
2688 *flagp |= GCF_MARK;
2689 if (!cx->insideGCMarkCallback) {
2691 * With JS_GC_ASSUME_LOW_C_STACK defined, the mark phase of the GC always
2692 * uses the non-recursive code that otherwise would be used only under
2693 * a low C stack condition.
2695 #ifdef JS_GC_ASSUME_LOW_C_STACK
2696 # define RECURSION_TOO_DEEP() JS_TRUE
2697 #else
2698 int stackDummy;
2699 # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
2700 #endif
2701 if (RECURSION_TOO_DEEP())
2702 DelayTracingChildren(rt, flagp);
2703 else
2704 JS_TraceChildren(trc, thing, kind);
2705 } else {
2707 * For API compatibility we allow the callback to assume that
2708 * after it calls JS_MarkGCThing for the last time, the callback can
2709 * start to finalize its own objects that are only referenced by
2710 * unmarked GC things.
2712 * Since we do not know which call from inside the callback is the
2713 * last, we ensure that children of all marked things are traced and
2714 * call TraceDelayedChildren(trc) after tracing the thing.
2716 * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
2717 * for the things with untraced children, calling DelayTracingChildren
2718 * is useless here. Hence we always trace the thing's children, even
2719 * with a low native stack.
2721 cx->insideGCMarkCallback = JS_FALSE;
2722 JS_TraceChildren(trc, thing, kind);
2723 TraceDelayedChildren(trc);
2724 cx->insideGCMarkCallback = JS_TRUE;
2727 out:
2728 #ifdef DEBUG
2729 trc->debugPrinter = NULL;
2730 trc->debugPrintArg = NULL;
2731 #endif
2732 return; /* to avoid out: right_curl when DEBUG is not defined */
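/*
 * Call the tracer for v only if it actually holds a GC thing: a double, a
 * string, or a non-null object-tagged value (which may be any GC thing
 * reinterpreted as an object).
 */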
2735 void
2736 js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
2738 void *thing;
2739 uint32 kind;
2741 if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
2742 thing = JSVAL_TO_TRACEABLE(v);
2743 kind = JSVAL_TRACE_KIND(v);
2744 JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v)));
2745 } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
2746 /* v can be an arbitrary GC thing reinterpreted as an object. */
2747 thing = JSVAL_TO_OBJECT(v);
2748 kind = js_GetGCThingTraceKind(thing);
2749 } else {
2750 return;
2752 JS_CallTracer(trc, thing, kind);
2755 static JSDHashOperator
2756 gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2757 void *arg)
2759 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
2760 JSTracer *trc = (JSTracer *)arg;
2761 jsval *rp = (jsval *)rhe->root;
2762 jsval v = *rp;
2764 /* Ignore null references, scalar values, and static strings. */
2765 if (!JSVAL_IS_NULL(v) &&
2766 JSVAL_IS_GCTHING(v) &&
2767 !JSString::isStatic(JSVAL_TO_GCTHING(v))) {
2768 #ifdef DEBUG
2769 JSBool root_points_to_gcArenaList = JS_FALSE;
2770 jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
2771 JSRuntime *rt;
2772 uintN i;
2773 JSGCArenaList *arenaList;
2774 uint32 thingSize;
2775 JSGCArenaInfo *a;
2776 size_t limit;
2778 rt = trc->context->runtime;
2779 for (i = 0; i < GC_NUM_FREELISTS; i++) {
2780 arenaList = &rt->gcArenaList[i];
2781 thingSize = arenaList->thingSize;
2782 limit = (size_t) arenaList->lastCount * thingSize;
2783 for (a = arenaList->last; a; a = a->prev) {
2784 if (thing - ARENA_INFO_TO_START(a) < limit) {
2785 root_points_to_gcArenaList = JS_TRUE;
2786 break;
2788 limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
2791 if (!root_points_to_gcArenaList) {
2792 for (a = rt->gcDoubleArenaList.first; a; a = a->prev) {
2793 if (thing - ARENA_INFO_TO_START(a) <
2794 DOUBLES_PER_ARENA * sizeof(jsdouble)) {
2795 root_points_to_gcArenaList = JS_TRUE;
2796 break;
2800 if (!root_points_to_gcArenaList && rhe->name) {
2801 fprintf(stderr,
2802 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
2803 "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
2804 "The root's name is \"%s\".\n",
2805 rhe->name);
2807 JS_ASSERT(root_points_to_gcArenaList);
2808 #endif
2809 JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
2810 js_CallValueTracerIfGCThing(trc, v);
2813 return JS_DHASH_NEXT;
2816 static JSDHashOperator
2817 gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2818 void *arg)
2820 JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
2821 void *thing = (void *)lhe->thing;
2822 JSTracer *trc = (JSTracer *)arg;
2823 uint32 traceKind;
2825 JS_ASSERT(lhe->count >= 1);
2826 traceKind = js_GetGCThingTraceKind(thing);
2827 JS_CALL_TRACER(trc, thing, traceKind, "locked object");
2828 return JS_DHASH_NEXT;
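/*
 * Trace every traceable jsval in the vector vec of length len, tagging each
 * value with name and its index for debug tracers.
 */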
2831 #define TRACE_JSVALS(trc, len, vec, name) \
2832 JS_BEGIN_MACRO \
2833 jsval _v, *_vp, *_end; \
2835 for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
2836 _v = *_vp; \
2837 if (JSVAL_IS_TRACEABLE(_v)) { \
2838 JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \
2839 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v), \
2840 JSVAL_TRACE_KIND(_v)); \
2843 JS_END_MACRO
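/*
 * Trace all GC things reachable from an interpreter stack frame: the call,
 * arguments, and variables objects, the script and live operand slots, the
 * this value, the callee and argument values, the return value, and the
 * scope chain.
 */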
2845 void
2846 js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
2848 uintN nslots, minargs, skip;
2850 if (fp->callobj)
2851 JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
2852 if (fp->argsobj)
2853 JS_CALL_OBJECT_TRACER(trc, JSVAL_TO_OBJECT(fp->argsobj), "arguments");
2854 if (fp->varobj)
2855 JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
2856 if (fp->script) {
2857 js_TraceScript(trc, fp->script);
2859 /* fp->slots is null for watch pseudo-frames, see js_watch_set. */
2860 if (fp->slots) {
2862 * Don't mark what has not been pushed yet, or what has been
2863 * popped already.
2865 if (fp->regs && fp->regs->sp) {
2866 nslots = (uintN) (fp->regs->sp - fp->slots);
2867 JS_ASSERT(nslots >= fp->script->nfixed);
2868 } else {
2869 nslots = fp->script->nfixed;
2871 TRACE_JSVALS(trc, nslots, fp->slots, "slot");
2873 } else {
2874 JS_ASSERT(!fp->slots);
2875 JS_ASSERT(!fp->regs);
2878 /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
2879 JS_CALL_VALUE_TRACER(trc, fp->thisv, "this");
2881 if (fp->argv) {
2882 JS_CALL_VALUE_TRACER(trc, fp->argv[-2], "callee");
2883 nslots = fp->argc;
2884 skip = 0;
2885 if (fp->fun) {
2886 minargs = FUN_MINARGS(fp->fun);
2887 if (minargs > nslots)
2888 nslots = minargs;
2889 if (!FUN_INTERPRETED(fp->fun)) {
2890 JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
2891 nslots += fp->fun->u.n.extra;
2893 if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
2894 skip = 2 + fp->argc;
2896 TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
2899 JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
2900 if (fp->scopeChain)
2901 JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
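/*
 * Trace the per-context weak roots: the newborn GC thing of each type plus
 * the lastAtom and lastInternalResult values.
 */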
2904 static void
2905 TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
2907 if (wr->newbornObject)
2908 JS_CALL_OBJECT_TRACER(trc, wr->newbornObject, "newborn_object");
2909 if (wr->newbornString)
2910 JS_CALL_STRING_TRACER(trc, wr->newbornString, "newborn_string");
2911 if (wr->newbornDouble)
2912 JS_CALL_DOUBLE_TRACER(trc, wr->newbornDouble, "newborn_double");
2913 #if JS_HAS_XML_SUPPORT
2914 if (wr->newbornXML)
2915 JS_CALL_TRACER(trc, wr->newbornXML, JSTRACE_XML, "newborn_xml");
2916 #endif
2917 for (uint32 i = 0; i != JS_EXTERNAL_STRING_LIMIT; i++) {
2918 JSString *str = wr->newbornExternalString[i];
2919 if (str)
2920 JS_CALL_STRING_TRACER(trc, str, "newborn_external_string");
2922 JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
2923 JS_SET_TRACING_NAME(trc, "lastInternalResult");
2924 js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
2927 JS_REQUIRES_STACK JS_FRIEND_API(void)
2928 js_TraceContext(JSTracer *trc, JSContext *acx)
2930 JSStackFrame *fp, *nextChain;
2931 JSStackHeader *sh;
2932 JSTempValueRooter *tvr;
2934 if (IS_GC_MARKING_TRACER(trc)) {
2936 #define FREE_OLD_ARENAS(pool) \
2937 JS_BEGIN_MACRO \
2938 int64 _age; \
2939 JSArena * _a = (pool).current; \
2940 if (_a == (pool).first.next && \
2941 _a->avail == _a->base + sizeof(int64)) { \
2942 _age = JS_Now() - *(int64 *) _a->base; \
2943 if (_age > (int64) acx->runtime->gcEmptyArenaPoolLifespan * \
2944 1000) \
2945 JS_FreeArenaPool(&(pool)); \
2947 JS_END_MACRO
2950 * Release the stackPool's arenas if the stackPool has existed for
2951 * longer than the limit specified by gcEmptyArenaPoolLifespan.
2953 FREE_OLD_ARENAS(acx->stackPool);
2956 * Release the regexpPool's arenas based on the same criterion as for
2957 * the stackPool.
2959 FREE_OLD_ARENAS(acx->regexpPool);
2962 * Clear the double free list to release all the pre-allocated doubles.
2964 acx->doubleFreeList = NULL;
2968 * Iterate frame chain and dormant chains.
2970 * (NB: see comment on this whole "dormant" thing in js_Execute.)
2972 * Since js_GetTopStackFrame needs to dereference cx->thread to check for
2973 * JIT frames, we check for non-null thread here and avoid null checks
2974 * there. See bug 471197.
2976 #ifdef JS_THREADSAFE
2977 if (acx->thread)
2978 #endif
2980 fp = js_GetTopStackFrame(acx);
2981 nextChain = acx->dormantFrameChain;
2982 if (!fp)
2983 goto next_chain;
2985 /* The top frame must not be dormant. */
2986 JS_ASSERT(!fp->dormantNext);
2987 for (;;) {
2988 do {
2989 js_TraceStackFrame(trc, fp);
2990 } while ((fp = fp->down) != NULL);
2992 next_chain:
2993 if (!nextChain)
2994 break;
2995 fp = nextChain;
2996 nextChain = nextChain->dormantNext;
3000 /* Mark other roots-by-definition in acx. */
3001 if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
3002 JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
3003 TraceWeakRoots(trc, &acx->weakRoots);
3004 if (acx->throwing) {
3005 JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
3006 } else {
3007 /* Avoid keeping GC-ed junk stored in JSContext.exception. */
3008 acx->exception = JSVAL_NULL;
3011 for (sh = acx->stackHeaders; sh; sh = sh->down) {
3012 METER(trc->context->runtime->gcStats.stackseg++);
3013 METER(trc->context->runtime->gcStats.segslots += sh->nslots);
3014 TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
3017 if (acx->localRootStack)
3018 js_TraceLocalRoots(trc, acx->localRootStack);
3020 for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
3021 switch (tvr->count) {
3022 case JSTVU_SINGLE:
3023 JS_SET_TRACING_NAME(trc, "tvr->u.value");
3024 js_CallValueTracerIfGCThing(trc, tvr->u.value);
3025 break;
3026 case JSTVU_TRACE:
3027 tvr->u.trace(trc, tvr);
3028 break;
3029 case JSTVU_SPROP:
3030 tvr->u.sprop->trace(trc);
3031 break;
3032 case JSTVU_WEAK_ROOTS:
3033 TraceWeakRoots(trc, tvr->u.weakRoots);
3034 break;
3035 case JSTVU_COMPILER:
3036 tvr->u.compiler->trace(trc);
3037 break;
3038 case JSTVU_SCRIPT:
3039 js_TraceScript(trc, tvr->u.script);
3040 break;
3041 case JSTVU_ENUMERATOR:
3042 static_cast<JSAutoEnumStateRooter *>(tvr)->mark(trc);
3043 break;
3044 default:
3045 JS_ASSERT(tvr->count >= 0);
3046 TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
3050 if (acx->sharpObjectMap.depth > 0)
3051 js_TraceSharpMap(trc, &acx->sharpObjectMap);
3053 js_TraceRegExpStatics(trc, acx);
3055 #ifdef JS_TRACER
3056 InterpState* state = acx->interpState;
3057 while (state) {
3058 if (state->nativeVp)
3059 TRACE_JSVALS(trc, state->nativeVpLen, state->nativeVp, "nativeVp");
3060 state = state->prev;
3062 #endif
3065 #ifdef JS_TRACER
3067 static void
3068 MarkReservedObjects(JSTraceMonitor *tm)
3070 /* Keep the reserved objects. */
3071 for (JSObject *obj = tm->reservedObjects; obj; obj = JSVAL_TO_OBJECT(obj->fslots[0])) {
3072 uint8 *flagp = GetGCThingFlags(obj);
3073 JS_ASSERT((*flagp & GCF_TYPEMASK) == GCX_OBJECT);
3074 JS_ASSERT(*flagp != GCF_FINAL);
3075 *flagp |= GCF_MARK;
3079 #ifdef JS_THREADSAFE
3080 static JSDHashOperator
3081 reserved_objects_marker(JSDHashTable *table, JSDHashEntryHdr *hdr,
3082 uint32, void *)
3084 JSThread *thread = ((JSThreadsHashEntry *) hdr)->thread;
3086 MarkReservedObjects(&thread->data.traceMonitor);
3087 return JS_DHASH_NEXT;
3089 #endif
3091 #endif
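/*
 * Trace everything the GC treats as a root: the root and lock hash tables,
 * atom and number state, every context (frames, weak roots, temp value
 * rooters, ...), per-thread data, the embedding's extra roots and, with the
 * JIT built in, the builtin functions plus (outside shutdown) the trace
 * monitors' reserved objects.
 */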
3093 JS_REQUIRES_STACK void
3094 js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
3096 JSRuntime *rt = trc->context->runtime;
3097 JSContext *iter, *acx;
3099 JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
3100 if (rt->gcLocksHash)
3101 JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
3102 js_TraceAtomState(trc, allAtoms);
3103 js_TraceRuntimeNumberState(trc);
3105 iter = NULL;
3106 while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
3107 js_TraceContext(trc, acx);
3109 js_TraceThreads(rt, trc);
3111 if (rt->gcExtraRootsTraceOp)
3112 rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
3114 #ifdef JS_TRACER
3115 for (int i = 0; i < JSBUILTIN_LIMIT; i++) {
3116 if (rt->builtinFunctions[i])
3117 JS_CALL_OBJECT_TRACER(trc, rt->builtinFunctions[i], "builtin function");
3120 /* Mark the reserved objects unless we are shutting down. */
3121 if (IS_GC_MARKING_TRACER(trc) && rt->state != JSRTS_LANDING) {
3122 #ifdef JS_THREADSAFE
3123 JS_DHashTableEnumerate(&rt->threads, reserved_objects_marker, NULL);
3124 #else
3125 MarkReservedObjects(&rt->threadData.traceMonitor);
3126 #endif
3129 #endif
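/*
 * Request a GC without performing one immediately: set rt->gcIsNeeded and
 * nudge every thread's operation callback so the collection happens at the
 * next safe point.
 */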
3132 void
3133 js_TriggerGC(JSContext *cx, JSBool gcLocked)
3135 JSRuntime *rt = cx->runtime;
3137 #ifdef JS_THREADSAFE
3138 JS_ASSERT(cx->requestDepth > 0);
3139 #endif
3140 JS_ASSERT(!rt->gcRunning);
3141 if (rt->gcIsNeeded)
3142 return;
3145 * Trigger the GC when it is safe to call an operation callback on any
3146 * thread.
3148 rt->gcIsNeeded = JS_TRUE;
3149 js_TriggerAllOperationCallbacks(rt, gcLocked);
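/*
 * Carry out a deferred __proto__ or __parent__ assignment. Walks the
 * existing chain from the new prototype or parent to detect a cycle before
 * storing the new slot value.
 */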
3152 static void
3153 ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
3155 JSObject *obj = ssr->obj;
3156 JSObject *pobj = ssr->pobj;
3157 uint32 slot = ssr->slot;
3159 while (pobj) {
3160 pobj = js_GetWrappedObject(cx, pobj);
3161 if (pobj == obj) {
3162 ssr->cycle = true;
3163 return;
3165 pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
3168 pobj = ssr->pobj;
3169 if (slot == JSSLOT_PROTO) {
3170 obj->setProto(pobj);
3171 } else {
3172 JS_ASSERT(slot == JSSLOT_PARENT);
3173 obj->setParent(pobj);
3177 void
3178 js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data)
3180 JSScript **listp, *script;
3182 for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) {
3183 listp = &data->scriptsToGC[i];
3184 while ((script = *listp) != NULL) {
3185 *listp = script->u.nextToGC;
3186 script->u.nextToGC = NULL;
3187 js_DestroyScript(cx, script);
3192 static void
3193 FinalizeObject(JSContext *cx, JSObject *obj)
3195 /* Cope with stillborn objects that have no map. */
3196 if (!obj->map)
3197 return;
3199 if (JS_UNLIKELY(cx->debugHooks->objectHook != NULL)) {
3200 cx->debugHooks->objectHook(cx, obj, JS_FALSE,
3201 cx->debugHooks->objectHookData);
3204 /* Finalize obj first, in case it needs map and slots. */
3205 JSClass *clasp = STOBJ_GET_CLASS(obj);
3206 if (clasp->finalize)
3207 clasp->finalize(cx, obj);
3209 #ifdef INCLUDE_MOZILLA_DTRACE
3210 if (JAVASCRIPT_OBJECT_FINALIZE_ENABLED())
3211 jsdtrace_object_finalize(obj);
3212 #endif
3214 if (JS_LIKELY(OBJ_IS_NATIVE(obj)))
3215 OBJ_SCOPE(obj)->drop(cx, obj);
3216 js_FreeSlots(cx, obj);
3219 JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT == 8);
3220 static JSStringFinalizeOp str_finalizers[JS_EXTERNAL_STRING_LIMIT] = {
3221 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
3224 intN
3225 js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
3226 JSStringFinalizeOp newop)
3228 for (uintN i = 0; i != JS_ARRAY_LENGTH(str_finalizers); i++) {
3229 if (str_finalizers[i] == oldop) {
3230 str_finalizers[i] = newop;
3231 return intN(i);
3234 return -1;
3238 * cx is NULL when we are called from js_FinishAtomState to force the
3239 * finalization of the permanently interned strings.
3241 void
3242 js_FinalizeStringRT(JSRuntime *rt, JSString *str, intN type, JSContext *cx)
3244 jschar *chars;
3245 JSBool valid;
3246 JSStringFinalizeOp finalizer;
3248 JS_RUNTIME_UNMETER(rt, liveStrings);
3249 JS_ASSERT(!JSString::isStatic(str));
3250 if (str->isDependent()) {
3251 /* A dependent string cannot be external and must be valid. */
3252 JS_ASSERT(type < 0);
3253 JS_ASSERT(str->dependentBase());
3254 JS_RUNTIME_UNMETER(rt, liveDependentStrings);
3255 valid = JS_TRUE;
3256 } else {
3257 /* A stillborn string has null chars, so it is not valid. */
3258 chars = str->flatChars();
3259 valid = (chars != NULL);
3260 if (valid) {
3261 if (type < 0) {
3262 if (cx)
3263 cx->free(chars);
3264 else
3265 rt->free(chars);
3266 } else {
3267 JS_ASSERT((uintN) type < JS_ARRAY_LENGTH(str_finalizers));
3268 finalizer = str_finalizers[type];
3269 if (finalizer) {
3271 * Assume that the finalizer for the permanently interned
3272 * string knows how to deal with null context.
3274 finalizer(cx, str);
3279 if (valid && str->isDeflated())
3280 js_PurgeDeflatedStringCache(rt, str);
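/*
 * js_GC drives the collector: it serializes concurrent invocations through
 * rt->gcLevel (and rt->gcThread under JS_THREADSAFE), waits out active
 * requests, handles GC_SET_SLOT_REQUEST batches, and otherwise runs a full
 * mark phase from all roots followed by a sweep that finalizes unmarked
 * things and releases empty arenas.
 */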
3284 * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
3285 * rt->gcLock already held, so the lock should be kept on return.
3287 void
3288 js_GC(JSContext *cx, JSGCInvocationKind gckind)
3290 JSRuntime *rt;
3291 JSBool keepAtoms;
3292 JSGCCallback callback;
3293 uintN i, type;
3294 JSTracer trc;
3295 uint32 thingSize, indexLimit;
3296 JSGCArenaInfo *a, **ap, *emptyArenas;
3297 uint8 flags, *flagp;
3298 JSGCThing *thing, *freeList;
3299 JSGCArenaList *arenaList;
3300 JSBool allClear;
3301 #ifdef JS_THREADSAFE
3302 uint32 requestDebit;
3303 #endif
3304 #ifdef JS_GCMETER
3305 uint32 nlivearenas, nkilledarenas, nthings;
3306 #endif
3308 JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx));
3309 rt = cx->runtime;
3311 #ifdef JS_THREADSAFE
3313 * We allow js_GC calls outside a request but the context must be bound
3314 * to the current thread.
3316 JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
3318 /* Avoid deadlock. */
3319 JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
3320 #endif
3322 if (gckind & GC_KEEP_ATOMS) {
3324 * The set slot request and last ditch GC kinds preserve all atoms and
3325 * weak roots.
3327 keepAtoms = JS_TRUE;
3328 } else {
3329 /* Keep atoms when a suspended compile is running on another context. */
3330 keepAtoms = (rt->gcKeepAtoms != 0);
3331 JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
3335 * Don't collect garbage if the runtime isn't up, and cx is not the last
3336 * context in the runtime. The last context must force a GC, and nothing
3337 * should suppress that final collection or there may be shutdown leaks,
3338 * or runtime bloat until the next context is created.
3340 if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
3341 return;
3343 restart_at_beginning:
3345 * Let the API user decide to defer a GC if it wants to (unless this
3346 * is the last context). Invoke the callback regardless. Sample the
3347 * callback in case we are freely racing with a JS_SetGCCallback{,RT} on
3348 * another thread.
3350 if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
3351 JSBool ok;
3353 if (gckind & GC_LOCK_HELD)
3354 JS_UNLOCK_GC(rt);
3355 ok = callback(cx, JSGC_BEGIN);
3356 if (gckind & GC_LOCK_HELD)
3357 JS_LOCK_GC(rt);
3358 if (!ok && gckind != GC_LAST_CONTEXT) {
3360 * It's possible that we've looped back to this code from the 'goto
3361 * restart_at_beginning' below in the GC_SET_SLOT_REQUEST code and
3362 * that rt->gcLevel is now 0. Don't return without notifying!
3364 if (rt->gcLevel == 0 && (gckind & GC_LOCK_HELD))
3365 JS_NOTIFY_GC_DONE(rt);
3366 return;
3370 /* Lock out other GC allocator and collector invocations. */
3371 if (!(gckind & GC_LOCK_HELD))
3372 JS_LOCK_GC(rt);
3374 METER(rt->gcStats.poke++);
3375 rt->gcPoke = JS_FALSE;
3377 #ifdef JS_THREADSAFE
3379 * Check if the GC is already running on this or another thread and
3380 * delegate the job to it.
3382 if (rt->gcLevel > 0) {
3383 JS_ASSERT(rt->gcThread);
3385 /* Bump gcLevel to restart the current GC, so it finds new garbage. */
3386 rt->gcLevel++;
3387 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3390 * If the GC runs on another thread, temporarily suspend the current
3391 * request and wait until the GC is done.
3393 if (rt->gcThread != cx->thread) {
3394 requestDebit = js_DiscountRequestsForGC(cx);
3395 js_RecountRequestsAfterGC(rt, requestDebit);
3397 if (!(gckind & GC_LOCK_HELD))
3398 JS_UNLOCK_GC(rt);
3399 return;
3402 /* No other thread is in GC, so indicate that we're now in GC. */
3403 rt->gcLevel = 1;
3404 rt->gcThread = cx->thread;
3407 * Notify all operation callbacks, which will give them a chance to
3408 * yield their current request. Contexts that are not currently
3409 * executing will perform their callback at some later point,
3410 * by which time it will be unnecessary, but harmless.
3412 js_NudgeOtherContexts(cx);
3415 * Discount all the requests on the current thread from contributing
3416 * to rt->requestCount before we wait for all other requests to finish.
3417 * JS_NOTIFY_REQUEST_DONE, which will wake us up, is only called on
3418 * rt->requestCount transitions to 0.
3420 requestDebit = js_CountThreadRequests(cx);
3421 JS_ASSERT_IF(cx->requestDepth != 0, requestDebit >= 1);
3422 rt->requestCount -= requestDebit;
3423 while (rt->requestCount > 0)
3424 JS_AWAIT_REQUEST_DONE(rt);
3425 rt->requestCount += requestDebit;
3427 #else /* !JS_THREADSAFE */
3429 /* Bump gcLevel and return rather than nest; the outer gc will restart. */
3430 rt->gcLevel++;
3431 METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
3432 if (rt->gcLevel > 1)
3433 return;
3435 #endif /* !JS_THREADSAFE */
3438 * Set rt->gcRunning here within the GC lock, and after waiting for any
3439 * active requests to end, so that new requests that try to JS_AddRoot,
3440 * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
3441 * rt->gcLevel to drop to zero, while request-less calls to the *Root*
3442 * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
3443 * waiting for GC to finish.
3445 rt->gcRunning = JS_TRUE;
3447 if (gckind == GC_SET_SLOT_REQUEST) {
3448 JSSetSlotRequest *ssr;
3450 while ((ssr = rt->setSlotRequests) != NULL) {
3451 rt->setSlotRequests = ssr->next;
3452 JS_UNLOCK_GC(rt);
3453 ssr->next = NULL;
3454 ProcessSetSlotRequest(cx, ssr);
3455 JS_LOCK_GC(rt);
3459 * We assume here that killing links to parent and prototype objects
3460 * does not create garbage (such objects typically are long-lived and
3461 * widely shared, e.g. global objects, Function.prototype, etc.). We
3462 * collect garbage only if a racing thread attempted GC and is waiting
3463 * for us to finish (gcLevel > 1) or if someone already poked us.
3465 if (rt->gcLevel == 1 && !rt->gcPoke && !rt->gcIsNeeded)
3466 goto done_running;
3468 rt->gcLevel = 0;
3469 rt->gcPoke = JS_FALSE;
3470 rt->gcRunning = JS_FALSE;
3471 #ifdef JS_THREADSAFE
3472 rt->gcThread = NULL;
3473 #endif
3474 gckind = GC_LOCK_HELD;
3475 goto restart_at_beginning;
3478 JS_UNLOCK_GC(rt);
3480 #ifdef JS_TRACER
3481 if (JS_ON_TRACE(cx))
3482 goto out;
3483 #endif
3484 VOUCH_HAVE_STACK();
3486 /* Clear gcIsNeeded now, when we are about to start a normal GC cycle. */
3487 rt->gcIsNeeded = JS_FALSE;
3489 /* Reset malloc counter. */
3490 rt->gcMallocBytes = 0;
3492 #ifdef JS_DUMP_SCOPE_METERS
3493 { extern void js_DumpScopeMeters(JSRuntime *rt);
3494 js_DumpScopeMeters(rt);
3496 #endif
3498 #ifdef JS_TRACER
3499 js_PurgeJITOracle();
3500 #endif
3502 restart:
3503 rt->gcNumber++;
3504 JS_ASSERT(!rt->gcUntracedArenaStackTop);
3505 JS_ASSERT(rt->gcTraceLaterCount == 0);
3508 * Reset the property cache's type id generator so we can compress ids.
3509 * Same for the protoHazardShape proxy-shape standing in for all object
3510 * prototypes having readonly or setter properties.
3512 if (rt->shapeGen & SHAPE_OVERFLOW_BIT) {
3513 rt->gcRegenShapes = true;
3514 rt->gcRegenShapesScopeFlag ^= JSScope::SHAPE_REGEN;
3515 rt->shapeGen = 0;
3516 rt->protoHazardShape = 0;
3519 js_PurgeThreads(cx);
3520 #ifdef JS_TRACER
3521 if (gckind == GC_LAST_CONTEXT) {
3522 /* Clear builtin functions, which are recreated on demand. */
3523 memset(rt->builtinFunctions, 0, sizeof rt->builtinFunctions);
3525 #endif
3528 * Mark phase.
3530 JS_TRACER_INIT(&trc, cx, NULL);
3531 rt->gcMarkingTracer = &trc;
3532 JS_ASSERT(IS_GC_MARKING_TRACER(&trc));
3534 for (a = rt->gcDoubleArenaList.first; a; a = a->prev)
3535 a->u.hasMarkedDoubles = JS_FALSE;
3537 js_TraceRuntime(&trc, keepAtoms);
3538 js_MarkScriptFilenames(rt, keepAtoms);
3541 * Mark children of things that caused too deep recursion during the above
3542 * tracing.
3544 TraceDelayedChildren(&trc);
3546 JS_ASSERT(!cx->insideGCMarkCallback);
3547 if (rt->gcCallback) {
3548 cx->insideGCMarkCallback = JS_TRUE;
3549 (void) rt->gcCallback(cx, JSGC_MARK_END);
3550 JS_ASSERT(cx->insideGCMarkCallback);
3551 cx->insideGCMarkCallback = JS_FALSE;
3553 JS_ASSERT(rt->gcTraceLaterCount == 0);
3555 rt->gcMarkingTracer = NULL;
3557 #ifdef JS_THREADSAFE
3558 cx->createDeallocatorTask();
3559 #endif
3562 * Sweep phase.
3564 * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
3565 * so that any attempt to allocate a GC-thing from a finalizer will fail,
3566 * rather than nest badly and leave the unmarked newborn to be swept.
3568 * We first sweep atom state so we can use js_IsAboutToBeFinalized on
3569 * JSString or jsdouble held in a hashtable to check if the hashtable
3570 * entry can be freed. Note that even after the entry is freed, JSObject
3571 * finalizers can continue to access the corresponding jsdouble* and
3572 * JSString* assuming that they are unique. This works since the
3573 * atomization API must not be called during GC.
3575 js_SweepAtomState(cx);
3577 /* Finalize iterator states before the objects they iterate over. */
3578 CloseNativeIterators(cx);
3580 /* Finalize watch points associated with unreachable objects. */
3581 js_SweepWatchPoints(cx);
3583 #ifdef DEBUG
3584 /* Save the pre-sweep count of scope-mapped properties. */
3585 rt->liveScopePropsPreSweep = rt->liveScopeProps;
3586 #endif
3589 * Here we need to ensure that JSObject instances are finalized before GC-
3590 * allocated JSString and jsdouble instances, so an object's finalizer can
3591 * access them even though they will be freed. For that we simply finalize
3592 * the list containing JSObject instances first, since the static assert at
3593 * the beginning of the file guarantees that JSString and jsdouble instances
3594 * are allocated from a different list.
3596 emptyArenas = NULL;
3597 for (i = 0; i < GC_NUM_FREELISTS; i++) {
3598 arenaList = &rt->gcArenaList[i == 0
3599 ? GC_FREELIST_INDEX(sizeof(JSObject))
3600 : i == GC_FREELIST_INDEX(sizeof(JSObject))
3601 ? 0
3602 : i];
3603 ap = &arenaList->last;
3604 if (!(a = *ap))
3605 continue;
3607 JS_ASSERT(arenaList->lastCount > 0);
3608 arenaList->freeList = NULL;
3609 freeList = NULL;
3610 thingSize = arenaList->thingSize;
3611 indexLimit = THINGS_PER_ARENA(thingSize);
3612 flagp = THING_FLAGP(a, arenaList->lastCount - 1);
3613 METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
3614 for (;;) {
3615 JS_ASSERT(a->prevUntracedPage == 0);
3616 JS_ASSERT(a->u.untracedThings == 0);
3617 allClear = JS_TRUE;
3618 do {
3619 flags = *flagp;
3620 if (flags & (GCF_MARK | GCF_LOCK)) {
3621 *flagp &= ~GCF_MARK;
3622 allClear = JS_FALSE;
3623 METER(nthings++);
3624 } else {
3625 thing = FLAGP_TO_THING(flagp, thingSize);
3626 if (!(flags & GCF_FINAL)) {
3628 * Call the finalizer with GCF_FINAL ORed into flags.
3630 *flagp = (uint8)(flags | GCF_FINAL);
3631 type = flags & GCF_TYPEMASK;
3632 switch (type) {
3633 case GCX_OBJECT:
3634 FinalizeObject(cx, (JSObject *) thing);
3635 break;
3636 #if JS_HAS_XML_SUPPORT
3637 case GCX_XML:
3638 js_FinalizeXML(cx, (JSXML *) thing);
3639 break;
3640 #endif
3641 default:
3642 JS_ASSERT(type == GCX_STRING ||
3643 type - GCX_EXTERNAL_STRING <
3644 GCX_NTYPES - GCX_EXTERNAL_STRING);
3645 js_FinalizeStringRT(rt, (JSString *) thing,
3646 (intN) (type -
3647 GCX_EXTERNAL_STRING),
3648 cx);
3649 break;
3651 #ifdef DEBUG
3652 memset(thing, JS_FREE_PATTERN, thingSize);
3653 #endif
3655 thing->flagp = flagp;
3656 thing->next = freeList;
3657 freeList = thing;
3659 } while (++flagp != THING_FLAGS_END(a));
3661 if (allClear) {
3663 * Forget the just-assembled free list head for the arena and
3664 * add the arena itself to the destroy list.
3666 freeList = arenaList->freeList;
3667 if (a == arenaList->last)
3668 arenaList->lastCount = indexLimit;
3669 *ap = a->prev;
3670 a->prev = emptyArenas;
3671 emptyArenas = a;
3672 METER(nkilledarenas++);
3673 } else {
3674 arenaList->freeList = freeList;
3675 ap = &a->prev;
3676 METER(nlivearenas++);
3678 if (!(a = *ap))
3679 break;
3680 flagp = THING_FLAGP(a, indexLimit - 1);
3684 * We use arenaList - &rt->gcArenaList[0], not i, as the stat index
3685 * due to the enumeration reorder at the beginning of the loop.
3687 METER(UpdateArenaStats(&rt->gcStats.arenaStats[arenaList -
3688 &rt->gcArenaList[0]],
3689 nlivearenas, nkilledarenas, nthings));
3692 ap = &rt->gcDoubleArenaList.first;
3693 METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
3694 while ((a = *ap) != NULL) {
3695 if (!a->u.hasMarkedDoubles) {
3696 /* No marked double values in the arena. */
3697 *ap = a->prev;
3698 a->prev = emptyArenas;
3699 emptyArenas = a;
3700 METER(nkilledarenas++);
3701 } else {
3702 ap = &a->prev;
3703 #ifdef JS_GCMETER
3704 for (i = 0; i != DOUBLES_PER_ARENA; ++i) {
3705 if (IsMarkedDouble(a, i))
3706 METER(nthings++);
3708 METER(nlivearenas++);
3709 #endif
3712 METER(UpdateArenaStats(&rt->gcStats.doubleArenaStats,
3713 nlivearenas, nkilledarenas, nthings));
3714 rt->gcDoubleArenaList.nextDoubleFlags =
3715 rt->gcDoubleArenaList.first
3716 ? DOUBLE_ARENA_BITMAP(rt->gcDoubleArenaList.first)
3717 : DOUBLE_BITMAP_SENTINEL;
3720 * Sweep the runtime's property tree after finalizing objects, in case any
3721 * had watchpoints referencing tree nodes.
3723 js_SweepScopeProperties(cx);
3726 * Sweep script filenames after sweeping functions in the generic loop
3727 * above. In this way when a scripted function's finalizer destroys the
3728 * script and calls rt->destroyScriptHook, the hook can still access the
3729 * script's filename. See bug 323267.
3731 js_SweepScriptFilenames(rt);
3734 * Destroy arenas only after we have finished sweeping, so that finalizers
3735 * can safely use js_IsAboutToBeFinalized().
3737 DestroyGCArenas(rt, emptyArenas);
3739 #ifdef JS_THREADSAFE
3740 cx->submitDeallocatorTask();
3741 #endif
3743 if (rt->gcCallback)
3744 (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
3745 #ifdef DEBUG_srcnotesize
3746 { extern void DumpSrcNoteSizeHist();
3747 DumpSrcNoteSizeHist();
3748 printf("GC HEAP SIZE %lu\n", (unsigned long)rt->gcBytes);
3750 #endif
3752 #ifdef JS_SCOPE_DEPTH_METER
3753 { static FILE *fp;
3754 if (!fp)
3755 fp = fopen("/tmp/scopedepth.stats", "w");
3757 if (fp) {
3758 JS_DumpBasicStats(&rt->protoLookupDepthStats, "proto-lookup depth", fp);
3759 JS_DumpBasicStats(&rt->scopeSearchDepthStats, "scope-search depth", fp);
3760 JS_DumpBasicStats(&rt->hostenvScopeDepthStats, "hostenv scope depth", fp);
3761 JS_DumpBasicStats(&rt->lexicalScopeDepthStats, "lexical scope depth", fp);
3763 putc('\n', fp);
3764 fflush(fp);
3767 #endif /* JS_SCOPE_DEPTH_METER */
3769 #ifdef JS_DUMP_LOOP_STATS
3770 { static FILE *lsfp;
3771 if (!lsfp)
3772 lsfp = fopen("/tmp/loopstats", "w");
3773 if (lsfp) {
3774 JS_DumpBasicStats(&rt->loopStats, "loops", lsfp);
3775 fflush(lsfp);
3778 #endif /* JS_DUMP_LOOP_STATS */
3780 #ifdef JS_TRACER
3781 out:
3782 #endif
3783 JS_LOCK_GC(rt);
3786 * We want to restart GC if js_GC was called recursively or if any of the
3787 * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
3789 if (!JS_ON_TRACE(cx) && (rt->gcLevel > 1 || rt->gcPoke)) {
3790 VOUCH_HAVE_STACK();
3791 rt->gcLevel = 1;
3792 rt->gcPoke = JS_FALSE;
3793 JS_UNLOCK_GC(rt);
3794 goto restart;
3797 rt->setGCLastBytes(rt->gcBytes);
3798 done_running:
3799 rt->gcLevel = 0;
3800 rt->gcRunning = rt->gcRegenShapes = false;
3802 #ifdef JS_THREADSAFE
3803 rt->gcThread = NULL;
3804 JS_NOTIFY_GC_DONE(rt);
3807 * Unlock unless we have GC_LOCK_HELD, which requires the GC lock to be held on return.
3809 if (!(gckind & GC_LOCK_HELD))
3810 JS_UNLOCK_GC(rt);
3811 #endif
3814 * Execute JSGC_END callback outside the lock. Again, sample the callback
3815 * pointer in case it changes, since we are outside of the GC vs. requests
3816 * interlock mechanism here.
3818 if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
3819 JSWeakRoots savedWeakRoots;
3820 JSTempValueRooter tvr;
3822 if (gckind & GC_KEEP_ATOMS) {
3824 * We allow the JSGC_END implementation to force a full GC or allocate
3825 * new GC things. Thus we must protect the weak roots from garbage
3826 * collection and overwrites.
3828 savedWeakRoots = cx->weakRoots;
3829 JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
3830 JS_KEEP_ATOMS(rt);
3831 JS_UNLOCK_GC(rt);
3834 (void) callback(cx, JSGC_END);
3836 if (gckind & GC_KEEP_ATOMS) {
3837 JS_LOCK_GC(rt);
3838 JS_UNKEEP_ATOMS(rt);
3839 JS_POP_TEMP_ROOT(cx, &tvr);
3840 } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
3842 * On shutdown, iterate until the JSGC_END callback stops creating
3843 * garbage.
3845 goto restart_at_beginning;