1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=78:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
17 * The Original Code is Mozilla Communicator client code, released
18 * March 31, 1998.
20 * The Initial Developer of the Original Code is
21 * Netscape Communications Corporation.
22 * Portions created by the Initial Developer are Copyright (C) 1998
23 * the Initial Developer. All Rights Reserved.
25 * Contributor(s):
27 * Alternatively, the contents of this file may be used under the terms of
28 * either of the GNU General Public License Version 2 or later (the "GPL"),
29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
42 * JS Mark-and-Sweep Garbage Collector.
44 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
45 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
46 * using malloc. It uses an ideally parallel array of flag bytes to hold the
47 * mark bit, finalizer type index, etc.
49 * XXX swizzle page to freelist for better locality of reference
51 #include <stdlib.h> /* for free */
52 #include <math.h>
53 #include <string.h> /* for memset used when DEBUG */
54 #include "jstypes.h"
55 #include "jsstdint.h"
56 #include "jsutil.h" /* Added by JSIFY */
57 #include "jshash.h" /* Added by JSIFY */
58 #include "jsbit.h"
59 #include "jsclist.h"
60 #include "jsprf.h"
61 #include "jsapi.h"
62 #include "jsatom.h"
63 #include "jscntxt.h"
64 #include "jsversion.h"
65 #include "jsdbgapi.h"
66 #include "jsexn.h"
67 #include "jsfun.h"
68 #include "jsgc.h"
69 #include "jsgcchunk.h"
70 #include "jsinterp.h"
71 #include "jsiter.h"
72 #include "jslock.h"
73 #include "jsnum.h"
74 #include "jsobj.h"
75 #include "jsparse.h"
76 #include "jsproxy.h"
77 #include "jsscope.h"
78 #include "jsscript.h"
79 #include "jsstaticcheck.h"
80 #include "jsstr.h"
81 #include "jstask.h"
82 #include "jstracer.h"
83 #include "methodjit/MethodJIT.h"
85 #if JS_HAS_XML_SUPPORT
86 #include "jsxml.h"
87 #endif
89 #include "jsprobes.h"
90 #include "jscntxtinlines.h"
91 #include "jsobjinlines.h"
92 #include "jshashtable.h"
94 #ifdef MOZ_VALGRIND
95 # define JS_VALGRIND
96 #endif
97 #ifdef JS_VALGRIND
98 # include <valgrind/memcheck.h>
99 #endif
101 using namespace js;
104 * Check that JSTRACE_XML follows JSTRACE_OBJECT and JSTRACE_STRING.
106 JS_STATIC_ASSERT(JSTRACE_OBJECT == 0);
107 JS_STATIC_ASSERT(JSTRACE_STRING == 1);
108 JS_STATIC_ASSERT(JSTRACE_XML == 2);
111 * JS_IS_VALID_TRACE_KIND assumes that JSTRACE_STRING is the last non-xml
112 * trace kind when JS_HAS_XML_SUPPORT is false.
114 JS_STATIC_ASSERT(JSTRACE_STRING + 1 == JSTRACE_XML);
117 * Check consistency of external string constants from JSFinalizeGCThingKind.
119 JS_STATIC_ASSERT(FINALIZE_EXTERNAL_STRING_LAST - FINALIZE_EXTERNAL_STRING0 ==
120 JS_EXTERNAL_STRING_LIMIT - 1);
123 * GC memory is allocated in chunks. The size of each chunk is GC_CHUNK_SIZE.
124 * The chunk contains an array of GC arenas holding GC things, an array of
125 * the mark bitmaps for each arena, an array of JSGCArenaInfo arena
126 * descriptors, an array of JSGCMarkingDelay descriptors, the GCChunkInfo
127 * chunk descriptor and a bitmap indicating free arenas in the chunk. The
128 * following picture demonstrates the layout:
130 * +--------+--------------+-------+--------+------------+-----------------+
131 * | arenas | mark bitmaps | infos | delays | chunk info | free arena bits |
132 * +--------+--------------+-------+--------+------------+-----------------+
134 * To ensure fast O(1) lookup of mark bits and arena descriptors each chunk is
135 * allocated on GC_CHUNK_SIZE boundary. This way a simple mask and shift
136 * operation gives an arena index into the mark and JSGCArenaInfo arrays.
138 * All chunks that have at least one free arena are put on the doubly-linked
139 * list with the head stored in JSRuntime.gcChunkList. GCChunkInfo contains
140 * the head of the chunk's free arena list together with the link fields for
141 * gcChunkList.
143 * A GC arena contains GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary
144 * and holds things of the same size and kind. The size of each thing in the
145 * arena must be divisible by GC_CELL_SIZE, the minimal allocation unit, and
146 * the size of the mark bitmap is fixed and is independent of the thing's
147 * size with one bit per each GC_CELL_SIZE bytes. For thing sizes that exceed
148 * GC_CELL_SIZE this implies that we waste space in the mark bitmap. The
149 * advantage is that we can find the mark bit for the thing using just
150 * integer shifts avoiding an expensive integer division. We trade some space
151 * for speed here.
153 * The number of arenas in the chunk is given by GC_ARENAS_PER_CHUNK. We find
154 * that number as follows. Suppose the chunk contains n arenas. Together with the
155 * word-aligned free arena bitmap and GCChunkInfo they should fit into the
156 * chunk. Hence GC_ARENAS_PER_CHUNK or n_max is the maximum value of n for
157 * which the following holds:
159 * n*s + ceil(n/B) <= M (1)
161 * where "/" denotes normal real division,
162 * ceil(r) gives the least integer not smaller than the number r,
163 * s is the number of words occupied by the GC arena, its mark bitmap,
164 * JSGCArenaInfo and JSGCMarkingDelay, i.e. GC_ARENA_ALL_WORDS.
165 * B is the number of bits per word or B == JS_BITS_PER_WORD.
166 * M is the number of words in the chunk without GCChunkInfo or
167 * M == (GC_CHUNK_SIZE - sizeof(GCChunkInfo)) / sizeof(jsuword).
169 * We rewrite the inequality as
171 * n*B*s/B + ceil(n/B) <= M,
172 * ceil(n*B*s/B + n/B) <= M,
173 * ceil(n*(B*s + 1)/B) <= M (2)
175 * We define a helper function e(n, s, B),
177 * e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
179 * It gives:
181 * n*(B*s + 1)/B + e(n, s, B) <= M,
182 * n + e*B/(B*s + 1) <= M*B/(B*s + 1)
184 * We apply the floor function to both sides of the last inequality, where
185 * floor(r) gives the biggest integer not greater than r. As a consequence we
186 * have:
188 * floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
189 * n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
190 * n <= floor(M*B/(B*s + 1)), (3)
192 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
193 * Thus any n that satisfies the original constraint (1) or its equivalent (2),
194 * must also satisfy (3). That is, we get an upper estimate for the maximum
195 * value of n. Let's show that this upper estimate,
197 * floor(M*B/(B*s + 1)), (4)
199 * also satisfies (1) and, as such, gives the required maximum value.
200 * Substituting it into (2) gives:
202 * ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
204 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
206 * ceil(floor(M/X)*X) <= ceil(M) == M.
208 * Thus the value of (4) gives the maximum n satisfying (1).
210 * For the final result we observe that in (4)
212 * M*B == (GC_CHUNK_SIZE - sizeof(GCChunkInfo)) / sizeof(jsuword) *
213 * JS_BITS_PER_WORD
214 * == (GC_CHUNK_SIZE - sizeof(GCChunkInfo)) * JS_BITS_PER_BYTE
216 * since GC_CHUNK_SIZE and sizeof(GCChunkInfo) are at least word-aligned.
219 const jsuword GC_ARENA_SHIFT = 12;
220 const jsuword GC_ARENA_MASK = JS_BITMASK(GC_ARENA_SHIFT);
221 const jsuword GC_ARENA_SIZE = JS_BIT(GC_ARENA_SHIFT);
223 const jsuword GC_MAX_CHUNK_AGE = 3;
225 const size_t GC_CELL_SHIFT = 3;
226 const size_t GC_CELL_SIZE = size_t(1) << GC_CELL_SHIFT;
227 const size_t GC_CELL_MASK = GC_CELL_SIZE - 1;
229 const size_t BITS_PER_GC_CELL = GC_CELL_SIZE * JS_BITS_PER_BYTE;
231 const size_t GC_CELLS_PER_ARENA = size_t(1) << (GC_ARENA_SHIFT - GC_CELL_SHIFT);
232 const size_t GC_MARK_BITMAP_SIZE = GC_CELLS_PER_ARENA / JS_BITS_PER_BYTE;
233 const size_t GC_MARK_BITMAP_WORDS = GC_CELLS_PER_ARENA / JS_BITS_PER_WORD;
235 JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
237 JS_STATIC_ASSERT(sizeof(JSString) % GC_CELL_SIZE == 0);
238 JS_STATIC_ASSERT(sizeof(JSObject) % GC_CELL_SIZE == 0);
239 JS_STATIC_ASSERT(sizeof(JSFunction) % GC_CELL_SIZE == 0);
240 #ifdef JSXML
241 JS_STATIC_ASSERT(sizeof(JSXML) % GC_CELL_SIZE == 0);
242 #endif
244 #ifdef JS_GCMETER
245 # define METER(x) ((void) (x))
246 # define METER_IF(condition, x) ((void) ((condition) && (x)))
247 #else
248 # define METER(x) ((void) 0)
249 # define METER_IF(condition, x) ((void) 0)
250 #endif
252 struct JSGCArenaInfo {
254 * Allocation list for the arena.
256 JSGCArenaList *list;
259 * Pointer to the previous arena in a linked list. The arena can either
260 * belong to one of the JSRuntime.gcArenaList lists or, when it does not
261 * have any allocated GC things, to the chunk's set of free arenas tracked
262 * by the free arena bitmap behind GCChunkInfo.
264 JSGCArena *prev;
266 JSGCThing *freeList;
268 static inline JSGCArenaInfo *fromGCThing(void* thing);
271 /* See comments before ThingsPerUnmarkedBit below. */
272 struct JSGCMarkingDelay {
273 JSGCArena *link;
274 jsuword unmarkedChildren;
277 struct JSGCArena {
278 uint8 data[GC_ARENA_SIZE];
280 void checkAddress() const {
281 JS_ASSERT(!(reinterpret_cast<jsuword>(this) & GC_ARENA_MASK));
284 jsuword toPageStart() const {
285 checkAddress();
286 return reinterpret_cast<jsuword>(this);
289 static inline JSGCArena *fromGCThing(void* thing);
291 static inline JSGCArena *fromChunkAndIndex(jsuword chunk, size_t index);
293 jsuword getChunk() {
294 return toPageStart() & ~GC_CHUNK_MASK;
297 jsuword getIndex() {
298 return (toPageStart() & GC_CHUNK_MASK) >> GC_ARENA_SHIFT;
301 inline JSGCArenaInfo *getInfo();
303 inline JSGCMarkingDelay *getMarkingDelay();
305 inline jsbitmap *getMarkBitmap();
308 namespace js {
310 struct GCChunkInfo {
311 JSRuntime *runtime;
312 size_t numFreeArenas;
313 size_t gcChunkAge;
315 inline void init(JSRuntime *rt);
317 inline jsbitmap *getFreeArenaBitmap();
319 inline jsuword getChunk();
321 inline void clearMarkBitmap();
323 static inline GCChunkInfo *fromChunk(jsuword chunk);
326 } /* namespace js */
328 /* Check that all chunk arrays are at least word-aligned. */
329 JS_STATIC_ASSERT(sizeof(JSGCArena) == GC_ARENA_SIZE);
330 JS_STATIC_ASSERT(GC_MARK_BITMAP_WORDS % sizeof(jsuword) == 0);
331 JS_STATIC_ASSERT(sizeof(JSGCArenaInfo) % sizeof(jsuword) == 0);
332 JS_STATIC_ASSERT(sizeof(JSGCMarkingDelay) % sizeof(jsuword) == 0);
334 const size_t GC_ARENA_ALL_WORDS = (GC_ARENA_SIZE + GC_MARK_BITMAP_SIZE +
335 sizeof(JSGCArenaInfo) +
336 sizeof(JSGCMarkingDelay)) / sizeof(jsuword);
338 /* The value according to (4) above. */
339 const size_t GC_ARENAS_PER_CHUNK =
340 (GC_CHUNK_SIZE - sizeof(GCChunkInfo)) * JS_BITS_PER_BYTE /
341 (JS_BITS_PER_WORD * GC_ARENA_ALL_WORDS + 1);
343 const size_t GC_FREE_ARENA_BITMAP_WORDS = (GC_ARENAS_PER_CHUNK +
344 JS_BITS_PER_WORD - 1) /
345 JS_BITS_PER_WORD;
347 const size_t GC_FREE_ARENA_BITMAP_SIZE = GC_FREE_ARENA_BITMAP_WORDS *
348 sizeof(jsuword);
350 /* Check that GC_ARENAS_PER_CHUNK indeed maximises (1). */
351 JS_STATIC_ASSERT(GC_ARENAS_PER_CHUNK * GC_ARENA_ALL_WORDS +
352 GC_FREE_ARENA_BITMAP_WORDS <=
353 (GC_CHUNK_SIZE - sizeof(GCChunkInfo)) / sizeof(jsuword));
355 JS_STATIC_ASSERT((GC_ARENAS_PER_CHUNK + 1) * GC_ARENA_ALL_WORDS +
356 (GC_ARENAS_PER_CHUNK + 1 + JS_BITS_PER_WORD - 1) /
357 JS_BITS_PER_WORD >
358 (GC_CHUNK_SIZE - sizeof(GCChunkInfo)) / sizeof(jsuword));
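/*
 * Editor's illustrative sketch, not part of the original jsgc.cpp: a
 * standalone program that checks, for arbitrary example values of s, B and
 * M (these numbers are placeholders, not the real build constants), that
 * the closed form floor(M*B/(B*s + 1)) from (4) above is the largest n
 * satisfying inequality (1), n*s + ceil(n/B) <= M.
 */
#include <cassert>
#include <cstddef>
#include <cstdio>

static bool FitsInChunk(size_t n, size_t s, size_t B, size_t M)
{
    /* n arenas plus the word-aligned free arena bitmap must fit in M words. */
    return n * s + (n + B - 1) / B <= M;
}

int main()
{
    const size_t B = 64;       /* example bits per word                 */
    const size_t s = 525;      /* example GC_ARENA_ALL_WORDS            */
    const size_t M = 131069;   /* example chunk words minus GCChunkInfo */

    const size_t nMax = M * B / (B * s + 1);   /* the closed form (4) */

    assert(FitsInChunk(nMax, s, B, M));        /* (4) satisfies (1)... */
    assert(!FitsInChunk(nMax + 1, s, B, M));   /* ...and is maximal    */
    printf("arenas per chunk for the example parameters: %lu\n",
           (unsigned long) nMax);
    return 0;
}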
361 const size_t GC_MARK_BITMAP_ARRAY_OFFSET = GC_ARENAS_PER_CHUNK
362 << GC_ARENA_SHIFT;
364 const size_t GC_ARENA_INFO_ARRAY_OFFSET =
365 GC_MARK_BITMAP_ARRAY_OFFSET + GC_MARK_BITMAP_SIZE * GC_ARENAS_PER_CHUNK;
367 const size_t GC_MARKING_DELAY_ARRAY_OFFSET =
368 GC_ARENA_INFO_ARRAY_OFFSET + sizeof(JSGCArenaInfo) * GC_ARENAS_PER_CHUNK;
370 const size_t GC_CHUNK_INFO_OFFSET = GC_CHUNK_SIZE - GC_FREE_ARENA_BITMAP_SIZE -
371 sizeof(GCChunkInfo);
373 inline jsuword
374 GCChunkInfo::getChunk() {
375 jsuword addr = reinterpret_cast<jsuword>(this);
376 JS_ASSERT((addr & GC_CHUNK_MASK) == GC_CHUNK_INFO_OFFSET);
377 jsuword chunk = addr & ~GC_CHUNK_MASK;
378 return chunk;
381 inline void
382 GCChunkInfo::clearMarkBitmap()
384 PodZero(reinterpret_cast<jsbitmap *>(getChunk() + GC_MARK_BITMAP_ARRAY_OFFSET),
385 GC_MARK_BITMAP_WORDS * GC_ARENAS_PER_CHUNK);
388 /* static */
389 inline GCChunkInfo *
390 GCChunkInfo::fromChunk(jsuword chunk) {
391 JS_ASSERT(!(chunk & GC_CHUNK_MASK));
392 jsuword addr = chunk | GC_CHUNK_INFO_OFFSET;
393 return reinterpret_cast<GCChunkInfo *>(addr);
396 inline jsbitmap *
397 GCChunkInfo::getFreeArenaBitmap()
399 jsuword addr = reinterpret_cast<jsuword>(this);
400 return reinterpret_cast<jsbitmap *>(addr + sizeof(GCChunkInfo));
403 inline void
404 GCChunkInfo::init(JSRuntime *rt)
406 runtime = rt;
407 numFreeArenas = GC_ARENAS_PER_CHUNK;
408 gcChunkAge = 0;
411 * For simplicity we set all bits to 1 including the high bits in the
412 * last word that correspond to nonexistent arenas. This is fine since
413 * the arena allocation scans the bitmap words from lowest to highest bits
414 * and checks numFreeArenas before doing the search.
416 memset(getFreeArenaBitmap(), 0xFF, GC_FREE_ARENA_BITMAP_SIZE);
419 inline void
420 CheckValidGCThingPtr(void *thing)
422 #ifdef DEBUG
423 JS_ASSERT(!JSString::isStatic(thing));
424 jsuword addr = reinterpret_cast<jsuword>(thing);
425 JS_ASSERT(!(addr & GC_CELL_MASK));
426 JS_ASSERT((addr & GC_CHUNK_MASK) < GC_MARK_BITMAP_ARRAY_OFFSET);
427 #endif
430 /* static */
431 inline JSGCArenaInfo *
432 JSGCArenaInfo::fromGCThing(void* thing)
434 CheckValidGCThingPtr(thing);
435 jsuword addr = reinterpret_cast<jsuword>(thing);
436 jsuword chunk = addr & ~GC_CHUNK_MASK;
437 JSGCArenaInfo *array =
438 reinterpret_cast<JSGCArenaInfo *>(chunk | GC_ARENA_INFO_ARRAY_OFFSET);
439 size_t arenaIndex = (addr & GC_CHUNK_MASK) >> GC_ARENA_SHIFT;
440 return array + arenaIndex;
443 /* static */
444 inline JSGCArena *
445 JSGCArena::fromGCThing(void* thing)
447 CheckValidGCThingPtr(thing);
448 jsuword addr = reinterpret_cast<jsuword>(thing);
449 return reinterpret_cast<JSGCArena *>(addr & ~GC_ARENA_MASK);
452 /* static */
453 inline JSGCArena *
454 JSGCArena::fromChunkAndIndex(jsuword chunk, size_t index) {
455 JS_ASSERT(chunk);
456 JS_ASSERT(!(chunk & GC_CHUNK_MASK));
457 JS_ASSERT(index < GC_ARENAS_PER_CHUNK);
458 return reinterpret_cast<JSGCArena *>(chunk | (index << GC_ARENA_SHIFT));
461 inline JSGCArenaInfo *
462 JSGCArena::getInfo()
464 jsuword chunk = getChunk();
465 jsuword index = getIndex();
466 jsuword offset = GC_ARENA_INFO_ARRAY_OFFSET + index * sizeof(JSGCArenaInfo);
467 return reinterpret_cast<JSGCArenaInfo *>(chunk | offset);
470 inline JSGCMarkingDelay *
471 JSGCArena::getMarkingDelay()
473 jsuword chunk = getChunk();
474 jsuword index = getIndex();
475 jsuword offset = GC_MARKING_DELAY_ARRAY_OFFSET +
476 index * sizeof(JSGCMarkingDelay);
477 return reinterpret_cast<JSGCMarkingDelay *>(chunk | offset);
480 inline jsbitmap *
481 JSGCArena::getMarkBitmap()
483 jsuword chunk = getChunk();
484 jsuword index = getIndex();
485 jsuword offset = GC_MARK_BITMAP_ARRAY_OFFSET + index * GC_MARK_BITMAP_SIZE;
486 return reinterpret_cast<jsbitmap *>(chunk | offset);
490 * Helpers for GC-thing operations.
493 inline jsbitmap *
494 GetGCThingMarkBit(void *thing, size_t &bitIndex)
496 CheckValidGCThingPtr(thing);
497 jsuword addr = reinterpret_cast<jsuword>(thing);
498 jsuword chunk = addr & ~GC_CHUNK_MASK;
499 bitIndex = (addr & GC_CHUNK_MASK) >> GC_CELL_SHIFT;
500 return reinterpret_cast<jsbitmap *>(chunk | GC_MARK_BITMAP_ARRAY_OFFSET);
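/*
 * Editor's illustrative sketch, not part of the original jsgc.cpp: the
 * mask-and-shift arithmetic used by fromGCThing and GetGCThingMarkBit
 * above, written as a standalone function. GC_CHUNK_SIZE comes from
 * jsgcchunk.h, so the chunk mask is passed in here; the arena and cell
 * shifts mirror GC_ARENA_SHIFT and GC_CELL_SHIFT defined earlier.
 */
#include <cstddef>
#include <stdint.h>

struct ExampleThingLocation {
    uintptr_t chunkBase;   /* start of the chunk-aligned allocation        */
    size_t arenaIndex;     /* index into the per-chunk JSGCArenaInfo array */
    size_t markBitIndex;   /* bit index into the chunk-wide mark bitmap    */
};

static ExampleThingLocation
ExampleLocateThing(uintptr_t thingAddr, uintptr_t chunkMask)
{
    const unsigned arenaShift = 12;   /* GC_ARENA_SHIFT */
    const unsigned cellShift = 3;     /* GC_CELL_SHIFT  */

    ExampleThingLocation loc;
    loc.chunkBase = thingAddr & ~chunkMask;
    loc.arenaIndex = size_t((thingAddr & chunkMask) >> arenaShift);
    loc.markBitIndex = size_t((thingAddr & chunkMask) >> cellShift);
    return loc;
}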
504 * Live objects are marked black. The number of additional colors available
505 * depends on the size of the GC thing.
507 static const uint32 BLACK = 0;
509 static void
510 AssertValidColor(void *thing, uint32 color)
512 JS_ASSERT_IF(color, color < JSGCArenaInfo::fromGCThing(thing)->list->thingSize / GC_CELL_SIZE);
515 inline bool
516 IsMarkedGCThing(void *thing, uint32 color = BLACK)
518 AssertValidColor(thing, color);
520 size_t index;
521 jsbitmap *markBitmap = GetGCThingMarkBit(thing, index);
522 return !!JS_TEST_BIT(markBitmap, index + color);
526 * The GC always marks live objects BLACK. If color is not BLACK, we also mark
527 * the object with that additional color.
529 inline bool
530 MarkIfUnmarkedGCThing(void *thing, uint32 color = BLACK)
532 AssertValidColor(thing, color);
534 size_t index;
535 jsbitmap *markBitmap = GetGCThingMarkBit(thing, index);
536 if (JS_TEST_BIT(markBitmap, index))
537 return false;
538 JS_SET_BIT(markBitmap, index);
539 if (color != BLACK)
540 JS_SET_BIT(markBitmap, index + color);
541 return true;
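/*
 * Editor's illustrative sketch, not part of the original jsgc.cpp: how many
 * mark colors a thing of a given size can carry. Per AssertValidColor above,
 * a color must satisfy color < thingSize / GC_CELL_SIZE, i.e. a thing owns
 * only as many consecutive mark bits as it spans GC cells.
 */
#include <cstddef>

static size_t ExampleColorsAvailable(size_t thingSize)
{
    const size_t cellSize = 8;    /* GC_CELL_SIZE */
    return thingSize / cellSize;  /* valid colors are 0 .. count-1 */
}
/* E.g. a 32-byte thing spans 4 cells, so colors 0 (BLACK) through 3 are valid. */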
544 size_t
545 ThingsPerArena(size_t thingSize)
547 JS_ASSERT(!(thingSize & GC_CELL_MASK));
548 JS_ASSERT(thingSize <= GC_ARENA_SIZE);
549 return GC_ARENA_SIZE / thingSize;
552 /* Can only be called if thing belongs to an arena where a->list is not null. */
553 inline size_t
554 GCThingToArenaIndex(void *thing)
556 CheckValidGCThingPtr(thing);
557 jsuword addr = reinterpret_cast<jsuword>(thing);
558 jsuword offsetInArena = addr & GC_ARENA_MASK;
559 JSGCArenaInfo *a = JSGCArenaInfo::fromGCThing(thing);
560 JS_ASSERT(a->list);
561 JS_ASSERT(offsetInArena % a->list->thingSize == 0);
562 return offsetInArena / a->list->thingSize;
565 /* Can only be applicable to arena where a->list is not null. */
566 inline uint8 *
567 GCArenaIndexToThing(JSGCArena *a, JSGCArenaInfo *ainfo, size_t index)
569 JS_ASSERT(a->getInfo() == ainfo);
572 * We use "<=" and not "<" in the assert so index can denote the limit.
573 * For the same reason we use "+", not "|", when computing the thing address,
574 * as the limit address can be the start of the next arena.
576 JS_ASSERT(index <= ThingsPerArena(ainfo->list->thingSize));
577 jsuword offsetInArena = index * ainfo->list->thingSize;
578 return reinterpret_cast<uint8 *>(a->toPageStart() + offsetInArena);
582 * The private JSGCThing struct, which describes a JSRuntime.gcFreeList element.
584 struct JSGCThing {
585 JSGCThing *link;
588 static inline JSGCThing *
589 MakeNewArenaFreeList(JSGCArena *a, size_t thingSize)
591 jsuword thingsStart = a->toPageStart();
592 jsuword lastThingMinAddr = thingsStart + GC_ARENA_SIZE - thingSize * 2 + 1;
593 jsuword thingPtr = thingsStart;
594 do {
595 jsuword nextPtr = thingPtr + thingSize;
596 JS_ASSERT((nextPtr & GC_ARENA_MASK) + thingSize <= GC_ARENA_SIZE);
597 JSGCThing *thing = reinterpret_cast<JSGCThing *>(thingPtr);
598 thing->link = reinterpret_cast<JSGCThing *>(nextPtr);
599 thingPtr = nextPtr;
600 } while (thingPtr < lastThingMinAddr);
602 JSGCThing *lastThing = reinterpret_cast<JSGCThing *>(thingPtr);
603 lastThing->link = NULL;
604 return reinterpret_cast<JSGCThing *>(thingsStart);
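/*
 * Editor's illustrative sketch, not part of the original jsgc.cpp: a
 * miniature version of MakeNewArenaFreeList over a heap-allocated buffer.
 * It threads a free list through the buffer in ascending address order,
 * the invariant that CheckGCFreeListLink later relies on. The buffer and
 * thing sizes are arbitrary examples.
 */
#include <cassert>
#include <cstddef>
#include <cstdlib>

struct ExampleFreeCell { ExampleFreeCell *link; };

static ExampleFreeCell *
ExampleBuildFreeList(unsigned char *buf, size_t bufSize, size_t thingSize)
{
    assert(thingSize >= sizeof(ExampleFreeCell) && thingSize <= bufSize);
    size_t count = bufSize / thingSize;
    for (size_t i = 0; i + 1 < count; ++i) {
        ExampleFreeCell *cell = reinterpret_cast<ExampleFreeCell *>(buf + i * thingSize);
        cell->link = reinterpret_cast<ExampleFreeCell *>(buf + (i + 1) * thingSize);
    }
    reinterpret_cast<ExampleFreeCell *>(buf + (count - 1) * thingSize)->link = NULL;
    return reinterpret_cast<ExampleFreeCell *>(buf);
}

static void ExampleDemoFreeList()
{
    const size_t arenaSize = 4096;  /* stand-in for GC_ARENA_SIZE   */
    const size_t thingSize = 32;    /* arbitrary example thing size */
    unsigned char *buf = static_cast<unsigned char *>(malloc(arenaSize));
    if (!buf)
        return;
    ExampleFreeCell *head = ExampleBuildFreeList(buf, arenaSize, thingSize);
    for (ExampleFreeCell *c = head; c->link; c = c->link)
        assert(c < c->link);        /* ascending-address invariant  */
    free(buf);
}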
607 inline jsuword
608 GetGCChunk(JSRuntime *rt)
610 void *p = rt->gcChunkAllocator->alloc();
611 #ifdef MOZ_GCTIMER
612 if (p)
613 JS_ATOMIC_INCREMENT(&newChunkCount);
614 #endif
615 METER_IF(p, rt->gcStats.nchunks++);
616 METER_UPDATE_MAX(rt->gcStats.maxnchunks, rt->gcStats.nchunks);
617 return reinterpret_cast<jsuword>(p);
620 inline void
621 ReleaseGCChunk(JSRuntime *rt, jsuword chunk)
623 void *p = reinterpret_cast<void *>(chunk);
624 JS_ASSERT(p);
625 #ifdef MOZ_GCTIMER
626 JS_ATOMIC_INCREMENT(&destroyChunkCount);
627 #endif
628 JS_ASSERT(rt->gcStats.nchunks != 0);
629 METER(rt->gcStats.nchunks--);
630 rt->gcChunkAllocator->free(p);
633 static JSGCArena *
634 NewGCArena(JSContext *cx)
636 JSRuntime *rt = cx->runtime;
637 if (!JS_THREAD_DATA(cx)->waiveGCQuota &&
638 (rt->gcBytes >= rt->gcMaxBytes ||
639 rt->gcBytes > GC_HEAP_GROWTH_FACTOR * rt->gcNewArenaTriggerBytes)) {
641 * FIXME bug 524051 We cannot run a last-ditch GC on trace for now, so
642 * just pretend we are out of memory which will throw us off trace and
643 * we will re-try this code path from the interpreter.
645 if (!JS_ON_TRACE(cx))
646 return NULL;
647 js_TriggerGC(cx, true);
650 if (rt->gcFreeArenaChunks.empty()) {
651 #ifdef DEBUG
652 for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
653 JS_ASSERT(GCChunkInfo::fromChunk(r.front())->numFreeArenas == 0);
654 #endif
656 * Make sure that after the GC we can append all allocated chunks to
657 * gcFreeArenaChunks.
659 * FIXME bug 583729 - use the same for the rt->gcChunkSet.
661 if (!rt->gcFreeArenaChunks.reserve(rt->gcChunkSet.count() + 1))
662 return NULL;
663 jsuword chunk = GetGCChunk(rt);
664 if (!chunk)
665 return NULL;
666 GCChunkInfo *ci = GCChunkInfo::fromChunk(chunk);
667 ci->init(rt);
670 * FIXME bug 583732 - chunk is newly allocated and cannot be present in
671 * the table, so using ordinary lookupForAdd is suboptimal here.
673 GCChunkSet::AddPtr p = rt->gcChunkSet.lookupForAdd(chunk);
674 JS_ASSERT(!p);
675 if (!rt->gcChunkSet.add(p, chunk)) {
676 ReleaseGCChunk(rt, chunk);
677 return NULL;
679 JS_ALWAYS_TRUE(rt->gcFreeArenaChunks.append(ci));
682 GCChunkInfo *ci = rt->gcFreeArenaChunks.back();
683 JS_ASSERT(ci->numFreeArenas);
685 /* Scan the bitmap for the first non-zero bit. */
686 jsbitmap *freeArenas = ci->getFreeArenaBitmap();
687 size_t arenaIndex = 0;
688 while (!*freeArenas) {
689 arenaIndex += JS_BITS_PER_WORD;
690 freeArenas++;
692 size_t bit = CountTrailingZeros(*freeArenas);
693 arenaIndex += bit;
694 JS_ASSERT(arenaIndex < GC_ARENAS_PER_CHUNK);
695 JS_ASSERT(*freeArenas & (jsuword(1) << bit));
696 *freeArenas &= ~(jsuword(1) << bit);
697 --ci->numFreeArenas;
698 if (ci->numFreeArenas == 0) {
699 JS_ASSERT(ci == rt->gcFreeArenaChunks.back());
700 rt->gcFreeArenaChunks.popBack();
703 rt->gcBytes += GC_ARENA_SIZE;
704 METER(rt->gcStats.nallarenas++);
705 METER_UPDATE_MAX(rt->gcStats.maxnallarenas, rt->gcStats.nallarenas);
707 return JSGCArena::fromChunkAndIndex(ci->getChunk(), arenaIndex);
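/*
 * Editor's illustrative sketch, not part of the original jsgc.cpp: the
 * free-arena bitmap scan performed by NewGCArena above, written against
 * plain unsigned long words with a portable trailing-zero count in place
 * of CountTrailingZeros. As in NewGCArena (which checks numFreeArenas
 * first), the caller must guarantee that at least one bit is set.
 */
#include <cstddef>

static size_t ExampleCountTrailingZeros(unsigned long w)
{
    size_t n = 0;
    while (!(w & 1UL)) {
        w >>= 1;
        ++n;
    }
    return n;
}

static size_t ExampleFindAndClaimFreeArena(unsigned long *bitmap)
{
    size_t index = 0;
    while (!*bitmap) {              /* skip words with no free arenas */
        index += sizeof(unsigned long) * 8;
        ++bitmap;
    }
    size_t bit = ExampleCountTrailingZeros(*bitmap);
    *bitmap &= ~(1UL << bit);       /* claim the arena */
    return index + bit;
}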
711 * This function does not touch the arena or release its memory so code can
712 * still refer into it.
714 static void
715 ReleaseGCArena(JSRuntime *rt, JSGCArena *a)
717 METER(rt->gcStats.afree++);
718 JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
719 rt->gcBytes -= GC_ARENA_SIZE;
720 JS_ASSERT(rt->gcStats.nallarenas != 0);
721 METER(rt->gcStats.nallarenas--);
723 jsuword chunk = a->getChunk();
724 GCChunkInfo *ci = GCChunkInfo::fromChunk(chunk);
725 JS_ASSERT(ci->numFreeArenas <= GC_ARENAS_PER_CHUNK - 1);
726 jsbitmap *freeArenas = ci->getFreeArenaBitmap();
727 JS_ASSERT(!JS_TEST_BIT(freeArenas, a->getIndex()));
728 JS_SET_BIT(freeArenas, a->getIndex());
729 ci->numFreeArenas++;
730 if (ci->numFreeArenas == GC_ARENAS_PER_CHUNK)
731 ci->gcChunkAge = 0;
733 #ifdef DEBUG
734 a->getInfo()->prev = rt->gcEmptyArenaList;
735 rt->gcEmptyArenaList = a;
736 #endif
739 static void
740 FreeGCChunks(JSRuntime *rt)
742 #ifdef DEBUG
743 while (rt->gcEmptyArenaList) {
744 JSGCArena *next = rt->gcEmptyArenaList->getInfo()->prev;
745 memset(rt->gcEmptyArenaList, JS_FREE_PATTERN, GC_ARENA_SIZE);
746 rt->gcEmptyArenaList = next;
748 #endif
750 /* Remove unused chunks and rebuild gcFreeArenaChunks. */
751 rt->gcFreeArenaChunks.clear();
752 JS_ASSERT(rt->gcFreeArenaChunks.capacity() >= rt->gcChunkSet.count());
753 for (GCChunkSet::Enum e(rt->gcChunkSet); !e.empty(); e.popFront()) {
754 GCChunkInfo *ci = GCChunkInfo::fromChunk(e.front());
755 JS_ASSERT(ci->runtime == rt);
756 if (ci->numFreeArenas == GC_ARENAS_PER_CHUNK) {
757 if (ci->gcChunkAge > GC_MAX_CHUNK_AGE) {
758 e.removeFront();
759 ReleaseGCChunk(rt, ci->getChunk());
760 continue;
762 ci->gcChunkAge++;
765 if (ci->numFreeArenas)
766 JS_ALWAYS_TRUE(rt->gcFreeArenaChunks.append(ci));
770 static inline size_t
771 GetFinalizableThingSize(unsigned thingKind)
773 JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT == 8);
775 static const uint8 map[FINALIZE_LIMIT] = {
776 sizeof(JSObject), /* FINALIZE_OBJECT */
777 sizeof(JSFunction), /* FINALIZE_FUNCTION */
778 #if JS_HAS_XML_SUPPORT
779 sizeof(JSXML), /* FINALIZE_XML */
780 #endif
781 sizeof(JSShortString), /* FINALIZE_SHORT_STRING */
782 sizeof(JSString), /* FINALIZE_STRING */
783 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING0 */
784 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING1 */
785 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING2 */
786 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING3 */
787 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING4 */
788 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING5 */
789 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING6 */
790 sizeof(JSString), /* FINALIZE_EXTERNAL_STRING7 */
793 JS_ASSERT(thingKind < FINALIZE_LIMIT);
794 return map[thingKind];
797 static inline size_t
798 GetFinalizableTraceKind(size_t thingKind)
800 JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT == 8);
802 static const uint8 map[FINALIZE_LIMIT] = {
803 JSTRACE_OBJECT, /* FINALIZE_OBJECT */
804 JSTRACE_OBJECT, /* FINALIZE_FUNCTION */
805 #if JS_HAS_XML_SUPPORT /* FINALIZE_XML */
806 JSTRACE_XML,
807 #endif
808 JSTRACE_STRING, /* FINALIZE_SHORT_STRING */
809 JSTRACE_STRING, /* FINALIZE_STRING */
810 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING0 */
811 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING1 */
812 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING2 */
813 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING3 */
814 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING4 */
815 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING5 */
816 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING6 */
817 JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING7 */
820 JS_ASSERT(thingKind < FINALIZE_LIMIT);
821 return map[thingKind];
824 static inline size_t
825 GetFinalizableArenaTraceKind(JSGCArenaInfo *ainfo)
827 JS_ASSERT(ainfo->list);
828 return GetFinalizableTraceKind(ainfo->list->thingKind);
831 static inline size_t
832 GetArenaTraceKind(JSGCArenaInfo *ainfo)
834 return GetFinalizableArenaTraceKind(ainfo);
837 static inline size_t
838 GetFinalizableThingTraceKind(void *thing)
840 JSGCArenaInfo *ainfo = JSGCArenaInfo::fromGCThing(thing);
841 return GetFinalizableArenaTraceKind(ainfo);
844 static void
845 InitGCArenaLists(JSRuntime *rt)
847 for (unsigned i = 0; i != FINALIZE_LIMIT; ++i) {
848 JSGCArenaList *arenaList = &rt->gcArenaList[i];
849 arenaList->head = NULL;
850 arenaList->cursor = NULL;
851 arenaList->thingKind = i;
852 arenaList->thingSize = GetFinalizableThingSize(i);
856 static void
857 FinishGCArenaLists(JSRuntime *rt)
859 for (unsigned i = 0; i < FINALIZE_LIMIT; i++) {
860 rt->gcArenaList[i].head = NULL;
861 rt->gcArenaList[i].cursor = NULL;
864 rt->gcBytes = 0;
866 for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
867 ReleaseGCChunk(rt, r.front());
868 rt->gcChunkSet.clear();
869 rt->gcFreeArenaChunks.clear();
872 intN
873 js_GetExternalStringGCType(JSString *str)
875 JS_STATIC_ASSERT(FINALIZE_STRING + 1 == FINALIZE_EXTERNAL_STRING0);
876 JS_ASSERT(!JSString::isStatic(str));
878 unsigned thingKind = JSGCArenaInfo::fromGCThing(str)->list->thingKind;
879 JS_ASSERT(IsFinalizableStringKind(thingKind));
880 return intN(thingKind) - intN(FINALIZE_EXTERNAL_STRING0);
883 JS_FRIEND_API(uint32)
884 js_GetGCThingTraceKind(void *thing)
886 if (JSString::isStatic(thing))
887 return JSTRACE_STRING;
889 JSGCArenaInfo *ainfo = JSGCArenaInfo::fromGCThing(thing);
890 return GetArenaTraceKind(ainfo);
893 JSRuntime *
894 js_GetGCThingRuntime(void *thing)
896 jsuword chunk = JSGCArena::fromGCThing(thing)->getChunk();
897 return GCChunkInfo::fromChunk(chunk)->runtime;
900 JS_FRIEND_API(bool)
901 js_IsAboutToBeFinalized(void *thing)
903 if (JSString::isStatic(thing))
904 return false;
906 return !IsMarkedGCThing(thing);
909 JS_FRIEND_API(bool)
910 js_GCThingIsMarked(void *thing, uint32 color)
912 return IsMarkedGCThing(thing, color);
915 JSBool
916 js_InitGC(JSRuntime *rt, uint32 maxbytes)
918 InitGCArenaLists(rt);
921 * Make room for at least 16 chunks so the table does not have to grow before
922 * the browser starts up.
924 if (!rt->gcChunkSet.init(16))
925 return false;
927 if (!rt->gcRootsHash.init(256))
928 return false;
930 if (!rt->gcLocksHash.init(256))
931 return false;
933 #ifdef JS_THREADSAFE
934 if (!rt->gcHelperThread.init())
935 return false;
936 #endif
939 * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
940 * for default backward API compatibility.
942 rt->gcMaxBytes = maxbytes;
943 rt->setGCMaxMallocBytes(maxbytes);
945 rt->gcEmptyArenaPoolLifespan = 30000;
948 * By default the trigger factor gets the maximum possible value. This
949 * means that the GC will not be triggered by growth of GC memory (gcBytes).
951 rt->setGCTriggerFactor((uint32) -1);
954 * The assigned value prevents GC from running when GC memory is too low
955 * (during JS engine start).
957 rt->setGCLastBytes(8192);
958 rt->gcNewArenaTriggerBytes = GC_ARENA_ALLOCATION_TRIGGER;
960 METER(PodZero(&rt->gcStats));
961 return true;
964 namespace js {
967 * Returns CGCT_VALID if w can be a live GC thing and sets thing and traceKind
968 * accordingly. Otherwise returns the reason for rejection.
970 inline ConservativeGCTest
971 IsGCThingWord(JSRuntime *rt, jsuword w, void *&thing, uint32 &traceKind)
974 * The conservative scanner may access words that valgrind considers
975 * undefined. To avoid false positives and to avoid altering valgrind's view
976 * of the memory, we mark the argument, a copy of the original word, as
977 * memcheck-defined. See bug 572678.
979 #ifdef JS_VALGRIND
980 VALGRIND_MAKE_MEM_DEFINED(&w, sizeof(w));
981 #endif
984 * We assume that the compiler never uses sub-word alignment to store
985 * pointers and does not tag pointers on its own. Additionally, the value
986 * representation for all values and the jsid representation for GC-things
987 * do not touch the low two bits. Thus any word with the low two bits set
988 * is not a valid GC-thing.
990 JS_STATIC_ASSERT(JSID_TYPE_STRING == 0 && JSID_TYPE_OBJECT == 4);
991 if (w & 0x3)
992 return CGCT_LOWBITSET;
995 * An object jsid has its low bits tagged. In the value representation on
996 * 64-bit, the high bits are tagged.
998 const jsuword JSID_PAYLOAD_MASK = ~jsuword(JSID_TYPE_MASK);
999 #if JS_BITS_PER_WORD == 32
1000 jsuword payload = w & JSID_PAYLOAD_MASK;
1001 #elif JS_BITS_PER_WORD == 64
1002 jsuword payload = w & JSID_PAYLOAD_MASK & JSVAL_PAYLOAD_MASK;
1003 #endif
1005 jsuword chunk = payload & ~GC_CHUNK_MASK;
1006 if (!rt->gcChunkSet.has(chunk))
1007 return CGCT_NOTCHUNK;
1009 GCChunkInfo *ci = GCChunkInfo::fromChunk(chunk);
1011 if ((payload & GC_CHUNK_MASK) >= GC_MARK_BITMAP_ARRAY_OFFSET)
1012 return CGCT_NOTARENA;
1014 size_t arenaIndex = (payload & GC_CHUNK_MASK) >> GC_ARENA_SHIFT;
1015 if (JS_TEST_BIT(ci->getFreeArenaBitmap(), arenaIndex))
1016 return CGCT_FREEARENA;
1018 JSGCArena *a = JSGCArena::fromChunkAndIndex(chunk, arenaIndex);
1019 JSGCArenaInfo *ainfo = a->getInfo();
1021 traceKind = GetFinalizableArenaTraceKind(ainfo);
1024 * On 64-bit we might consider using the tag bits in w to disqualify
1025 * additional false roots; however, the condition would have to look
1026 * something like:
1028 * if ((traceKind == JSTRACE_STRING && tag > 0 && tag != JSVAL_TAG_SHIFT) ||
1029 * (traceKind == JSTRACE_OBJECT && tag > 0 && tag != JSVAL_TAG_OBJECT))
1030 * return CGCT_WRONGTAG;
1032 * However, it seems like we should measure how often this actually avoids
1033 * false roots.
1036 jsuword start = a->toPageStart();
1037 jsuword offset = payload - start;
1038 size_t thingSize = ainfo->list->thingSize;
1039 offset -= offset % thingSize;
1042 * If GC_ARENA_SIZE % thingSize != 0, i.e. when thingSize is not a power
1043 * of two, a thingSize-aligned pointer may point past the last complete
1044 * thing yet still be inside the arena.
1046 if (offset + thingSize > GC_ARENA_SIZE) {
1047 JS_ASSERT(thingSize & (thingSize - 1));
1048 return CGCT_NOTARENA;
1050 thing = (JSGCThing *) (start + offset);
1052 /* Make sure the thing is not on the freelist of the arena. */
1053 JSGCThing *cursor = ainfo->freeList;
1054 while (cursor) {
1055 JS_ASSERT((((jsuword) cursor) & GC_ARENA_MASK) % thingSize == 0);
1056 JS_ASSERT(!IsMarkedGCThing(cursor));
1058 /* If the cursor moves past the thing, it's not in the freelist. */
1059 if (thing < cursor)
1060 break;
1062 /* If we find it on the freelist, it's dead. */
1063 if (thing == cursor)
1064 return CGCT_NOTLIVE;
1065 JS_ASSERT_IF(cursor->link, cursor < cursor->link);
1066 cursor = cursor->link;
1069 return CGCT_VALID;
1072 inline ConservativeGCTest
1073 IsGCThingWord(JSRuntime *rt, jsuword w)
1075 void *thing;
1076 uint32 traceKind;
1077 return IsGCThingWord(rt, w, thing, traceKind);
1080 static void
1081 MarkWordConservatively(JSTracer *trc, jsuword w)
1084 * The conservative scanner may access words that valgrind considers
1085 * undefined. To avoid false positives and to avoid altering valgrind's view
1086 * of the memory, we mark the argument, a copy of the original word, as
1087 * memcheck-defined. See bug 572678.
1089 #ifdef JS_VALGRIND
1090 VALGRIND_MAKE_MEM_DEFINED(&w, sizeof(w));
1091 #endif
1093 void *thing;
1094 uint32 traceKind;
1095 ConservativeGCTest test = IsGCThingWord(trc->context->runtime, w, thing, traceKind);
1096 if (test == CGCT_VALID) {
1097 Mark(trc, thing, traceKind, "machine stack");
1098 #ifdef JS_DUMP_CONSERVATIVE_GC_ROOTS
1099 if (IS_GC_MARKING_TRACER(trc) && static_cast<GCMarker *>(trc)->conservativeDumpFileName) {
1100 GCMarker::ConservativeRoot root = {thing, traceKind};
1101 static_cast<GCMarker *>(trc)->conservativeRoots.append(root);
1103 #endif
1106 #if defined JS_DUMP_CONSERVATIVE_GC_ROOTS || defined JS_GCMETER
1107 if (IS_GC_MARKING_TRACER(trc))
1108 static_cast<GCMarker *>(trc)->conservativeStats.counter[test]++;
1109 #endif
1112 static void
1113 MarkRangeConservatively(JSTracer *trc, jsuword *begin, jsuword *end)
1115 JS_ASSERT(begin <= end);
1116 for (jsuword *i = begin; i != end; ++i)
1117 MarkWordConservatively(trc, *i);
1120 static void
1121 MarkThreadDataConservatively(JSTracer *trc, JSThreadData *td)
1123 ConservativeGCThreadData *ctd = &td->conservativeGC;
1124 JS_ASSERT(ctd->hasStackToScan());
1125 jsuword *stackMin, *stackEnd;
1126 #if JS_STACK_GROWTH_DIRECTION > 0
1127 stackMin = td->nativeStackBase;
1128 stackEnd = ctd->nativeStackTop;
1129 #else
1130 stackMin = ctd->nativeStackTop + 1;
1131 stackEnd = td->nativeStackBase;
1132 #endif
1133 JS_ASSERT(stackMin <= stackEnd);
1134 MarkRangeConservatively(trc, stackMin, stackEnd);
1135 MarkRangeConservatively(trc, ctd->registerSnapshot.words,
1136 JS_ARRAY_END(ctd->registerSnapshot.words));
1140 void
1141 MarkStackRangeConservatively(JSTracer *trc, Value *beginv, Value *endv)
1143 jsuword *begin = (jsuword *) beginv;
1144 jsuword *end = (jsuword *) endv;
1145 #ifdef JS_NUNBOX32
1147 * With 64-bit jsvals on 32-bit systems, we can optimize a bit by
1148 * scanning only the payloads.
1150 JS_ASSERT(begin <= end);
1151 for (jsuword *i = begin; i != end; i += 2)
1152 MarkWordConservatively(trc, *i);
1153 #else
1154 MarkRangeConservatively(trc, begin, end);
1155 #endif
1158 void
1159 MarkConservativeStackRoots(JSTracer *trc)
1161 #ifdef JS_THREADSAFE
1162 for (JSThread::Map::Range r = trc->context->runtime->threads.all(); !r.empty(); r.popFront()) {
1163 JSThread *thread = r.front().value;
1164 ConservativeGCThreadData *ctd = &thread->data.conservativeGC;
1165 if (ctd->hasStackToScan()) {
1166 JS_ASSERT_IF(!thread->requestDepth, thread->suspendCount);
1167 MarkThreadDataConservatively(trc, &thread->data);
1168 } else {
1169 JS_ASSERT(!thread->suspendCount);
1170 JS_ASSERT(thread->requestDepth <= ctd->requestThreshold);
1173 #else
1174 MarkThreadDataConservatively(trc, &trc->context->runtime->threadData);
1175 #endif
1178 JS_NEVER_INLINE void
1179 ConservativeGCThreadData::recordStackTop()
1181 /* Update the native stack pointer if it points to a bigger stack. */
1182 jsuword dummy;
1183 nativeStackTop = &dummy;
1185 /* Update the register snapshot with the latest values. */
1186 #if defined(_MSC_VER)
1187 # pragma warning(push)
1188 # pragma warning(disable: 4611)
1189 #endif
1190 setjmp(registerSnapshot.jmpbuf);
1191 #if defined(_MSC_VER)
1192 # pragma warning(pop)
1193 #endif
1197 static inline void
1198 RecordNativeStackTopForGC(JSContext *cx)
1200 ConservativeGCThreadData *ctd = &JS_THREAD_DATA(cx)->conservativeGC;
1202 #ifdef JS_THREADSAFE
1203 /* Record the stack top here only if we are called from a request. */
1204 JS_ASSERT(cx->thread->requestDepth >= ctd->requestThreshold);
1205 if (cx->thread->requestDepth == ctd->requestThreshold)
1206 return;
1207 #endif
1208 ctd->recordStackTop();
1211 } /* namespace js */
1213 #ifdef DEBUG
1214 static void
1215 CheckLeakedRoots(JSRuntime *rt);
1216 #endif
1218 void
1219 js_FinishGC(JSRuntime *rt)
1221 #ifdef JS_ARENAMETER
1222 JS_DumpArenaStats(stdout);
1223 #endif
1224 #ifdef JS_GCMETER
1225 if (JS_WANT_GC_METER_PRINT)
1226 js_DumpGCStats(rt, stdout);
1227 #endif
1229 #ifdef JS_THREADSAFE
1230 rt->gcHelperThread.cancel();
1231 #endif
1232 FinishGCArenaLists(rt);
1234 #ifdef DEBUG
1235 if (!rt->gcRootsHash.empty())
1236 CheckLeakedRoots(rt);
1237 #endif
1238 rt->gcRootsHash.clear();
1239 rt->gcLocksHash.clear();
1242 JSBool
1243 js_AddRoot(JSContext *cx, Value *vp, const char *name)
1245 JSBool ok = js_AddRootRT(cx->runtime, Jsvalify(vp), name);
1246 if (!ok)
1247 JS_ReportOutOfMemory(cx);
1248 return ok;
1251 JSBool
1252 js_AddGCThingRoot(JSContext *cx, void **rp, const char *name)
1254 JSBool ok = js_AddGCThingRootRT(cx->runtime, rp, name);
1255 if (!ok)
1256 JS_ReportOutOfMemory(cx);
1257 return ok;
1260 JS_FRIEND_API(JSBool)
1261 js_AddRootRT(JSRuntime *rt, jsval *vp, const char *name)
1264 * Due to the long-standing, but now removed, use of rt->gcLock across the
1265 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1266 * properly with a racing GC, without calling JS_AddRoot from a request.
1267 * We have to preserve API compatibility here, now that we avoid holding
1268 * rt->gcLock across the mark phase (including the root hashtable mark).
1270 AutoLockGC lock(rt);
1271 js_WaitForGC(rt);
1273 return !!rt->gcRootsHash.put((void *)vp,
1274 RootInfo(name, JS_GC_ROOT_VALUE_PTR));
1277 JS_FRIEND_API(JSBool)
1278 js_AddGCThingRootRT(JSRuntime *rt, void **rp, const char *name)
1281 * Due to the long-standing, but now removed, use of rt->gcLock across the
1282 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1283 * properly with a racing GC, without calling JS_AddRoot from a request.
1284 * We have to preserve API compatibility here, now that we avoid holding
1285 * rt->gcLock across the mark phase (including the root hashtable mark).
1287 AutoLockGC lock(rt);
1288 js_WaitForGC(rt);
1290 return !!rt->gcRootsHash.put((void *)rp,
1291 RootInfo(name, JS_GC_ROOT_GCTHING_PTR));
1294 JS_FRIEND_API(JSBool)
1295 js_RemoveRoot(JSRuntime *rt, void *rp)
1298 * Due to the JS_RemoveRootRT API, we may be called outside of a request.
1299 * Same synchronization drill as above in js_AddRoot.
1301 AutoLockGC lock(rt);
1302 js_WaitForGC(rt);
1303 rt->gcRootsHash.remove(rp);
1304 rt->gcPoke = JS_TRUE;
1305 return JS_TRUE;
1308 typedef RootedValueMap::Range RootRange;
1309 typedef RootedValueMap::Entry RootEntry;
1310 typedef RootedValueMap::Enum RootEnum;
1312 #ifdef DEBUG
1314 static void
1315 CheckLeakedRoots(JSRuntime *rt)
1317 uint32 leakedroots = 0;
1319 /* In debug builds, warn (but don't assert) about any remaining roots. */
1320 for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront()) {
1321 RootEntry &entry = r.front();
1322 leakedroots++;
1323 fprintf(stderr,
1324 "JS engine warning: leaking GC root \'%s\' at %p\n",
1325 entry.value.name ? entry.value.name : "", entry.key);
1328 if (leakedroots > 0) {
1329 if (leakedroots == 1) {
1330 fprintf(stderr,
1331 "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
1332 " This root may point to freed memory. Objects reachable\n"
1333 " through it have not been finalized.\n",
1334 (void *) rt);
1335 } else {
1336 fprintf(stderr,
1337 "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
1338 " These roots may point to freed memory. Objects reachable\n"
1339 " through them have not been finalized.\n",
1340 (unsigned long) leakedroots, (void *) rt);
1345 void
1346 js_DumpNamedRoots(JSRuntime *rt,
1347 void (*dump)(const char *name, void *rp, JSGCRootType type, void *data),
1348 void *data)
1350 for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront()) {
1351 RootEntry &entry = r.front();
1352 if (const char *name = entry.value.name)
1353 dump(name, entry.key, entry.value.type, data);
1357 #endif /* DEBUG */
1359 uint32
1360 js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
1362 AutoLockGC lock(rt);
1363 int ct = 0;
1364 for (RootEnum e(rt->gcRootsHash); !e.empty(); e.popFront()) {
1365 RootEntry &entry = e.front();
1367 ct++;
1368 intN mapflags = map(entry.key, entry.value.type, entry.value.name, data);
1370 if (mapflags & JS_MAP_GCROOT_REMOVE)
1371 e.removeFront();
1372 if (mapflags & JS_MAP_GCROOT_STOP)
1373 break;
1376 return ct;
1379 void
1380 JSRuntime::setGCTriggerFactor(uint32 factor)
1382 JS_ASSERT(factor >= 100);
1384 gcTriggerFactor = factor;
1385 setGCLastBytes(gcLastBytes);
1388 void
1389 JSRuntime::setGCLastBytes(size_t lastBytes)
1391 gcLastBytes = lastBytes;
1392 uint64 triggerBytes = uint64(lastBytes) * uint64(gcTriggerFactor / 100);
1393 if (triggerBytes != size_t(triggerBytes))
1394 triggerBytes = size_t(-1);
1395 gcTriggerBytes = size_t(triggerBytes);
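/*
 * Editor's illustrative sketch, not part of the original jsgc.cpp: the
 * saturation idiom used by setGCLastBytes above. The trigger is computed
 * in 64 bits and, if it does not round-trip through size_t, clamped to the
 * maximum size_t value so that gcBytes can never reach it.
 */
#include <cstddef>
#include <stdint.h>

static size_t ExampleComputeTriggerBytes(size_t lastBytes, uint32_t factorPercent)
{
    uint64_t trigger = uint64_t(lastBytes) * uint64_t(factorPercent / 100);
    if (trigger != uint64_t(size_t(trigger)))
        return size_t(-1);          /* saturate: growth can never trigger a GC */
    return size_t(trigger);
}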
1398 void
1399 JSGCFreeLists::purge()
1402 * Return the free list to the arena so the GC finalization will not
1403 * run the finalizers over uninitialized bytes from free things.
1405 for (JSGCThing **p = finalizables; p != JS_ARRAY_END(finalizables); ++p) {
1406 JSGCThing *freeListHead = *p;
1407 if (freeListHead) {
1408 JSGCArenaInfo *ainfo = JSGCArenaInfo::fromGCThing(freeListHead);
1409 JS_ASSERT(!ainfo->freeList);
1410 ainfo->freeList = freeListHead;
1411 *p = NULL;
1416 void
1417 JSGCFreeLists::moveTo(JSGCFreeLists *another)
1419 *another = *this;
1420 PodArrayZero(finalizables);
1421 JS_ASSERT(isEmpty());
1424 static inline bool
1425 IsGCThresholdReached(JSRuntime *rt)
1427 #ifdef JS_GC_ZEAL
1428 if (rt->gcZeal >= 1)
1429 return true;
1430 #endif
1433 * Since the initial value of the gcLastBytes parameter is not equal to
1434 * zero (see the js_InitGC function) the return value is false when
1435 * the gcBytes value is close to zero at the JS engine start.
1437 return rt->isGCMallocLimitReached() || rt->gcBytes >= rt->gcTriggerBytes;
1440 static void
1441 LastDitchGC(JSContext *cx)
1443 JS_ASSERT(!JS_ON_TRACE(cx));
1445 /* The last ditch GC preserves weak roots and all atoms. */
1446 AutoKeepAtoms keep(cx->runtime);
1449 * Keep rt->gcLock across the call into the GC so we don't starve and
1450 * lose to racing threads who deplete the heap just after the GC has
1451 * replenished it (or has synchronized with a racing GC that collected a
1452 * bunch of garbage). This unfair scheduling can happen on certain
1453 * operating systems. For the gory details, see bug 162779.
1455 js_GC(cx, GC_LOCK_HELD);
1458 static JSGCThing *
1459 RefillFinalizableFreeList(JSContext *cx, unsigned thingKind)
1461 JS_ASSERT(!JS_THREAD_DATA(cx)->gcFreeLists.finalizables[thingKind]);
1462 JSRuntime *rt = cx->runtime;
1463 JSGCArenaList *arenaList;
1464 JSGCArena *a;
1467 AutoLockGC lock(rt);
1468 JS_ASSERT(!rt->gcRunning);
1469 if (rt->gcRunning)
1470 return NULL;
1472 bool canGC = !JS_ON_TRACE(cx) && !JS_THREAD_DATA(cx)->waiveGCQuota;
1473 bool doGC = canGC && IsGCThresholdReached(rt);
1474 arenaList = &rt->gcArenaList[thingKind];
1475 for (;;) {
1476 if (doGC) {
1477 LastDitchGC(cx);
1478 METER(cx->runtime->gcArenaStats[thingKind].retry++);
1479 canGC = false;
1482 * The JSGC_END callback can legitimately allocate new GC
1483 * things and populate the free list. If that happens, just
1484 * return that list head.
1486 JSGCThing *freeList = JS_THREAD_DATA(cx)->gcFreeLists.finalizables[thingKind];
1487 if (freeList)
1488 return freeList;
1491 while ((a = arenaList->cursor) != NULL) {
1492 JSGCArenaInfo *ainfo = a->getInfo();
1493 arenaList->cursor = ainfo->prev;
1494 JSGCThing *freeList = ainfo->freeList;
1495 if (freeList) {
1496 ainfo->freeList = NULL;
1497 return freeList;
1501 a = NewGCArena(cx);
1502 if (a)
1503 break;
1504 if (!canGC) {
1505 METER(cx->runtime->gcArenaStats[thingKind].fail++);
1506 return NULL;
1508 doGC = true;
1512 * Do only minimal initialization of the arena inside the GC lock. We
1513 * can do the rest outside the lock because no other threads will see
1514 * the arena until the GC is run.
1516 JSGCArenaInfo *ainfo = a->getInfo();
1517 ainfo->list = arenaList;
1518 ainfo->prev = arenaList->head;
1519 ainfo->freeList = NULL;
1520 arenaList->head = a;
1523 JSGCMarkingDelay *markingDelay = a->getMarkingDelay();
1524 markingDelay->link = NULL;
1525 markingDelay->unmarkedChildren = 0;
1527 return MakeNewArenaFreeList(a, arenaList->thingSize);
1530 static inline void
1531 CheckGCFreeListLink(JSGCThing *thing)
1534 * The GC things on a free list come from a single arena and are linked
1535 * in ascending address order.
1537 JS_ASSERT_IF(thing->link,
1538 JSGCArena::fromGCThing(thing) ==
1539 JSGCArena::fromGCThing(thing->link));
1540 JS_ASSERT_IF(thing->link, thing < thing->link);
1543 void *
1544 js_NewFinalizableGCThing(JSContext *cx, unsigned thingKind)
1546 JS_ASSERT(thingKind < FINALIZE_LIMIT);
1547 #ifdef JS_THREADSAFE
1548 JS_ASSERT(cx->thread);
1549 #endif
1551 /* Updates of metering counters here may not be thread-safe. */
1552 METER(cx->runtime->gcArenaStats[thingKind].alloc++);
1554 JSGCThing **freeListp =
1555 JS_THREAD_DATA(cx)->gcFreeLists.finalizables + thingKind;
1556 JSGCThing *thing = *freeListp;
1557 if (thing) {
1558 *freeListp = thing->link;
1559 CheckGCFreeListLink(thing);
1560 METER(cx->runtime->gcArenaStats[thingKind].localalloc++);
1561 return thing;
1564 thing = RefillFinalizableFreeList(cx, thingKind);
1565 if (!thing) {
1566 js_ReportOutOfMemory(cx);
1567 return NULL;
1571 * See comments in RefillFinalizableFreeList about the possibility
1572 * of *freeListp == thing.
1574 JS_ASSERT(!*freeListp || *freeListp == thing);
1575 *freeListp = thing->link;
1577 CheckGCFreeListLink(thing);
1579 return thing;
1582 JSBool
1583 js_LockGCThingRT(JSRuntime *rt, void *thing)
1585 GCLocks *locks;
1587 if (!thing)
1588 return true;
1589 locks = &rt->gcLocksHash;
1590 AutoLockGC lock(rt);
1591 GCLocks::AddPtr p = locks->lookupForAdd(thing);
1593 if (!p) {
1594 if (!locks->add(p, thing, 1))
1595 return false;
1596 } else {
1597 JS_ASSERT(p->value >= 1);
1598 p->value++;
1601 METER(rt->gcStats.lock++);
1602 return true;
1605 void
1606 js_UnlockGCThingRT(JSRuntime *rt, void *thing)
1608 if (!thing)
1609 return;
1611 AutoLockGC lock(rt);
1612 GCLocks::Ptr p = rt->gcLocksHash.lookup(thing);
1614 if (p) {
1615 rt->gcPoke = true;
1616 if (--p->value == 0)
1617 rt->gcLocksHash.remove(p);
1619 METER(rt->gcStats.unlock++);
1623 JS_PUBLIC_API(void)
1624 JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
1626 switch (kind) {
1627 case JSTRACE_OBJECT: {
1628 /* If obj has no map, it must be a newborn. */
1629 JSObject *obj = (JSObject *) thing;
1630 if (!obj->map)
1631 break;
1632 if (JSObject *proto = obj->getProto())
1633 JS_CALL_OBJECT_TRACER(trc, proto, "proto");
1634 if (JSObject *parent = obj->getParent())
1635 JS_CALL_OBJECT_TRACER(trc, parent, "parent");
1636 JSTraceOp op = obj->getOps()->trace;
1637 (op ? op : js_TraceObject)(trc, obj);
1638 break;
1641 case JSTRACE_STRING: {
1642 JSString *str = (JSString *) thing;
1643 if (str->isDependent())
1644 JS_CALL_STRING_TRACER(trc, str->dependentBase(), "base");
1645 else if (str->isRope()) {
1646 if (str->isInteriorNode())
1647 JS_CALL_STRING_TRACER(trc, str->interiorNodeParent(), "parent");
1648 JS_CALL_STRING_TRACER(trc, str->ropeLeft(), "left child");
1649 JS_CALL_STRING_TRACER(trc, str->ropeRight(), "right child");
1651 break;
1654 #if JS_HAS_XML_SUPPORT
1655 case JSTRACE_XML:
1656 js_TraceXML(trc, (JSXML *)thing);
1657 break;
1658 #endif
1662 namespace js {
1665 * When the native stack is low, the GC does not call JS_TraceChildren to mark
1666 * the reachable "children" of the thing. Rather the thing is put aside and
1667 * JS_TraceChildren is called later with more space on the C stack.
1669 * To implement such delayed marking of the children with minimal overhead for
1670 * the normal case of sufficient native stack, the code uses two fields per
1671 * arena stored in JSGCMarkingDelay. The first field, JSGCMarkingDelay::link,
1672 * links all arenas with delayed things into a stack list with the pointer to
1673 * stack top in JSRuntime::gcUnmarkedArenaStackTop. delayMarkingChildren adds
1674 * arenas to the stack as necessary while markDelayedChildren pops the arenas
1675 * from the stack until it empties.
1677 * The second field, JSGCMarkingDelay::unmarkedChildren, is a bitmap that
1678 * tells for which things the GC should call JS_TraceChildren later. The
1679 * bitmap is a single word. As such it does not pinpoint the delayed things
1680 * in the arena but rather identifies the intervals containing
1681 * ThingsPerUnmarkedBit(thingSize) things. Later the code in
1682 * markDelayedChildren discovers such intervals and calls JS_TraceChildren on
1683 * any marked thing in the interval. This implies that JS_TraceChildren can be
1684 * called many times for a single thing if the thing shares the same interval
1685 * with some delayed things. This should be fine as any GC graph
1686 * marking/traversing hooks must allow repeated calls during the same GC cycle.
1687 * In particular, the xpcom cycle collector relies on this.
1689 * Note that such repeated scanning may slow down the GC. In particular, it is
1690 * possible to construct an object graph where the GC calls JS_TraceChildren
1691 * ThingsPerUnmarkedBit(thingSize) times for almost all things in the graph.
1692 * We tolerate this as the max value for ThingsPerUnmarkedBit(thingSize) is 4.
1693 * This is achieved for JSObject on 32-bit systems as it is exactly JSObject
1694 * that has the smallest size among the GC things that can be delayed. On a
1695 * 32-bit CPU we have fewer than 128 objects per 4K GC arena so each bit in
1696 * unmarkedChildren covers 4 objects.
1698 inline unsigned
1699 ThingsPerUnmarkedBit(unsigned thingSize)
1701 return JS_HOWMANY(ThingsPerArena(thingSize), JS_BITS_PER_WORD);
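/*
 * Editor's illustrative sketch, not part of the original jsgc.cpp: the
 * mapping between a thing's index within its arena and the unmarkedChildren
 * bit that covers it, as used by delayMarkingChildren and
 * markDelayedChildren below. Word size and arena size are spelled out as
 * example constants here.
 */
#include <cstddef>

static const size_t kExampleArenaSize = 4096;  /* GC_ARENA_SIZE    */
static const size_t kExampleBitsPerWord = 64;  /* JS_BITS_PER_WORD */

/* ceil(thingsPerArena / bitsPerWord): how many things one delay bit covers. */
static size_t ExampleThingsPerUnmarkedBit(size_t thingSize)
{
    size_t thingsPerArena = kExampleArenaSize / thingSize;
    return (thingsPerArena + kExampleBitsPerWord - 1) / kExampleBitsPerWord;
}

/* Which unmarkedChildren bit records that a given thing has delayed children. */
static size_t ExampleUnmarkedBitIndex(size_t thingIndexInArena, size_t thingSize)
{
    return thingIndexInArena / ExampleThingsPerUnmarkedBit(thingSize);
}

/* The interval of thing indices that markDelayedChildren rescans for a bit. */
static void ExampleBitInterval(size_t bitIndex, size_t thingSize,
                               size_t *begin, size_t *end)
{
    size_t per = ExampleThingsPerUnmarkedBit(thingSize);
    *begin = bitIndex * per;
    *end = *begin + per;            /* callers clamp *end to things per arena */
}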
1704 GCMarker::GCMarker(JSContext *cx)
1705 : color(0), unmarkedArenaStackTop(NULL)
1707 JS_TRACER_INIT(this, cx, NULL);
1708 #ifdef DEBUG
1709 markLaterCount = 0;
1710 #endif
1711 #ifdef JS_DUMP_CONSERVATIVE_GC_ROOTS
1712 conservativeDumpFileName = getenv("JS_DUMP_CONSERVATIVE_GC_ROOTS");
1713 memset(&conservativeStats, 0, sizeof(conservativeStats));
1714 #endif
1717 GCMarker::~GCMarker()
1719 #ifdef JS_DUMP_CONSERVATIVE_GC_ROOTS
1720 dumpConservativeRoots();
1721 #endif
1722 #ifdef JS_GCMETER
1723 /* Update total stats. */
1724 context->runtime->gcStats.conservative.add(conservativeStats);
1725 #endif
1728 void
1729 GCMarker::delayMarkingChildren(void *thing)
1731 JS_ASSERT(this == context->runtime->gcMarkingTracer);
1732 JS_ASSERT(IsMarkedGCThing(thing));
1733 METER(context->runtime->gcStats.unmarked++);
1735 JSGCArena *a = JSGCArena::fromGCThing(thing);
1736 JSGCArenaInfo *ainfo = a->getInfo();
1737 JSGCMarkingDelay *markingDelay = a->getMarkingDelay();
1739 size_t thingArenaIndex = GCThingToArenaIndex(thing);
1740 size_t unmarkedBitIndex = thingArenaIndex /
1741 ThingsPerUnmarkedBit(ainfo->list->thingSize);
1742 JS_ASSERT(unmarkedBitIndex < JS_BITS_PER_WORD);
1744 jsuword bit = jsuword(1) << unmarkedBitIndex;
1745 if (markingDelay->unmarkedChildren != 0) {
1746 JS_ASSERT(unmarkedArenaStackTop);
1747 if (markingDelay->unmarkedChildren & bit) {
1748 /* bit already covers things with children to mark later. */
1749 return;
1751 markingDelay->unmarkedChildren |= bit;
1752 } else {
1754 * The thing is the first thing with not yet marked children in the
1755 * whole arena, so push the arena on the stack of arenas with things
1756 * to be marked later unless the arena has already been pushed. We
1757 * detect that by checking markingDelay->link, as the field is null
1758 * only for arenas that have not yet been pushed. To ensure that
1759 * markingDelay->link != NULL
1760 * even when the stack contains one element, we make the link of
1761 * the arena at the bottom point to itself.
1763 * See comments in markDelayedChildren.
1765 markingDelay->unmarkedChildren = bit;
1766 if (!markingDelay->link) {
1767 if (!unmarkedArenaStackTop) {
1768 /* Stack was empty, mark the arena as the bottom element. */
1769 markingDelay->link = a;
1770 } else {
1771 JS_ASSERT(unmarkedArenaStackTop->getMarkingDelay()->link);
1772 markingDelay->link = unmarkedArenaStackTop;
1774 unmarkedArenaStackTop = a;
1776 JS_ASSERT(unmarkedArenaStackTop);
1778 #ifdef DEBUG
1779 markLaterCount += ThingsPerUnmarkedBit(ainfo->list->thingSize);
1780 METER_UPDATE_MAX(context->runtime->gcStats.maxunmarked, markLaterCount);
1781 #endif
1784 JS_FRIEND_API(void)
1785 GCMarker::markDelayedChildren()
1787 JS_ASSERT(this == context->runtime->gcMarkingTracer);
1789 JSGCArena *a = unmarkedArenaStackTop;
1790 if (!a) {
1791 JS_ASSERT(markLaterCount == 0);
1792 return;
1795 for (;;) {
1797 * The following assert verifies that the current arena belongs to the
1798 * unmarked stack, since delayMarkingChildren ensures that even for
1799 * the stack's bottom, markingDelay->link is not null but rather points to
1800 * itself.
1802 JSGCArenaInfo *ainfo = a->getInfo();
1803 JSGCMarkingDelay *markingDelay = a->getMarkingDelay();
1804 JS_ASSERT(markingDelay->link);
1805 JS_ASSERT(unmarkedArenaStackTop->getMarkingDelay()->link);
1806 unsigned thingSize = ainfo->list->thingSize;
1807 unsigned traceKind = GetFinalizableArenaTraceKind(ainfo);
1808 unsigned indexLimit = ThingsPerArena(thingSize);
1809 unsigned thingsPerUnmarkedBit = ThingsPerUnmarkedBit(thingSize);
1812 * We cannot use a do-while loop here as markingDelay->unmarkedChildren can
1813 * be zero before the loop as a leftover from previous iterations. See
1814 * comments after the loop.
1816 while (markingDelay->unmarkedChildren != 0) {
1817 unsigned unmarkedBitIndex = JS_FLOOR_LOG2W(markingDelay->unmarkedChildren);
1818 markingDelay->unmarkedChildren &= ~(jsuword(1) << unmarkedBitIndex);
1819 #ifdef DEBUG
1820 JS_ASSERT(markLaterCount >= thingsPerUnmarkedBit);
1821 markLaterCount -= thingsPerUnmarkedBit;
1822 #endif
1823 unsigned thingIndex = unmarkedBitIndex * thingsPerUnmarkedBit;
1824 unsigned endIndex = thingIndex + thingsPerUnmarkedBit;
1827 * endIndex can go beyond the last allocated thing as the real
1828 * limit can be "inside" the bit.
1830 if (endIndex > indexLimit)
1831 endIndex = indexLimit;
1832 uint8 *thing = GCArenaIndexToThing(a, ainfo, thingIndex);
1833 uint8 *end = GCArenaIndexToThing(a, ainfo, endIndex);
1834 do {
1835 JS_ASSERT(thing < end);
1836 if (IsMarkedGCThing(thing))
1837 JS_TraceChildren(this, thing, traceKind);
1838 thing += thingSize;
1839 } while (thing != end);
1843 * We have finished tracing all things in the arena, but we can only
1844 * pop it from the stack if the arena is the stack's top.
1846 * When JS_TraceChildren above calls JS_CallTracer, which in turn, on a
1847 * low C stack, calls delayMarkingChildren, and the latter pushes new
1848 * arenas onto the unmarked stack, we have to skip popping this arena
1849 * until it becomes the top of the stack again.
1851 if (a == unmarkedArenaStackTop) {
1852 JSGCArena *aprev = markingDelay->link;
1853 markingDelay->link = NULL;
1854 if (a == aprev) {
1856 * markingDelay->link points to itself, so we have reached the
1857 * bottom of the stack.
1859 break;
1861 unmarkedArenaStackTop = a = aprev;
1862 } else {
1863 a = unmarkedArenaStackTop;
1866 JS_ASSERT(unmarkedArenaStackTop);
1867 JS_ASSERT(!unmarkedArenaStackTop->getMarkingDelay()->link);
1868 unmarkedArenaStackTop = NULL;
1869 JS_ASSERT(markLaterCount == 0);
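/*
 * Editorial note, not from the original sources: the delayed-marking stack
 * above is threaded through JSGCMarkingDelay::link, with the bottom arena's
 * link pointing to itself as a sentinel. A minimal sketch of the invariant
 * the asserts rely on, assuming marking is in progress:
 *
 *     for (JSGCArena *p = unmarkedArenaStackTop; ; ) {
 *         JSGCArena *next = p->getMarkingDelay()->link;
 *         JS_ASSERT(next);        // never null while the arena is on the stack
 *         if (next == p)
 *             break;              // bottom sentinel: links to itself
 *         p = next;
 *     }
 */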
1872 void
1873 GCMarker::slowifyArrays()
1875 while (!arraysToSlowify.empty()) {
1876 JSObject *obj = arraysToSlowify.back();
1877 arraysToSlowify.popBack();
1878 if (IsMarkedGCThing(obj))
1879 obj->makeDenseArraySlow(context);
1883 void
1884 Mark(JSTracer *trc, void *thing, uint32 kind)
1886 JS_ASSERT(thing);
1887 JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
1888 JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
1889 JS_ASSERT_IF(!JSString::isStatic(thing), kind == GetFinalizableThingTraceKind(thing));
1890 #ifdef DEBUG
1891 if (IS_GC_MARKING_TRACER(trc)) {
1892 JSRuntime *rt = trc->context->runtime;
1893 JS_ASSERT(rt->gcMarkingTracer == trc);
1894 JS_ASSERT(rt->gcRunning);
1896 #endif
1898 if (!IS_GC_MARKING_TRACER(trc)) {
1899 trc->callback(trc, thing, kind);
1900 } else {
1901 GCMarker *gcmarker = static_cast<GCMarker *>(trc);
1903 if (kind == JSTRACE_STRING) {
1905 * Optimize for strings, as their marking is not recursive.
1907 * Iterate through all nodes and leaves in the rope if this is
1908 * part of a rope; otherwise, we only iterate once: on the string
1909 * itself.
1911 JSRopeNodeIterator iter((JSString *) thing);
1912 JSString *str = iter.init();
1913 do {
1914 for (;;) {
1915 if (JSString::isStatic(str))
1916 break;
1917 JS_ASSERT(kind == GetFinalizableThingTraceKind(str));
1918 if (!MarkIfUnmarkedGCThing(str))
1919 break;
1920 if (!str->isDependent())
1921 break;
1922 str = str->dependentBase();
1924 str = iter.next();
1925 } while (str);
1927 } else if (MarkIfUnmarkedGCThing(thing, gcmarker->getMarkColor())) {
1929 * With JS_GC_ASSUME_LOW_C_STACK defined, the mark phase of the GC
1930 * always uses the non-recursive code that would otherwise be
1931 * called only on a low C stack condition.
1933 #ifdef JS_GC_ASSUME_LOW_C_STACK
1934 # define RECURSION_TOO_DEEP() true
1935 #else
1936 int stackDummy;
1937 # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(trc->context, stackDummy))
1938 #endif
1939 if (RECURSION_TOO_DEEP())
1940 gcmarker->delayMarkingChildren(thing);
1941 else
1942 JS_TraceChildren(trc, thing, kind);
1946 #ifdef DEBUG
1947 trc->debugPrinter = NULL;
1948 trc->debugPrintArg = NULL;
1949 #endif
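/*
 * Editorial note, not from the original sources: RECURSION_TOO_DEEP() above
 * approximates the remaining C stack by taking the address of the local
 * stackDummy and testing it with JS_CHECK_STACK_SIZE against the context's
 * recorded stack limit (an assumption about that macro's definition). When
 * the check fails, Mark() queues the thing via delayMarkingChildren instead
 * of recursing, and markDelayedChildren later traces it iteratively.
 */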
1952 void
1953 MarkGCThing(JSTracer *trc, void *thing)
1955 JS_ASSERT(size_t(thing) % JS_GCTHING_ALIGN == 0);
1957 if (!thing)
1958 return;
1960 uint32 kind = js_GetGCThingTraceKind(thing);
1961 Mark(trc, thing, kind);
1964 } /* namespace js */
1966 static void
1967 gc_root_traversal(JSTracer *trc, const RootEntry &entry)
1969 #ifdef DEBUG
1970 void *ptr;
1971 if (entry.value.type == JS_GC_ROOT_GCTHING_PTR) {
1972 ptr = *reinterpret_cast<void **>(entry.key);
1973 } else {
1974 Value *vp = reinterpret_cast<Value *>(entry.key);
1975 ptr = vp->isGCThing() ? vp->asGCThing() : NULL;
1978 if (ptr) {
1979 if (!JSString::isStatic(ptr)) {
1980 bool root_points_to_gcArenaList = false;
1981 jsuword thing = (jsuword) ptr;
1982 JSRuntime *rt = trc->context->runtime;
1983 for (unsigned i = 0; i != FINALIZE_LIMIT; i++) {
1984 JSGCArenaList *arenaList = &rt->gcArenaList[i];
1985 size_t thingSize = arenaList->thingSize;
1986 size_t limit = ThingsPerArena(thingSize) * thingSize;
1987 for (JSGCArena *a = arenaList->head;
1989 a = a->getInfo()->prev) {
1990 if (thing - a->toPageStart() < limit) {
1991 root_points_to_gcArenaList = true;
1992 break;
1996 if (!root_points_to_gcArenaList && entry.value.name) {
1997 fprintf(stderr,
1998 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
1999 "invalid gcthing. This is usually caused by a missing call to JS_RemoveRoot.\n"
2000 "The root's name is \"%s\".\n",
2001 entry.value.name);
2003 JS_ASSERT(root_points_to_gcArenaList);
2006 #endif
2007 JS_SET_TRACING_NAME(trc, entry.value.name ? entry.value.name : "root");
2008 if (entry.value.type == JS_GC_ROOT_GCTHING_PTR)
2009 MarkGCThing(trc, *reinterpret_cast<void **>(entry.key));
2010 else
2011 MarkValueRaw(trc, *reinterpret_cast<Value *>(entry.key));
2014 static void
2015 gc_lock_traversal(const GCLocks::Entry &entry, JSTracer *trc)
2017 uint32 traceKind;
2019 JS_ASSERT(entry.value >= 1);
2020 traceKind = js_GetGCThingTraceKind(entry.key);
2021 JS_CALL_TRACER(trc, entry.key, traceKind, "locked object");
2024 void
2025 js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
2027 if (fp->hasCallObj())
2028 JS_CALL_OBJECT_TRACER(trc, fp->getCallObj(), "call");
2029 if (fp->hasArgsObj())
2030 JS_CALL_OBJECT_TRACER(trc, fp->getArgsObj(), "arguments");
2031 if (fp->hasScript())
2032 js_TraceScript(trc, fp->getScript());
2034 /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
2035 MarkValue(trc, fp->getThisValue(), "this");
2036 MarkValue(trc, fp->getReturnValue(), "rval");
2037 if (fp->hasScopeChain())
2038 JS_CALL_OBJECT_TRACER(trc, fp->getScopeChain(), "scope chain");
2041 inline void
2042 AutoGCRooter::trace(JSTracer *trc)
2044 switch (tag) {
2045 case JSVAL:
2046 MarkValue(trc, static_cast<AutoValueRooter *>(this)->val, "js::AutoValueRooter.val");
2047 return;
2049 case SHAPE:
2050 static_cast<AutoShapeRooter *>(this)->shape->trace(trc);
2051 return;
2053 case PARSER:
2054 static_cast<Parser *>(this)->trace(trc);
2055 return;
2057 case SCRIPT:
2058 if (JSScript *script = static_cast<AutoScriptRooter *>(this)->script)
2059 js_TraceScript(trc, script);
2060 return;
2062 case ENUMERATOR:
2063 static_cast<AutoEnumStateRooter *>(this)->trace(trc);
2064 return;
2066 case IDARRAY: {
2067 JSIdArray *ida = static_cast<AutoIdArray *>(this)->idArray;
2068 MarkIdRange(trc, ida->length, ida->vector, "js::AutoIdArray.idArray");
2069 return;
2072 case DESCRIPTORS: {
2073 PropDescArray &descriptors =
2074 static_cast<AutoPropDescArrayRooter *>(this)->descriptors;
2075 for (size_t i = 0, len = descriptors.length(); i < len; i++) {
2076 PropDesc &desc = descriptors[i];
2077 MarkValue(trc, desc.pd, "PropDesc::pd");
2078 MarkValue(trc, desc.value, "PropDesc::value");
2079 MarkValue(trc, desc.get, "PropDesc::get");
2080 MarkValue(trc, desc.set, "PropDesc::set");
2081 MarkId(trc, desc.id, "PropDesc::id");
2083 return;
2086 case DESCRIPTOR : {
2087 PropertyDescriptor &desc = *static_cast<AutoPropertyDescriptorRooter *>(this);
2088 if (desc.obj)
2089 MarkObject(trc, desc.obj, "Descriptor::obj");
2090 MarkValue(trc, desc.value, "Descriptor::value");
2091 if ((desc.attrs & JSPROP_GETTER) && desc.getter)
2092 MarkObject(trc, CastAsObject(desc.getter), "Descriptor::get");
2093 if (desc.attrs & JSPROP_SETTER && desc.setter)
2094 MarkObject(trc, CastAsObject(desc.setter), "Descriptor::set");
2095 return;
2098 case NAMESPACES: {
2099 JSXMLArray &array = static_cast<AutoNamespaceArray *>(this)->array;
2100 MarkObjectRange(trc, array.length, reinterpret_cast<JSObject **>(array.vector),
2101 "JSXMLArray.vector");
2102 array.cursors->trace(trc);
2103 return;
2106 case XML:
2107 js_TraceXML(trc, static_cast<AutoXMLRooter *>(this)->xml);
2108 return;
2110 case OBJECT:
2111 if (JSObject *obj = static_cast<AutoObjectRooter *>(this)->obj)
2112 MarkObject(trc, obj, "js::AutoObjectRooter.obj");
2113 return;
2115 case ID:
2116 MarkId(trc, static_cast<AutoIdRooter *>(this)->id_, "js::AutoIdRooter.val");
2117 return;
2119 case VALVECTOR: {
2120 Vector<Value, 8> &vector = static_cast<js::AutoValueVector *>(this)->vector;
2121 MarkValueRange(trc, vector.length(), vector.begin(), "js::AutoValueVector.vector");
2122 return;
2125 case STRING:
2126 if (JSString *str = static_cast<js::AutoStringRooter *>(this)->str)
2127 MarkString(trc, str, "js::AutoStringRooter.str");
2128 return;
2130 case IDVECTOR: {
2131 Vector<jsid, 8> &vector = static_cast<js::AutoIdVector *>(this)->vector;
2132 MarkIdRange(trc, vector.length(), vector.begin(), "js::AutoIdVector.vector");
2133 return;
2137 JS_ASSERT(tag >= 0);
2138 MarkValueRange(trc, tag, static_cast<AutoArrayRooter *>(this)->array, "js::AutoArrayRooter.array");
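/*
 * Illustrative sketch, not from the original sources: stack rooters push
 * themselves onto cx->autoGCRooters in their constructors, so a native that
 * writes (constructor signature assumed from its use elsewhere in the engine)
 *
 *     AutoValueRooter tvr(cx);   // now reachable through cx->autoGCRooters
 *     // ... allocations that may trigger a GC; tvr.val is kept alive ...
 *
 * has its value marked by the JSVAL case above when MarkContext walks the
 * rooter chain.
 */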
2141 namespace js {
2143 void
2144 MarkContext(JSTracer *trc, JSContext *acx)
2146 /* Stack frames and slots are traced by StackSpace::mark. */
2148 /* Mark other roots-by-definition in acx. */
2149 if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
2150 JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
2151 if (acx->throwing) {
2152 MarkValue(trc, acx->exception, "exception");
2153 } else {
2154 /* Avoid keeping GC-ed junk stored in JSContext.exception. */
2155 acx->exception.setNull();
2158 for (js::AutoGCRooter *gcr = acx->autoGCRooters; gcr; gcr = gcr->down)
2159 gcr->trace(trc);
2161 if (acx->sharpObjectMap.depth > 0)
2162 js_TraceSharpMap(trc, &acx->sharpObjectMap);
2164 js_TraceRegExpStatics(trc, acx);
2166 MarkValue(trc, acx->iterValue, "iterValue");
2168 acx->compartment->marked = true;
2170 #ifdef JS_TRACER
2171 TracerState* state = acx->tracerState;
2172 while (state) {
2173 if (state->nativeVp)
2174 MarkValueRange(trc, state->nativeVpLen, state->nativeVp, "nativeVp");
2175 state = state->prev;
2177 #endif
2180 JS_REQUIRES_STACK void
2181 MarkRuntime(JSTracer *trc)
2183 JSRuntime *rt = trc->context->runtime;
2185 if (rt->state != JSRTS_LANDING)
2186 MarkConservativeStackRoots(trc);
2189 * Verify that at this point we do not have unmarked GC things stored in
2190 * autorooters. To maximize test coverage we abort even in non-debug
2191 * builds for now; see bug 574313.
2193 JSContext *iter;
2194 #if 1
2195 iter = NULL;
2196 while (JSContext *acx = js_ContextIterator(rt, JS_TRUE, &iter)) {
2197 for (AutoGCRooter *gcr = acx->autoGCRooters; gcr; gcr = gcr->down) {
2198 #ifdef JS_THREADSAFE
2199 JS_ASSERT_IF(!acx->thread->requestDepth, acx->thread->suspendCount);
2200 #endif
2201 JS_ASSERT(JS_THREAD_DATA(acx)->conservativeGC.hasStackToScan());
2202 void *thing;
2203 switch (gcr->tag) {
2204 default:
2205 continue;
2206 case AutoGCRooter::JSVAL: {
2207 const Value &v = static_cast<AutoValueRooter *>(gcr)->val;
2208 if (!v.isMarkable())
2209 continue;
2210 thing = v.asGCThing();
2211 break;
2213 case AutoGCRooter::XML:
2214 thing = static_cast<AutoXMLRooter *>(gcr)->xml;
2215 break;
2216 case AutoGCRooter::OBJECT:
2217 thing = static_cast<AutoObjectRooter *>(gcr)->obj;
2218 if (!thing)
2219 continue;
2220 break;
2221 case AutoGCRooter::ID: {
2222 jsid id = static_cast<AutoIdRooter *>(gcr)->id();
2223 if (!JSID_IS_GCTHING(id))
2224 continue;
2225 thing = JSID_TO_GCTHING(id);
2226 break;
2230 if (JSString::isStatic(thing))
2231 continue;
2233 if (!IsMarkedGCThing(thing)) {
2234 ConservativeGCTest test = IsGCThingWord(rt, reinterpret_cast<jsuword>(thing));
2235 fprintf(stderr,
2236 "Conservative GC scanner has missed the root 0x%p with tag %ld"
2237 " on the stack due to %d. The root location 0x%p, distance from"
2238 " the stack base %ld, conservative gc span %ld."
2239 " Consevtaive GC status for the thread %d."
2240 " Aborting.\n",
2241 thing, (long) gcr->tag, int(test), (void *) gcr,
2242 (long) ((jsword) JS_THREAD_DATA(acx)->nativeStackBase - (jsword) gcr),
2243 (long) ((jsword) JS_THREAD_DATA(acx)->nativeStackBase -
2244 (jsword) JS_THREAD_DATA(acx)->conservativeGC.nativeStackTop),
2245 int(JS_THREAD_DATA(acx)->conservativeGC.hasStackToScan()));
2246 JS_ASSERT(false);
2247 abort();
2251 #endif
2253 for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront())
2254 gc_root_traversal(trc, r.front());
2256 for (GCLocks::Range r = rt->gcLocksHash.all(); !r.empty(); r.popFront())
2257 gc_lock_traversal(r.front(), trc);
2259 js_TraceAtomState(trc);
2260 js_MarkTraps(trc);
2262 iter = NULL;
2263 while (JSContext *acx = js_ContextIterator(rt, JS_TRUE, &iter))
2264 MarkContext(trc, acx);
2266 for (ThreadDataIter i(rt); !i.empty(); i.popFront())
2267 i.threadData()->mark(trc);
2269 if (rt->emptyArgumentsShape)
2270 rt->emptyArgumentsShape->trace(trc);
2271 if (rt->emptyBlockShape)
2272 rt->emptyBlockShape->trace(trc);
2273 if (rt->emptyCallShape)
2274 rt->emptyCallShape->trace(trc);
2275 if (rt->emptyDeclEnvShape)
2276 rt->emptyDeclEnvShape->trace(trc);
2277 if (rt->emptyEnumeratorShape)
2278 rt->emptyEnumeratorShape->trace(trc);
2279 if (rt->emptyWithShape)
2280 rt->emptyWithShape->trace(trc);
2283 * We mark extra roots last so that the trace op can use additional
2284 * colors to implement cycle collection.
2286 if (rt->gcExtraRootsTraceOp)
2287 rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
2289 #ifdef DEBUG
2290 if (rt->functionMeterFilename) {
2291 for (int k = 0; k < 2; k++) {
2292 typedef JSRuntime::FunctionCountMap HM;
2293 HM &h = (k == 0) ? rt->methodReadBarrierCountMap : rt->unjoinedFunctionCountMap;
2294 for (HM::Range r = h.all(); !r.empty(); r.popFront()) {
2295 JSFunction *fun = r.front().key;
2296 JS_CALL_OBJECT_TRACER(trc, fun, "FunctionCountMap key");
2300 #endif
2303 } /* namespace js */
2305 void
2306 js_TriggerGC(JSContext *cx, JSBool gcLocked)
2308 JSRuntime *rt = cx->runtime;
2310 #ifdef JS_THREADSAFE
2311 JS_ASSERT(cx->thread->requestDepth > 0);
2312 #endif
2313 JS_ASSERT(!rt->gcRunning);
2314 if (rt->gcIsNeeded)
2315 return;
2318 * Trigger the GC when it is safe to call an operation callback on any
2319 * thread.
2321 rt->gcIsNeeded = JS_TRUE;
2322 js_TriggerAllOperationCallbacks(rt, gcLocked);
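/*
 * Editorial note (an assumption about callers outside this file): allocation
 * paths that detect memory pressure call js_TriggerGC to request a GC at the
 * next safe point; the collection itself runs when a thread later services
 * its operation callback, notices rt->gcIsNeeded, and calls js_GC.
 */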
2325 void
2326 js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data)
2328 JSScript **listp, *script;
2330 for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) {
2331 listp = &data->scriptsToGC[i];
2332 while ((script = *listp) != NULL) {
2333 *listp = script->u.nextToGC;
2334 script->u.nextToGC = NULL;
2335 js_DestroyScript(cx, script);
2340 inline void
2341 FinalizeObject(JSContext *cx, JSObject *obj, unsigned thingKind)
2343 JS_ASSERT(thingKind == FINALIZE_OBJECT ||
2344 thingKind == FINALIZE_FUNCTION);
2346 /* Cope with stillborn objects that have no map. */
2347 if (!obj->map)
2348 return;
2350 /* Finalize obj first, in case it needs map and slots. */
2351 Class *clasp = obj->getClass();
2352 if (clasp->finalize)
2353 clasp->finalize(cx, obj);
2355 Probes::finalizeObject(obj);
2357 obj->finish(cx);
2360 inline void
2361 FinalizeFunction(JSContext *cx, JSFunction *fun, unsigned thingKind)
2363 FinalizeObject(cx, FUN_OBJECT(fun), thingKind);
2366 #if JS_HAS_XML_SUPPORT
2367 inline void
2368 FinalizeXML(JSContext *cx, JSXML *xml, unsigned thingKind)
2370 js_FinalizeXML(cx, xml);
2372 #endif
2374 JS_STATIC_ASSERT(JS_EXTERNAL_STRING_LIMIT == 8);
2375 static JSStringFinalizeOp str_finalizers[JS_EXTERNAL_STRING_LIMIT] = {
2376 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
2379 intN
2380 js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
2381 JSStringFinalizeOp newop)
2383 for (uintN i = 0; i != JS_ARRAY_LENGTH(str_finalizers); i++) {
2384 if (str_finalizers[i] == oldop) {
2385 str_finalizers[i] = newop;
2386 return intN(i);
2389 return -1;
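/*
 * Illustrative usage, not from the original sources: an embedding reserves an
 * external string type by installing its finalizer in a free slot and later
 * releases the slot by swapping the finalizer back out. MyFinalizeChars is a
 * hypothetical JSStringFinalizeOp supplied by the embedding:
 *
 *     intN type = js_ChangeExternalStringFinalizer(NULL, MyFinalizeChars);
 *     if (type >= 0) {
 *         // ... create and use external strings of this type ...
 *         js_ChangeExternalStringFinalizer(MyFinalizeChars, NULL);
 *     }
 */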
2392 inline void
2393 FinalizeShortString(JSContext *cx, JSShortString *str, unsigned thingKind)
2395 JS_ASSERT(FINALIZE_SHORT_STRING == thingKind);
2396 JS_ASSERT(!JSString::isStatic(str->header()));
2397 JS_ASSERT(str->header()->isFlat());
2398 JS_RUNTIME_UNMETER(cx->runtime, liveStrings);
2401 inline void
2402 FinalizeString(JSContext *cx, JSString *str, unsigned thingKind)
2404 JS_ASSERT(FINALIZE_STRING == thingKind);
2405 JS_ASSERT(!JSString::isStatic(str));
2406 JS_RUNTIME_UNMETER(cx->runtime, liveStrings);
2407 if (str->isDependent()) {
2408 JS_ASSERT(str->dependentBase());
2409 JS_RUNTIME_UNMETER(cx->runtime, liveDependentStrings);
2410 } else if (str->isFlat()) {
2412 * flatChars for a stillborn string is null, but cx->free checks
2413 * for a null pointer on its own.
2415 cx->free(str->flatChars());
2416 } else if (str->isTopNode()) {
2417 cx->free(str->topNodeBuffer());
2419 /* Nothing to be done for rope interior nodes. */
2422 inline void
2423 FinalizeExternalString(JSContext *cx, JSString *str, unsigned thingKind)
2425 unsigned type = thingKind - FINALIZE_EXTERNAL_STRING0;
2426 JS_ASSERT(type < JS_ARRAY_LENGTH(str_finalizers));
2427 JS_ASSERT(!JSString::isStatic(str));
2428 JS_ASSERT(str->isFlat());
2430 JS_RUNTIME_UNMETER(cx->runtime, liveStrings);
2432 /* A stillborn string has null chars. */
2433 jschar *chars = str->flatChars();
2434 if (!chars)
2435 return;
2436 JSStringFinalizeOp finalizer = str_finalizers[type];
2437 if (finalizer)
2438 finalizer(cx, str);
2442 * This function is called from js_FinishAtomState to force the finalization
2443 * of the permanently interned strings when cx is not available.
2445 void
2446 js_FinalizeStringRT(JSRuntime *rt, JSString *str)
2448 JS_RUNTIME_UNMETER(rt, liveStrings);
2449 JS_ASSERT(!JSString::isStatic(str));
2450 JS_ASSERT(!str->isRope());
2452 if (str->isDependent()) {
2453 /* A dependent string cannot be external and must be valid. */
2454 JS_ASSERT(JSGCArenaInfo::fromGCThing(str)->list->thingKind == FINALIZE_STRING);
2455 JS_ASSERT(str->dependentBase());
2456 JS_RUNTIME_UNMETER(rt, liveDependentStrings);
2457 } else {
2458 unsigned thingKind = JSGCArenaInfo::fromGCThing(str)->list->thingKind;
2459 JS_ASSERT(IsFinalizableStringKind(thingKind));
2461 /* A stillborn string has null chars, so it is not valid. */
2462 jschar *chars = str->flatChars();
2463 if (!chars)
2464 return;
2465 if (thingKind == FINALIZE_STRING) {
2466 rt->free(chars);
2467 } else if (thingKind != FINALIZE_SHORT_STRING) {
2468 unsigned type = thingKind - FINALIZE_EXTERNAL_STRING0;
2469 JS_ASSERT(type < JS_ARRAY_LENGTH(str_finalizers));
2470 JSStringFinalizeOp finalizer = str_finalizers[type];
2471 if (finalizer) {
2473 * Assume that the finalizer for the permanently interned
2474 * string knows how to deal with a null context.
2476 finalizer(NULL, str);
2482 template<typename T,
2483 void finalizer(JSContext *cx, T *thing, unsigned thingKind)>
2484 static void
2485 FinalizeArenaList(JSContext *cx, unsigned thingKind)
2487 JS_STATIC_ASSERT(!(sizeof(T) & GC_CELL_MASK));
2488 JSGCArenaList *arenaList = &cx->runtime->gcArenaList[thingKind];
2489 JS_ASSERT(sizeof(T) == arenaList->thingSize);
2491 JSGCArena **ap = &arenaList->head;
2492 JSGCArena *a = *ap;
2493 if (!a)
2494 return;
2496 #ifdef JS_GCMETER
2497 uint32 nlivearenas = 0, nkilledarenas = 0, nthings = 0;
2498 #endif
2499 for (;;) {
2500 JSGCArenaInfo *ainfo = a->getInfo();
2501 JS_ASSERT(ainfo->list == arenaList);
2502 JS_ASSERT(!a->getMarkingDelay()->link);
2503 JS_ASSERT(a->getMarkingDelay()->unmarkedChildren == 0);
2505 JSGCThing *freeList = NULL;
2506 JSGCThing **tailp = &freeList;
2507 bool allClear = true;
2509 jsuword thing = a->toPageStart();
2510 jsuword thingsEnd = thing + GC_ARENA_SIZE / sizeof(T) * sizeof(T);
2512 jsuword nextFree = reinterpret_cast<jsuword>(ainfo->freeList);
2513 if (!nextFree) {
2514 nextFree = thingsEnd;
2515 } else {
2516 JS_ASSERT(thing <= nextFree);
2517 JS_ASSERT(nextFree < thingsEnd);
2520 jsuword gcCellIndex = 0;
2521 jsbitmap *bitmap = a->getMarkBitmap();
2522 for (;; thing += sizeof(T), gcCellIndex += sizeof(T) >> GC_CELL_SHIFT) {
2523 if (thing == nextFree) {
2524 if (thing == thingsEnd)
2525 break;
2526 nextFree = reinterpret_cast<jsuword>(
2527 reinterpret_cast<JSGCThing *>(nextFree)->link);
2528 if (!nextFree) {
2529 nextFree = thingsEnd;
2530 } else {
2531 JS_ASSERT(thing < nextFree);
2532 JS_ASSERT(nextFree < thingsEnd);
2534 } else if (JS_TEST_BIT(bitmap, gcCellIndex)) {
2535 allClear = false;
2536 METER(nthings++);
2537 continue;
2538 } else {
2539 T *t = reinterpret_cast<T *>(thing);
2540 finalizer(cx, t, thingKind);
2541 #ifdef DEBUG
2542 memset(t, JS_FREE_PATTERN, sizeof(T));
2543 #endif
2545 JSGCThing *t = reinterpret_cast<JSGCThing *>(thing);
2546 *tailp = t;
2547 tailp = &t->link;
2550 #ifdef DEBUG
2551 /* Check that the free list is consistent. */
2552 unsigned nfree = 0;
2553 if (freeList) {
2554 JS_ASSERT(tailp != &freeList);
2555 JSGCThing *t = freeList;
2556 for (;;) {
2557 ++nfree;
2558 if (&t->link == tailp)
2559 break;
2560 JS_ASSERT(t < t->link);
2561 t = t->link;
2564 #endif
2565 if (allClear) {
2567 * Forget the just-assembled free list head for the arena and
2568 * release the arena itself.
2570 JS_ASSERT(nfree == ThingsPerArena(sizeof(T)));
2571 *ap = ainfo->prev;
2572 ReleaseGCArena(cx->runtime, a);
2573 METER(nkilledarenas++);
2574 } else {
2575 JS_ASSERT(nfree < ThingsPerArena(sizeof(T)));
2576 *tailp = NULL;
2577 ainfo->freeList = freeList;
2578 ap = &ainfo->prev;
2579 METER(nlivearenas++);
2581 if (!(a = *ap))
2582 break;
2584 arenaList->cursor = arenaList->head;
2586 METER(UpdateArenaStats(&cx->runtime->gcArenaStats[thingKind],
2587 nlivearenas, nkilledarenas, nthings));
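/*
 * Editorial summary of the sweep loop above: each arena is walked cell by
 * cell in address order, skipping cells already on the arena's free list,
 * keeping marked cells, and finalizing unmarked ones while threading them
 * into a new free list via tailp. Arenas that end up entirely free are
 * unlinked and released; the rest keep their rebuilt free list and stay on
 * the arena list.
 */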
2590 #ifdef JS_THREADSAFE
2592 namespace js {
2594 JS_FRIEND_API(void)
2595 BackgroundSweepTask::replenishAndFreeLater(void *ptr)
2597 JS_ASSERT(freeCursor == freeCursorEnd);
2598 do {
2599 if (freeCursor && !freeVector.append(freeCursorEnd - FREE_ARRAY_LENGTH))
2600 break;
2601 freeCursor = (void **) js_malloc(FREE_ARRAY_SIZE);
2602 if (!freeCursor) {
2603 freeCursorEnd = NULL;
2604 break;
2606 freeCursorEnd = freeCursor + FREE_ARRAY_LENGTH;
2607 *freeCursor++ = ptr;
2608 return;
2609 } while (false);
2610 js_free(ptr);
2613 void
2614 BackgroundSweepTask::run()
2616 if (freeCursor) {
2617 void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
2618 freeElementsAndArray(array, freeCursor);
2619 freeCursor = freeCursorEnd = NULL;
2620 } else {
2621 JS_ASSERT(!freeCursorEnd);
2623 for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
2624 void **array = *iter;
2625 freeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
2631 #endif /* JS_THREADSAFE */
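/*
 * Editorial note: replenishAndFreeLater batches pointers into arrays of
 * FREE_ARRAY_LENGTH entries recorded in freeVector, falling back to an
 * immediate js_free if no array can be allocated. run() then releases the
 * partially filled current array and every completed array, so memory
 * reclaimed during sweeping can be freed on the helper thread rather than
 * the thread performing the GC (an assumption about how the task is used).
 */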
2633 static void
2634 SweepCompartments(JSContext *cx)
2636 JSRuntime *rt = cx->runtime;
2637 JSCompartmentCallback callback = rt->compartmentCallback;
2638 JSCompartment **read = rt->compartments.begin();
2639 JSCompartment **end = rt->compartments.end();
2640 JSCompartment **write = read;
2642 /* Delete defaultCompartment only during runtime shutdown */
2643 rt->defaultCompartment->marked = true;
2645 while (read < end) {
2646 JSCompartment *compartment = (*read++);
2647 if (compartment->marked) {
2648 compartment->marked = false;
2649 *write++ = compartment;
2650 /* Remove dead wrappers from the compartment map. */
2651 compartment->sweep(cx);
2652 } else {
2653 if (callback)
2654 (void) callback(cx, compartment, JSCOMPARTMENT_DESTROY);
2655 if (compartment->principals)
2656 JSPRINCIPALS_DROP(cx, compartment->principals);
2657 delete compartment;
2660 rt->compartments.resize(write - rt->compartments.begin());
2664 * Common cache invalidation and so forth that must be done before GC. Even if
2665 * GCUntilDone calls GC several times, this work needs to be done only once.
2667 static void
2668 PreGCCleanup(JSContext *cx, JSGCInvocationKind gckind)
2670 JSRuntime *rt = cx->runtime;
2672 /* Clear gcIsNeeded now, when we are about to start a normal GC cycle. */
2673 rt->gcIsNeeded = JS_FALSE;
2675 /* Reset malloc counter. */
2676 rt->resetGCMallocBytes();
2678 #ifdef JS_DUMP_SCOPE_METERS
2680 extern void js_DumpScopeMeters(JSRuntime *rt);
2681 js_DumpScopeMeters(rt);
2683 #endif
2686 * Reset the property cache's type id generator so we can compress ids.
2687 * Same for the protoHazardShape proxy-shape standing in for all object
2688 * prototypes having readonly or setter properties.
2690 if (rt->shapeGen & SHAPE_OVERFLOW_BIT
2691 #ifdef JS_GC_ZEAL
2692 || rt->gcZeal >= 1
2693 #endif
2695 rt->gcRegenShapes = true;
2696 rt->shapeGen = Shape::LAST_RESERVED_SHAPE;
2697 rt->protoHazardShape = 0;
2700 js_PurgeThreads(cx);
2702 JSContext *iter = NULL;
2703 while (JSContext *acx = js_ContextIterator(rt, JS_TRUE, &iter))
2704 acx->purge();
2709 * Perform mark-and-sweep GC.
2711 * In a JS_THREADSAFE build, the calling thread must be rt->gcThread and each
2712 * other thread must be either outside all requests or blocked waiting for GC
2713 * to finish. Note that the caller does not hold rt->gcLock.
2715 static void
2716 MarkAndSweep(JSContext *cx GCTIMER_PARAM)
2718 JSRuntime *rt = cx->runtime;
2719 rt->gcNumber++;
2722 * Mark phase.
2724 GCMarker gcmarker(cx);
2725 JS_ASSERT(IS_GC_MARKING_TRACER(&gcmarker));
2726 JS_ASSERT(gcmarker.getMarkColor() == BLACK);
2727 rt->gcMarkingTracer = &gcmarker;
2729 for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
2730 GCChunkInfo::fromChunk(r.front())->clearMarkBitmap();
2732 MarkRuntime(&gcmarker);
2733 js_MarkScriptFilenames(rt);
2736 * Mark children of things that caused too deep recursion during the above
2737 * tracing.
2739 gcmarker.markDelayedChildren();
2741 rt->gcMarkingTracer = NULL;
2743 if (rt->gcCallback)
2744 (void) rt->gcCallback(cx, JSGC_MARK_END);
2746 #ifdef JS_THREADSAFE
2747 JS_ASSERT(!cx->gcSweepTask);
2748 if (!rt->gcHelperThread.busy())
2749 cx->gcSweepTask = new js::BackgroundSweepTask();
2750 #endif
2753 * Sweep phase.
2755 * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
2756 * so that any attempt to allocate a GC-thing from a finalizer will fail,
2757 * rather than nest badly and leave the unmarked newborn to be swept.
2759 * We first sweep atom state so we can use js_IsAboutToBeFinalized on
2760 * JSString held in a hashtable to check if the hashtable entry can be
2761 * freed. Note that even after the entry is freed, JSObject finalizers can
2762 * continue to access the corresponding JSString* assuming that they are
2763 * unique. This works since the atomization API must not be called during
2764 * the GC.
2766 TIMESTAMP(startSweep);
2767 js_SweepAtomState(cx);
2769 /* Finalize watch points associated with unreachable objects. */
2770 js_SweepWatchPoints(cx);
2772 #ifdef DEBUG
2773 /* Save the pre-sweep count of scope-mapped properties. */
2774 rt->liveObjectPropsPreSweep = rt->liveObjectProps;
2775 #endif
2777 #ifdef JS_METHODJIT
2778 /* Fix-up call ICs guarding against unreachable objects. */
2779 mjit::SweepCallICs(cx);
2780 #endif
2783 * We finalize iterators before other objects so the iterator can use the
2784 * object whose properties it enumerates to finalize the enumeration
2785 * state. We finalize objects before other GC things to ensure that an
2786 * object's finalizer can access them even if they will be freed.
2788 JS_ASSERT(!rt->gcEmptyArenaList);
2789 FinalizeArenaList<JSObject, FinalizeObject>(cx, FINALIZE_OBJECT);
2790 FinalizeArenaList<JSFunction, FinalizeFunction>(cx, FINALIZE_FUNCTION);
2791 #if JS_HAS_XML_SUPPORT
2792 FinalizeArenaList<JSXML, FinalizeXML>(cx, FINALIZE_XML);
2793 #endif
2794 TIMESTAMP(sweepObjectEnd);
2797 * We sweep the deflated string cache before we finalize the strings so
2798 * the cache can safely use js_IsAboutToBeFinalized().
2800 rt->deflatedStringCache->sweep(cx);
2802 FinalizeArenaList<JSShortString, FinalizeShortString>(cx, FINALIZE_SHORT_STRING);
2803 FinalizeArenaList<JSString, FinalizeString>(cx, FINALIZE_STRING);
2804 for (unsigned i = FINALIZE_EXTERNAL_STRING0;
2805 i <= FINALIZE_EXTERNAL_STRING_LAST;
2806 ++i) {
2807 FinalizeArenaList<JSString, FinalizeExternalString>(cx, i);
2810 rt->gcNewArenaTriggerBytes = rt->gcBytes < GC_ARENA_ALLOCATION_TRIGGER ?
2811 GC_ARENA_ALLOCATION_TRIGGER :
2812 rt->gcBytes;
2814 TIMESTAMP(sweepStringEnd);
2816 SweepCompartments(cx);
2819 * Sweep the runtime's property trees after finalizing objects, in case any
2820 * had watchpoints referencing tree nodes.
2822 js::PropertyTree::sweepShapes(cx);
2825 * Sweep script filenames after sweeping functions in the generic loop
2826 * above. In this way when a scripted function's finalizer destroys the
2827 * script and calls rt->destroyScriptHook, the hook can still access the
2828 * script's filename. See bug 323267.
2830 js_SweepScriptFilenames(rt);
2832 /* Slowify arrays we have accumulated. */
2833 gcmarker.slowifyArrays();
2836 * Destroy arenas after we have finished sweeping so finalizers can safely
2837 * use js_IsAboutToBeFinalized().
2839 FreeGCChunks(rt);
2840 TIMESTAMP(sweepDestroyEnd);
2842 #ifdef JS_THREADSAFE
2843 if (cx->gcSweepTask) {
2844 rt->gcHelperThread.schedule(cx->gcSweepTask);
2845 cx->gcSweepTask = NULL;
2847 #endif
2849 if (rt->gcCallback)
2850 (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
2851 #ifdef DEBUG_srcnotesize
2852 { extern void DumpSrcNoteSizeHist();
2853 DumpSrcNoteSizeHist();
2854 printf("GC HEAP SIZE %lu\n", (unsigned long)rt->gcBytes);
2856 #endif
2858 #ifdef JS_SCOPE_DEPTH_METER
2859 DumpScopeDepthMeter(rt);
2860 #endif
2862 #ifdef JS_DUMP_LOOP_STATS
2863 DumpLoopStats(rt);
2864 #endif
2867 #ifdef JS_THREADSAFE
2870 * If the GC is running and we're called on another thread, wait for this GC
2871 * activation to finish. We can safely wait here without fear of deadlock (in
2872 * the case where we are called within a request on another thread's context)
2873 * because the GC doesn't set rt->gcRunning until after it has waited for all
2874 * active requests to end.
2876 * We call js_CurrentThreadId() only after checking rt->gcRunning to avoid
2877 * an expensive call when the GC is not running.
2879 void
2880 js_WaitForGC(JSRuntime *rt)
2882 if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
2883 do {
2884 JS_AWAIT_GC_DONE(rt);
2885 } while (rt->gcRunning);
2890 * GC is running on another thread. Temporarily suspend all requests running
2891 * on the current thread and wait until the GC is done.
2893 static void
2894 LetOtherGCFinish(JSContext *cx)
2896 JSRuntime *rt = cx->runtime;
2897 JS_ASSERT(rt->gcThread);
2898 JS_ASSERT(cx->thread != rt->gcThread);
2900 size_t requestDebit = cx->thread->requestDepth ? 1 : 0;
2901 JS_ASSERT(requestDebit <= rt->requestCount);
2902 #ifdef JS_TRACER
2903 JS_ASSERT_IF(requestDebit == 0, !JS_ON_TRACE(cx));
2904 #endif
2905 if (requestDebit != 0) {
2906 #ifdef JS_TRACER
2907 if (JS_ON_TRACE(cx)) {
2909 * Leave trace before we decrease rt->requestCount and notify the
2910 * GC. Otherwise the GC may start immediately after we unlock while
2911 * this thread is still on trace.
2913 AutoUnlockGC unlock(rt);
2914 LeaveTrace(cx);
2916 #endif
2917 rt->requestCount -= requestDebit;
2918 if (rt->requestCount == 0)
2919 JS_NOTIFY_REQUEST_DONE(rt);
2922 /* See comments before another call to js_ShareWaitingTitles below. */
2923 cx->thread->gcWaiting = true;
2924 js_ShareWaitingTitles(cx);
2927 * Check that we did not release the GC lock above and let the GC
2928 * finish before we wait.
2930 JS_ASSERT(rt->gcThread);
2933 * Wait for GC to finish on the other thread, even if requestDebit is 0
2934 * and even if GC has not started yet because the gcThread is waiting in
2935 * AutoGCSession. This ensures that js_GC never returns without a full GC
2936 * cycle happening.
2938 do {
2939 JS_AWAIT_GC_DONE(rt);
2940 } while (rt->gcThread);
2942 cx->thread->gcWaiting = false;
2943 rt->requestCount += requestDebit;
2946 #endif
2948 class AutoGCSession {
2949 public:
2950 explicit AutoGCSession(JSContext *cx);
2951 ~AutoGCSession();
2953 private:
2954 JSContext *context;
2956 /* Disable copy constructor or assignments */
2957 AutoGCSession(const AutoGCSession&);
2958 void operator=(const AutoGCSession&);
2962 * Start a new GC session. Together with LetOtherGCFinish this function
2963 * contains the rendezvous algorithm by which we stop the world for GC.
2965 * This thread becomes the GC thread. Wait for all other threads to quiesce.
2966 * Then set rt->gcRunning and return.
2968 AutoGCSession::AutoGCSession(JSContext *cx)
2969 : context(cx)
2971 JSRuntime *rt = cx->runtime;
2973 #ifdef JS_THREADSAFE
2974 if (rt->gcThread && rt->gcThread != cx->thread)
2975 LetOtherGCFinish(cx);
2976 #endif
2978 JS_ASSERT(!rt->gcRunning);
2980 #ifdef JS_THREADSAFE
2981 /* No other thread is in GC, so indicate that we're now in GC. */
2982 JS_ASSERT(!rt->gcThread);
2983 rt->gcThread = cx->thread;
2986 * Notify operation callbacks on other threads, which will give them a
2987 * chance to yield their requests. Threads without requests perform their
2988 * callback at some later point, when it will be unnecessary but
2989 * harmless.
2991 for (JSThread::Map::Range r = rt->threads.all(); !r.empty(); r.popFront()) {
2992 JSThread *thread = r.front().value;
2993 if (thread != cx->thread)
2994 thread->data.triggerOperationCallback();
2998 * Discount the current thread's request from rt->requestCount before
2999 * we wait for all other requests to finish. JS_NOTIFY_REQUEST_DONE,
3000 * which will wake us up, is only called when rt->requestCount
3001 * transitions to 0.
3003 size_t requestDebit = cx->thread->requestDepth ? 1 : 0;
3004 JS_ASSERT(requestDebit <= rt->requestCount);
3005 if (requestDebit != rt->requestCount) {
3006 rt->requestCount -= requestDebit;
3009 * Share any title that is owned by the GC thread before we wait, to
3010 * avoid a deadlock with ClaimTitle. We also set the gcWaiting flag so
3011 * that ClaimTitle can claim the title ownership from the GC thread if
3012 * that function is called while the GC is waiting.
3014 cx->thread->gcWaiting = true;
3015 js_ShareWaitingTitles(cx);
3016 do {
3017 JS_AWAIT_REQUEST_DONE(rt);
3018 } while (rt->requestCount > 0);
3019 cx->thread->gcWaiting = false;
3020 rt->requestCount += requestDebit;
3023 #endif /* JS_THREADSAFE */
3026 * Set rt->gcRunning here within the GC lock, and after waiting for any
3027 * active requests to end. This way js_WaitForGC called outside a request
3028 * will not block on a GC that is still waiting for other requests to
3029 * finish (with rt->gcThread set), while JS_BeginRequest will do such a wait.
3031 rt->gcRunning = true;
3034 /* End the current GC session and allow other threads to proceed. */
3035 AutoGCSession::~AutoGCSession()
3037 JSRuntime *rt = context->runtime;
3038 rt->gcRunning = false;
3039 #ifdef JS_THREADSAFE
3040 JS_ASSERT(rt->gcThread == context->thread);
3041 rt->gcThread = NULL;
3042 JS_NOTIFY_GC_DONE(rt);
3043 #endif
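/*
 * Usage note, grounded in the callers below: a GC session is entered with the
 * GC lock held and released around the heavy work, following the pattern
 *
 *     AutoLockGC lock(rt);
 *     AutoGCSession gcsession(cx);   // rendezvous: this thread becomes the GC thread
 *     AutoUnlockGC unlock(rt);       // mark and sweep without holding the lock
 *
 * as in GCUntilDone (where js_GC takes the lock), SetProtoCheckingForCycles
 * and TraceRuntime.
 */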
3047 * GC, repeatedly if necessary, until we think we have not created any new
3048 * garbage and no other threads are demanding more GC.
3050 static void
3051 GCUntilDone(JSContext *cx, JSGCInvocationKind gckind GCTIMER_PARAM)
3053 if (JS_ON_TRACE(cx))
3054 return;
3056 JSRuntime *rt = cx->runtime;
3058 /* Recursive GC or a call from another thread restarts the GC cycle. */
3059 if (rt->gcMarkAndSweep) {
3060 rt->gcPoke = true;
3061 #ifdef JS_THREADSAFE
3062 JS_ASSERT(rt->gcThread);
3063 if (rt->gcThread != cx->thread) {
3064 /* We do not return until another GC finishes. */
3065 LetOtherGCFinish(cx);
3067 #endif
3068 return;
3071 AutoGCSession gcsession(cx);
3073 METER(rt->gcStats.poke++);
3075 bool firstRun = true;
3076 rt->gcMarkAndSweep = true;
3077 do {
3078 rt->gcPoke = false;
3080 AutoUnlockGC unlock(rt);
3081 if (firstRun) {
3082 PreGCCleanup(cx, gckind);
3083 TIMESTAMP(startMark);
3084 firstRun = false;
3086 MarkAndSweep(cx GCTIMER_ARG);
3088 // GC again if:
3089 // - another thread, not in a request, called js_GC
3090 // - js_GC was called recursively
3091 // - a finalizer called js_RemoveRoot or js_UnlockGCThingRT.
3092 } while (rt->gcPoke);
3094 rt->gcMarkAndSweep = false;
3095 rt->gcRegenShapes = false;
3096 rt->setGCLastBytes(rt->gcBytes);
3100 * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
3101 * rt->gcLock already held, so the lock should be kept on return.
3103 void
3104 js_GC(JSContext *cx, JSGCInvocationKind gckind)
3106 JSRuntime *rt = cx->runtime;
3109 * Don't collect garbage if the runtime isn't up, and cx is not the last
3110 * context in the runtime. The last context must force a GC, and nothing
3111 * should suppress that final collection or there may be shutdown leaks,
3112 * or runtime bloat until the next context is created.
3114 if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
3115 return;
3117 RecordNativeStackTopForGC(cx);
3119 GCTIMER_BEGIN();
3121 do {
3123 * Let the API user decide to defer a GC if it wants to (unless this
3124 * is the last context). Invoke the callback regardless. Sample the
3125 * callback in case we are freely racing with a JS_SetGCCallback{,RT}
3126 * on another thread.
3128 if (JSGCCallback callback = rt->gcCallback) {
3129 Conditionally<AutoUnlockGC> unlockIf(!!(gckind & GC_LOCK_HELD), rt);
3130 if (!callback(cx, JSGC_BEGIN) && gckind != GC_LAST_CONTEXT)
3131 return;
3135 /* Lock out other GC allocator and collector invocations. */
3136 Conditionally<AutoLockGC> lockIf(!(gckind & GC_LOCK_HELD), rt);
3138 GCUntilDone(cx, gckind GCTIMER_ARG);
3141 /* We re-sample the callback again as the finalizers can change it. */
3142 if (JSGCCallback callback = rt->gcCallback) {
3143 Conditionally<AutoUnlockGC> unlockIf(gckind & GC_LOCK_HELD, rt);
3145 (void) callback(cx, JSGC_END);
3149 * On shutdown, iterate until the JSGC_END callback stops creating
3150 * garbage.
3152 } while (gckind == GC_LAST_CONTEXT && rt->gcPoke);
3154 GCTIMER_END(gckind == GC_LAST_CONTEXT);
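/*
 * Illustrative call sequence (an assumption about the public API, which lives
 * outside this file): an embedder's JS_GC(cx) reaches this function roughly as
 *
 *     js_GC(cx, GC_NORMAL);          // takes rt->gcLock internally
 *
 * while allocation paths that already hold rt->gcLock pass GC_LOCK_HELD so the
 * lock is kept across the call, as the comment above js_GC describes.
 */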
3157 namespace js {
3159 bool
3160 SetProtoCheckingForCycles(JSContext *cx, JSObject *obj, JSObject *proto)
3163 * This function cannot be called during the GC and always requires a
3164 * request.
3166 #ifdef JS_THREADSAFE
3167 JS_ASSERT(cx->thread->requestDepth);
3170 * This is only necessary if AutoGCSession below would wait for GC to
3171 * finish on another thread, but to capture the minimal stack space and
3172 * for code simplicity we do it here unconditionally.
3174 RecordNativeStackTopForGC(cx);
3175 #endif
3177 JSRuntime *rt = cx->runtime;
3178 AutoLockGC lock(rt);
3179 AutoGCSession gcsession(cx);
3180 AutoUnlockGC unlock(rt);
3182 bool cycle = false;
3183 for (JSObject *obj2 = proto; obj2;) {
3184 obj2 = obj2->wrappedObject(cx);
3185 if (obj2 == obj) {
3186 cycle = true;
3187 break;
3189 obj2 = obj2->getProto();
3191 if (!cycle)
3192 obj->setProto(proto);
3194 return !cycle;
3197 void
3198 TraceRuntime(JSTracer *trc)
3200 LeaveTrace(trc->context);
3202 #ifdef JS_THREADSAFE
3204 JSContext *cx = trc->context;
3205 JSRuntime *rt = cx->runtime;
3206 AutoLockGC lock(rt);
3208 if (rt->gcThread != cx->thread) {
3209 AutoGCSession gcsession(cx);
3210 AutoUnlockGC unlock(rt);
3211 RecordNativeStackTopForGC(trc->context);
3212 MarkRuntime(trc);
3213 return;
3216 #else
3217 RecordNativeStackTopForGC(trc->context);
3218 #endif
3221 * Calls from inside a normal GC and recursive calls are OK and do not
3222 * require session setup.
3224 MarkRuntime(trc);
3227 JSCompartment *
3228 NewCompartment(JSContext *cx, JSPrincipals *principals)
3230 JSRuntime *rt = cx->runtime;
3231 JSCompartment *compartment = new JSCompartment(rt);
3232 if (!compartment || !compartment->init()) {
3233 JS_ReportOutOfMemory(cx);
3234 return NULL;
3237 if (principals) {
3238 compartment->principals = principals;
3239 JSPRINCIPALS_HOLD(cx, principals);
3243 AutoLockGC lock(rt);
3245 if (!rt->compartments.append(compartment)) {
3246 AutoUnlockGC unlock(rt);
3247 JS_ReportOutOfMemory(cx);
3248 return NULL;
3252 JSCompartmentCallback callback = rt->compartmentCallback;
3253 if (callback && !callback(cx, compartment, JSCOMPARTMENT_NEW)) {
3254 AutoLockGC lock(rt);
3255 rt->compartments.popBack();
3256 return NULL;
3259 return compartment;