Unleak regexp arena. Bug 586898 - JM: memory leak due to YARR. r=sayrer
[mozilla-central.git] / js / src / jscntxt.cpp
blob 5cecaf8157f7b1f603957a699bf4d3689706928f
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=80:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
17 * The Original Code is Mozilla Communicator client code, released
18 * March 31, 1998.
20 * The Initial Developer of the Original Code is
21 * Netscape Communications Corporation.
22 * Portions created by the Initial Developer are Copyright (C) 1998
23 * the Initial Developer. All Rights Reserved.
25 * Contributor(s):
27 * Alternatively, the contents of this file may be used under the terms of
28 * either of the GNU General Public License Version 2 or later (the "GPL"),
29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
42 * JS execution context.
44 #include <new>
45 #include <stdarg.h>
46 #include <stdlib.h>
47 #include <string.h>
49 #include "jsstdint.h"
51 #include "jstypes.h"
52 #include "jsarena.h" /* Added by JSIFY */
53 #include "jsutil.h" /* Added by JSIFY */
54 #include "jsclist.h"
55 #include "jsprf.h"
56 #include "jsatom.h"
57 #include "jscntxt.h"
58 #include "jsversion.h"
59 #include "jsdbgapi.h"
60 #include "jsexn.h"
61 #include "jsfun.h"
62 #include "jsgc.h"
63 #include "jsiter.h"
64 #include "jslock.h"
65 #include "jsmath.h"
66 #include "jsnativestack.h"
67 #include "jsnum.h"
68 #include "jsobj.h"
69 #include "jsopcode.h"
70 #include "jspubtd.h"
71 #include "jsscan.h"
72 #include "jsscope.h"
73 #include "jsscript.h"
74 #include "jsstaticcheck.h"
75 #include "jsstr.h"
76 #include "jstracer.h"
78 #include "jscntxtinlines.h"
80 #ifdef XP_WIN
81 # include <windows.h>
82 #elif defined(XP_OS2)
83 # define INCL_DOSMEMMGR
84 # include <os2.h>
85 #else
86 # include <unistd.h>
87 # include <sys/mman.h>
88 # if !defined(MAP_ANONYMOUS)
89 # if defined(MAP_ANON)
90 # define MAP_ANONYMOUS MAP_ANON
91 # else
92 # define MAP_ANONYMOUS 0
93 # endif
94 # endif
95 #endif
97 using namespace js;
99 static const size_t ARENA_HEADER_SIZE_HACK = 40;
100 static const size_t TEMP_POOL_CHUNK_SIZE = 4096 - ARENA_HEADER_SIZE_HACK;
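/*
 * Illustrative note (not part of the original source): with
 * ARENA_HEADER_SIZE_HACK == 40, TEMP_POOL_CHUNK_SIZE works out to
 * 4096 - 40 = 4056 bytes.  The intent appears to be that an arena's payload
 * plus its (assumed ~40-byte) JSArena header stays within a single 4096-byte
 * allocation; both cx->tempPool and cx->regExpPool below are created with
 * this chunk size.
 */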
102 static void
103 FreeContext(JSContext *cx);
105 #ifdef DEBUG
106 JS_REQUIRES_STACK bool
107 StackSegment::contains(const JSStackFrame *fp) const
109 JS_ASSERT(inContext());
110 JSStackFrame *start;
111 JSStackFrame *stop;
112 if (isActive()) {
113 start = cx->fp;
114 stop = cx->activeSegment()->initialFrame->down;
115 } else {
116 start = suspendedFrame;
117 stop = initialFrame->down;
119 for (JSStackFrame *f = start; f != stop; f = f->down) {
120 if (f == fp)
121 return true;
123 return false;
125 #endif
127 bool
128 StackSpace::init()
130 void *p;
131 #ifdef XP_WIN
132 p = VirtualAlloc(NULL, CAPACITY_BYTES, MEM_RESERVE, PAGE_READWRITE);
133 if (!p)
134 return false;
135 void *check = VirtualAlloc(p, COMMIT_BYTES, MEM_COMMIT, PAGE_READWRITE);
136 if (p != check)
137 return false;
138 base = reinterpret_cast<Value *>(p);
139 commitEnd = base + COMMIT_VALS;
140 end = base + CAPACITY_VALS;
141 #elif defined(XP_OS2)
142 if (DosAllocMem(&p, CAPACITY_BYTES, PAG_COMMIT | PAG_READ | PAG_WRITE | OBJ_ANY) &&
143 DosAllocMem(&p, CAPACITY_BYTES, PAG_COMMIT | PAG_READ | PAG_WRITE))
144 return false;
145 base = reinterpret_cast<Value *>(p);
146 end = base + CAPACITY_VALS;
147 #else
148 JS_ASSERT(CAPACITY_BYTES % getpagesize() == 0);
149 p = mmap(NULL, CAPACITY_BYTES, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
150 if (p == MAP_FAILED)
151 return false;
152 base = reinterpret_cast<Value *>(p);
153 end = base + CAPACITY_VALS;
154 #endif
155 return true;
158 void
159 StackSpace::finish()
161 #ifdef XP_WIN
162 VirtualFree(base, (commitEnd - base) * sizeof(Value), MEM_DECOMMIT);
163 VirtualFree(base, 0, MEM_RELEASE);
164 #elif defined(XP_OS2)
165 DosFreeMem(base);
166 #else
167 #ifdef SOLARIS
168 munmap((caddr_t)base, CAPACITY_BYTES);
169 #else
170 munmap(base, CAPACITY_BYTES);
171 #endif
172 #endif
175 #ifdef XP_WIN
176 JS_FRIEND_API(bool)
177 StackSpace::bumpCommit(Value *from, ptrdiff_t nvals) const
179 JS_ASSERT(end - from >= nvals);
180 Value *newCommit = commitEnd;
181 Value *request = from + nvals;
183 /* Use a dumb loop; will probably execute once. */
184 JS_ASSERT((end - newCommit) % COMMIT_VALS == 0);
185 do {
186 newCommit += COMMIT_VALS;
187 JS_ASSERT((end - newCommit) >= 0);
188 } while (newCommit < request);
190 /* The cast is safe because CAPACITY_BYTES is small. */
191 int32 size = static_cast<int32>(newCommit - commitEnd) * sizeof(Value);
193 if (!VirtualAlloc(commitEnd, size, MEM_COMMIT, PAGE_READWRITE))
194 return false;
195 commitEnd = newCommit;
196 return true;
198 #endif
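/*
 * Illustrative note (not from the original source): the loop in bumpCommit
 * grows the committed region in whole COMMIT_VALS steps.  For example, if the
 * request exceeds commitEnd by a single Value, one iteration suffices and
 * VirtualAlloc commits exactly COMMIT_VALS * sizeof(Value) more bytes; a
 * request spanning several chunks simply runs the loop that many times.
 */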
200 JS_REQUIRES_STACK void
201 StackSpace::mark(JSTracer *trc)
204 * The correctness/completeness of marking depends on the continuity
205 * invariants described by the StackSegment and StackSpace definitions.
207 Value *end = firstUnused();
208 for (StackSegment *seg = currentSegment; seg; seg = seg->getPreviousInMemory()) {
209 if (seg->inContext()) {
210 /* This may be the only pointer to the initialVarObj. */
211 if (JSObject *varobj = seg->getInitialVarObj())
212 JS_CALL_OBJECT_TRACER(trc, varobj, "varobj");
214 /* Mark slots/args trailing off of the last stack frame. */
215 JSStackFrame *fp = seg->getCurrentFrame();
216 MarkValueRange(trc, fp->slots(), end, "stack");
218 /* Mark stack frames and slots/args between stack frames. */
219 JSStackFrame *initialFrame = seg->getInitialFrame();
220 for (JSStackFrame *f = fp; f != initialFrame; f = f->down) {
221 js_TraceStackFrame(trc, f);
222 MarkValueRange(trc, f->down->slots(), f->argEnd(), "stack");
225 /* Mark initialFrame stack frame and leading args. */
226 js_TraceStackFrame(trc, initialFrame);
227 MarkValueRange(trc, seg->getInitialArgBegin(), initialFrame->argEnd(), "stack");
228 } else {
229 /* Mark slots/args trailing off segment. */
230 MarkValueRange(trc, seg->getInitialArgBegin(), end, "stack");
232 end = seg->previousSegmentEnd();
236 JS_REQUIRES_STACK bool
237 StackSpace::pushSegmentForInvoke(JSContext *cx, uintN argc, InvokeArgsGuard &ag)
239 Value *start = firstUnused();
240 ptrdiff_t nvals = VALUES_PER_STACK_SEGMENT + 2 + argc;
241 if (!ensureSpace(cx, start, nvals))
242 return false;
244 StackSegment *seg = new(start) StackSegment;
245 seg->setPreviousInMemory(currentSegment);
246 currentSegment = seg;
248 ag.cx = cx;
249 ag.seg = seg;
250 ag.argv_ = seg->getInitialArgBegin() + 2;
251 ag.argc_ = argc;
253 /* Use invokeArgEnd to root [vp, vpend) until the frame is pushed. */
254 #ifdef DEBUG
255 ag.prevInvokeSegment = invokeSegment;
256 invokeSegment = seg;
257 ag.prevInvokeFrame = invokeFrame;
258 invokeFrame = NULL;
259 #endif
260 ag.prevInvokeArgEnd = invokeArgEnd;
261 invokeArgEnd = ag.argv() + ag.argc();
262 return true;
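/*
 * Illustrative layout sketch (not from the original source): the "+ 2" above
 * reserves the callee and |this| slots that precede the actual arguments, so
 * the newly pushed region looks roughly like
 *
 *   [StackSegment header][callee][this][arg0 ... arg(argc-1)]
 *
 * with ag.argv_ pointing at arg0 (getInitialArgBegin() + 2).
 */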
265 JS_REQUIRES_STACK void
266 StackSpace::popSegmentForInvoke(const InvokeArgsGuard &ag)
268 JS_ASSERT(!currentSegment->inContext());
269 JS_ASSERT(ag.seg == currentSegment);
270 JS_ASSERT(invokeSegment == currentSegment);
271 JS_ASSERT(invokeArgEnd == ag.argv() + ag.argc());
273 currentSegment = currentSegment->getPreviousInMemory();
275 #ifdef DEBUG
276 invokeSegment = ag.prevInvokeSegment;
277 invokeFrame = ag.prevInvokeFrame;
278 #endif
279 invokeArgEnd = ag.prevInvokeArgEnd;
283 * Always push a segment when starting a new execute frame since segments
284 * provide initialVarObj, which may change.
286 JS_REQUIRES_STACK bool
287 StackSpace::getExecuteFrame(JSContext *cx, JSStackFrame *down,
288 uintN vplen, uintN nfixed,
289 ExecuteFrameGuard &fg) const
291 Value *start = firstUnused();
292 ptrdiff_t nvals = VALUES_PER_STACK_SEGMENT + vplen + VALUES_PER_STACK_FRAME + nfixed;
293 if (!ensureSpace(cx, start, nvals))
294 return false;
296 fg.seg = new(start) StackSegment;
297 fg.vp = start + VALUES_PER_STACK_SEGMENT;
298 fg.fp = reinterpret_cast<JSStackFrame *>(fg.vp + vplen);
299 fg.down = down;
300 return true;
303 JS_REQUIRES_STACK void
304 StackSpace::pushExecuteFrame(JSContext *cx, ExecuteFrameGuard &fg,
305 JSFrameRegs &regs, JSObject *initialVarObj)
307 fg.fp->down = fg.down;
308 StackSegment *seg = fg.seg;
309 seg->setPreviousInMemory(currentSegment);
310 currentSegment = seg;
311 cx->pushSegmentAndFrame(seg, fg.fp, regs);
312 seg->setInitialVarObj(initialVarObj);
313 fg.cx = cx;
316 JS_REQUIRES_STACK void
317 StackSpace::popExecuteFrame(JSContext *cx)
319 JS_ASSERT(isCurrentAndActive(cx));
320 JS_ASSERT(cx->hasActiveSegment());
321 cx->popSegmentAndFrame();
322 currentSegment = currentSegment->getPreviousInMemory();
325 JS_REQUIRES_STACK
326 ExecuteFrameGuard::~ExecuteFrameGuard()
328 if (!pushed())
329 return;
330 JS_ASSERT(cx->activeSegment() == seg);
331 JS_ASSERT(cx->fp == fp);
332 cx->stack().popExecuteFrame(cx);
335 JS_REQUIRES_STACK void
336 StackSpace::getSynthesizedSlowNativeFrame(JSContext *cx, StackSegment *&seg, JSStackFrame *&fp)
338 Value *start = firstUnused();
339 JS_ASSERT(size_t(end - start) >= VALUES_PER_STACK_SEGMENT + VALUES_PER_STACK_FRAME);
340 seg = new(start) StackSegment;
341 fp = reinterpret_cast<JSStackFrame *>(seg + 1);
344 JS_REQUIRES_STACK void
345 StackSpace::pushSynthesizedSlowNativeFrame(JSContext *cx, StackSegment *seg, JSStackFrame *fp,
346 JSFrameRegs &regs)
348 JS_ASSERT(!fp->script && FUN_SLOW_NATIVE(fp->fun));
349 fp->down = cx->fp;
350 seg->setPreviousInMemory(currentSegment);
351 currentSegment = seg;
352 cx->pushSegmentAndFrame(seg, fp, regs);
353 seg->setInitialVarObj(NULL);
356 JS_REQUIRES_STACK void
357 StackSpace::popSynthesizedSlowNativeFrame(JSContext *cx)
359 JS_ASSERT(isCurrentAndActive(cx));
360 JS_ASSERT(cx->hasActiveSegment());
361 JS_ASSERT(currentSegment->getInitialFrame() == cx->fp);
362 JS_ASSERT(!cx->fp->script && FUN_SLOW_NATIVE(cx->fp->fun));
363 cx->popSegmentAndFrame();
364 currentSegment = currentSegment->getPreviousInMemory();
367 void
368 FrameRegsIter::initSlow()
370 if (!curseg) {
371 curfp = NULL;
372 cursp = NULL;
373 curpc = NULL;
374 return;
377 JS_ASSERT(curseg->isSuspended());
378 curfp = curseg->getSuspendedFrame();
379 cursp = curseg->getSuspendedRegs()->sp;
380 curpc = curseg->getSuspendedRegs()->pc;
384 * Using the invariant described in the js::StackSegment comment, we know that,
385 * when a pair of down-linked stack frames are in the same segment, the
386 * up-frame's address is the top of the down-frame's stack, modulo missing
387 * arguments.
389 void
390 FrameRegsIter::incSlow(JSStackFrame *up, JSStackFrame *down)
392 JS_ASSERT(down);
393 JS_ASSERT(curpc == down->savedPC);
394 JS_ASSERT(up == curseg->getInitialFrame());
397 * If the up-frame is in csup and the down-frame is in csdown, it is not
398 * necessarily the case that |csup->getPreviousInContext == csdown| or that
399 * |csdown->getSuspendedFrame == down| (because of indirect eval and
400 * JS_EvaluateInStackFrame). To compute down's sp, we need to do a linear
401 * scan, keeping track of what is immediately after down in memory.
403 curseg = curseg->getPreviousInContext();
404 cursp = curseg->getSuspendedRegs()->sp;
405 JSStackFrame *f = curseg->getSuspendedFrame();
406 while (f != down) {
407 if (f == curseg->getInitialFrame()) {
408 curseg = curseg->getPreviousInContext();
409 cursp = curseg->getSuspendedRegs()->sp;
410 f = curseg->getSuspendedFrame();
411 } else {
412 cursp = contiguousDownFrameSP(f);
413 f = f->down;
418 bool
419 JSThreadData::init()
421 #ifdef DEBUG
422 /* The data must be already zeroed. */
423 for (size_t i = 0; i != sizeof(*this); ++i)
424 JS_ASSERT(reinterpret_cast<uint8*>(this)[i] == 0);
425 #endif
426 if (!stackSpace.init())
427 return false;
428 #ifdef JS_TRACER
429 InitJIT(&traceMonitor);
430 #endif
431 dtoaState = js_NewDtoaState();
432 if (!dtoaState) {
433 finish();
434 return false;
436 nativeStackBase = GetNativeStackBase();
437 return true;
440 void
441 JSThreadData::finish()
443 #ifdef DEBUG
444 /* All GC-related things must already have been removed at this point. */
445 JS_ASSERT(gcFreeLists.isEmpty());
446 for (size_t i = 0; i != JS_ARRAY_LENGTH(scriptsToGC); ++i)
447 JS_ASSERT(!scriptsToGC[i]);
448 JS_ASSERT(!conservativeGC.isEnabled());
449 #endif
451 if (dtoaState)
452 js_DestroyDtoaState(dtoaState);
454 js_FinishGSNCache(&gsnCache);
455 propertyCache.~PropertyCache();
456 #if defined JS_TRACER
457 FinishJIT(&traceMonitor);
458 #endif
459 stackSpace.finish();
462 void
463 JSThreadData::mark(JSTracer *trc)
465 stackSpace.mark(trc);
466 #ifdef JS_TRACER
467 traceMonitor.mark(trc);
468 #endif
471 void
472 JSThreadData::purge(JSContext *cx)
474 gcFreeLists.purge();
476 js_PurgeGSNCache(&gsnCache);
478 /* FIXME: bug 506341. */
479 propertyCache.purge(cx);
481 #ifdef JS_TRACER
483 * If we are about to regenerate shapes, we have to flush the JIT cache,
484 * which will eventually abort any current recording.
486 if (cx->runtime->gcRegenShapes)
487 traceMonitor.needFlush = JS_TRUE;
488 #endif
490 /* Destroy eval'ed scripts. */
491 js_DestroyScriptsToGC(cx, this);
493 /* Purge cached native iterators. */
494 memset(cachedNativeIterators, 0, sizeof(cachedNativeIterators));
496 dtoaCache.s = NULL;
499 #ifdef JS_THREADSAFE
501 static JSThread *
502 NewThread(void *id)
504 JS_ASSERT(js_CurrentThreadId() == id);
505 JSThread *thread = (JSThread *) js_calloc(sizeof(JSThread));
506 if (!thread)
507 return NULL;
508 JS_INIT_CLIST(&thread->contextList);
509 thread->id = id;
510 if (!thread->data.init()) {
511 js_free(thread);
512 return NULL;
514 return thread;
517 static void
518 DestroyThread(JSThread *thread)
520 /* The thread must have zero contexts. */
521 JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList));
522 JS_ASSERT(!thread->titleToShare);
523 thread->data.finish();
524 js_free(thread);
527 JSThread *
528 js_CurrentThread(JSRuntime *rt)
530 void *id = js_CurrentThreadId();
531 JS_LOCK_GC(rt);
534 * We must not race with a GC that accesses cx->thread for JSContext
535 * instances on all threads, see bug 476934.
537 js_WaitForGC(rt);
539 JSThread *thread;
540 JSThread::Map::AddPtr p = rt->threads.lookupForAdd(id);
541 if (p) {
542 thread = p->value;
543 } else {
544 JS_UNLOCK_GC(rt);
545 thread = NewThread(id);
546 if (!thread)
547 return NULL;
548 JS_LOCK_GC(rt);
549 js_WaitForGC(rt);
550 if (!rt->threads.relookupOrAdd(p, id, thread)) {
551 JS_UNLOCK_GC(rt);
552 DestroyThread(thread);
553 return NULL;
556 /* Another thread cannot add an entry for the current thread id. */
557 JS_ASSERT(p->value == thread);
559 JS_ASSERT(thread->id == id);
561 return thread;
564 JSBool
565 js_InitContextThread(JSContext *cx)
567 JSThread *thread = js_CurrentThread(cx->runtime);
568 if (!thread)
569 return false;
571 JS_APPEND_LINK(&cx->threadLinks, &thread->contextList);
572 cx->thread = thread;
573 return true;
576 void
577 js_ClearContextThread(JSContext *cx)
579 JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
580 JS_REMOVE_AND_INIT_LINK(&cx->threadLinks);
581 cx->thread = NULL;
584 #endif /* JS_THREADSAFE */
586 JSThreadData *
587 js_CurrentThreadData(JSRuntime *rt)
589 #ifdef JS_THREADSAFE
590 JSThread *thread = js_CurrentThread(rt);
591 if (!thread)
592 return NULL;
594 return &thread->data;
595 #else
596 return &rt->threadData;
597 #endif
600 JSBool
601 js_InitThreads(JSRuntime *rt)
603 #ifdef JS_THREADSAFE
604 if (!rt->threads.init(4))
605 return false;
606 #else
607 if (!rt->threadData.init())
608 return false;
609 #endif
610 return true;
613 void
614 js_FinishThreads(JSRuntime *rt)
616 #ifdef JS_THREADSAFE
617 if (!rt->threads.initialized())
618 return;
619 for (JSThread::Map::Range r = rt->threads.all(); !r.empty(); r.popFront()) {
620 JSThread *thread = r.front().value;
621 JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList));
622 DestroyThread(thread);
624 rt->threads.clear();
625 #else
626 rt->threadData.finish();
627 #endif
630 void
631 js_PurgeThreads(JSContext *cx)
633 #ifdef JS_THREADSAFE
634 for (JSThread::Map::Enum e(cx->runtime->threads);
635 !e.empty();
636 e.popFront()) {
637 JSThread *thread = e.front().value;
639 if (JS_CLIST_IS_EMPTY(&thread->contextList)) {
640 JS_ASSERT(cx->thread != thread);
641 js_DestroyScriptsToGC(cx, &thread->data);
644 * The following is potentially suboptimal as it also zeros the
645 * caches in data, but the code simplicity wins here.
647 thread->data.gcFreeLists.purge();
648 DestroyThread(thread);
649 e.removeFront();
650 } else {
651 thread->data.purge(cx);
652 thread->gcThreadMallocBytes = JS_GC_THREAD_MALLOC_LIMIT;
655 #else
656 cx->runtime->threadData.purge(cx);
657 #endif
661 * JSOPTION_XML and JSOPTION_ANONFUNFIX must be part of the JS version
662 * associated with scripts, so in addition to storing them in cx->options we
663 * duplicate them in cx->version (script->version, etc.) and ensure each bit
664 * remains synchronized between the two through these two functions.
666 void
667 js_SyncOptionsToVersion(JSContext* cx)
669 if (cx->options & JSOPTION_XML)
670 cx->version |= JSVERSION_HAS_XML;
671 else
672 cx->version &= ~JSVERSION_HAS_XML;
673 if (cx->options & JSOPTION_ANONFUNFIX)
674 cx->version |= JSVERSION_ANONFUNFIX;
675 else
676 cx->version &= ~JSVERSION_ANONFUNFIX;
679 inline void
680 js_SyncVersionToOptions(JSContext* cx)
682 if (cx->version & JSVERSION_HAS_XML)
683 cx->options |= JSOPTION_XML;
684 else
685 cx->options &= ~JSOPTION_XML;
686 if (cx->version & JSVERSION_ANONFUNFIX)
687 cx->options |= JSOPTION_ANONFUNFIX;
688 else
689 cx->options &= ~JSOPTION_ANONFUNFIX;
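/*
 * Illustrative usage sketch (not part of the original file); JS_SetOptions
 * and JSVERSION_1_8 are only examples here.  Flipping an option is expected
 * to be followed by js_SyncOptionsToVersion so the bit also reaches
 * cx->version (and hence script->version at compile time), while
 * js_SetVersion below re-derives the option bits from the version:
 *
 *   cx->options |= JSOPTION_XML;
 *   js_SyncOptionsToVersion(cx);       // cx->version gains JSVERSION_HAS_XML
 *
 *   js_SetVersion(cx, JSVERSION_1_8);  // calls js_SyncVersionToOptions
 */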
692 void
693 js_OnVersionChange(JSContext *cx)
695 #ifdef DEBUG
696 JSVersion version = JSVERSION_NUMBER(cx);
698 JS_ASSERT(version == JSVERSION_DEFAULT || version >= JSVERSION_ECMA_3);
699 #endif
702 void
703 js_SetVersion(JSContext *cx, JSVersion version)
705 cx->version = version;
706 js_SyncVersionToOptions(cx);
707 js_OnVersionChange(cx);
710 JSContext *
711 js_NewContext(JSRuntime *rt, size_t stackChunkSize)
713 JSContext *cx;
714 JSBool ok, first;
715 JSContextCallback cxCallback;
718 * We need to initialize the new context fully before adding it to the
719 * runtime list. After that it can be accessed from another thread via
720 * js_ContextIterator.
722 void *mem = js_calloc(sizeof *cx);
723 if (!mem)
724 return NULL;
726 cx = new (mem) JSContext(rt);
727 cx->debugHooks = &rt->globalDebugHooks;
728 #if JS_STACK_GROWTH_DIRECTION > 0
729 cx->stackLimit = (jsuword) -1;
730 #endif
731 cx->scriptStackQuota = JS_DEFAULT_SCRIPT_STACK_QUOTA;
732 JS_STATIC_ASSERT(JSVERSION_DEFAULT == 0);
733 JS_ASSERT(cx->version == JSVERSION_DEFAULT);
734 VOUCH_DOES_NOT_REQUIRE_STACK();
736 JS_InitArenaPool(&cx->tempPool, "temp", TEMP_POOL_CHUNK_SIZE, sizeof(jsdouble),
737 &cx->scriptStackQuota);
738 JS_InitArenaPool(&cx->regExpPool, "regExp", TEMP_POOL_CHUNK_SIZE, sizeof(int),
739 &cx->scriptStackQuota);
741 JS_ASSERT(cx->resolveFlags == 0);
743 #ifdef JS_THREADSAFE
744 if (!js_InitContextThread(cx)) {
745 FreeContext(cx);
746 return NULL;
748 #endif
751 * Here the GC lock is still held after js_InitContextThread took it and
752 * the GC is not running on another thread.
754 for (;;) {
755 if (rt->state == JSRTS_UP) {
756 JS_ASSERT(!JS_CLIST_IS_EMPTY(&rt->contextList));
757 first = JS_FALSE;
758 break;
760 if (rt->state == JSRTS_DOWN) {
761 JS_ASSERT(JS_CLIST_IS_EMPTY(&rt->contextList));
762 first = JS_TRUE;
763 rt->state = JSRTS_LAUNCHING;
764 break;
766 JS_WAIT_CONDVAR(rt->stateChange, JS_NO_TIMEOUT);
769 * During the above wait after we are notified about the state change
770 * but before we wake up, another thread could enter the GC from
771      * js_DestroyContext, bug 478336. So we must wait here to ensure that,
772      * when we exit the loop with the first flag set to true, the GC is
773      * finished.
775 js_WaitForGC(rt);
777 JS_APPEND_LINK(&cx->link, &rt->contextList);
778 JS_UNLOCK_GC(rt);
780 js_InitRandom(cx);
783 * If cx is the first context on this runtime, initialize well-known atoms,
784 * keywords, numbers, and strings. If one of these steps should fail, the
785 * runtime will be left in a partially initialized state, with zeroes and
786 * nulls stored in the default-initialized remainder of the struct. We'll
787 * clean the runtime up under js_DestroyContext, because cx will be "last"
788 * as well as "first".
790 if (first) {
791 #ifdef JS_THREADSAFE
792 JS_BeginRequest(cx);
793 #endif
794 ok = js_InitCommonAtoms(cx);
797 * scriptFilenameTable may be left over from a previous episode of
798 * non-zero contexts alive in rt, so don't re-init the table if it's
799 * not necessary.
801 if (ok && !rt->scriptFilenameTable)
802 ok = js_InitRuntimeScriptState(rt);
803 if (ok)
804 ok = js_InitRuntimeNumberState(cx);
805 if (ok) {
807 * Ensure that the empty scopes initialized by
808 * JSScope::initRuntimeState get the desired special shapes.
809 * (The rt->state dance above guarantees that this abuse of
810 * rt->shapeGen is thread-safe.)
812 uint32 shapeGen = rt->shapeGen;
813 rt->shapeGen = 0;
814 ok = JSScope::initRuntimeState(cx);
815 if (rt->shapeGen < shapeGen)
816 rt->shapeGen = shapeGen;
819 #ifdef JS_THREADSAFE
820 JS_EndRequest(cx);
821 #endif
822 if (!ok) {
823 js_DestroyContext(cx, JSDCM_NEW_FAILED);
824 return NULL;
827 AutoLockGC lock(rt);
828 rt->state = JSRTS_UP;
829 JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
832 cxCallback = rt->cxCallback;
833 if (cxCallback && !cxCallback(cx, JSCONTEXT_NEW)) {
834 js_DestroyContext(cx, JSDCM_NEW_FAILED);
835 return NULL;
838 /* Using ContextAllocPolicy, so init after JSContext is ready. */
839 if (!cx->busyArrays.init()) {
840 FreeContext(cx);
841 return NULL;
844 return cx;
847 #if defined DEBUG && defined XP_UNIX
848 # include <stdio.h>
850 class JSAutoFile {
851 public:
852 JSAutoFile() : mFile(NULL) {}
854 ~JSAutoFile() {
855 if (mFile)
856 fclose(mFile);
859 FILE *open(const char *fname, const char *mode) {
860 return mFile = fopen(fname, mode);
862 operator FILE *() {
863 return mFile;
866 private:
867 FILE *mFile;
870 static void
871 DumpEvalCacheMeter(JSContext *cx)
873 if (const char *filename = getenv("JS_EVALCACHE_STATFILE")) {
874 struct {
875 const char *name;
876 ptrdiff_t offset;
877 } table[] = {
878 #define frob(x) { #x, offsetof(JSEvalCacheMeter, x) }
879 EVAL_CACHE_METER_LIST(frob)
880 #undef frob
882 JSEvalCacheMeter *ecm = &JS_THREAD_DATA(cx)->evalCacheMeter;
884 static JSAutoFile fp;
885 if (!fp && !fp.open(filename, "w"))
886 return;
888 fprintf(fp, "eval cache meter (%p):\n",
889 #ifdef JS_THREADSAFE
890 (void *) cx->thread
891 #else
892 (void *) cx->runtime
893 #endif
895 for (uintN i = 0; i < JS_ARRAY_LENGTH(table); ++i) {
896 fprintf(fp, "%-8.8s %llu\n",
897 table[i].name,
898 (unsigned long long int) *(uint64 *)((uint8 *)ecm + table[i].offset));
900 fprintf(fp, "hit ratio %g%%\n", ecm->hit * 100. / ecm->probe);
901 fprintf(fp, "avg steps %g\n", double(ecm->step) / ecm->probe);
902 fflush(fp);
905 # define DUMP_EVAL_CACHE_METER(cx) DumpEvalCacheMeter(cx)
907 static void
908 DumpFunctionCountMap(const char *title, JSRuntime::FunctionCountMap &map, FILE *fp)
910 fprintf(fp, "\n%s count map:\n", title);
912 for (JSRuntime::FunctionCountMap::Range r = map.all(); !r.empty(); r.popFront()) {
913 JSFunction *fun = r.front().key;
914 int32 count = r.front().value;
916 fprintf(fp, "%10d %s:%u\n", count, fun->u.i.script->filename, fun->u.i.script->lineno);
920 static void
921 DumpFunctionMeter(JSContext *cx)
923 if (const char *filename = cx->runtime->functionMeterFilename) {
924 struct {
925 const char *name;
926 ptrdiff_t offset;
927 } table[] = {
928 #define frob(x) { #x, offsetof(JSFunctionMeter, x) }
929 FUNCTION_KIND_METER_LIST(frob)
930 #undef frob
932 JSFunctionMeter *fm = &cx->runtime->functionMeter;
934 static JSAutoFile fp;
935 if (!fp && !fp.open(filename, "w"))
936 return;
938 fprintf(fp, "function meter (%s):\n", cx->runtime->lastScriptFilename);
939 for (uintN i = 0; i < JS_ARRAY_LENGTH(table); ++i)
940 fprintf(fp, "%-19.19s %d\n", table[i].name, *(int32 *)((uint8 *)fm + table[i].offset));
942 DumpFunctionCountMap("method read barrier", cx->runtime->methodReadBarrierCountMap, fp);
943 DumpFunctionCountMap("unjoined function", cx->runtime->unjoinedFunctionCountMap, fp);
945 putc('\n', fp);
946 fflush(fp);
950 # define DUMP_FUNCTION_METER(cx) DumpFunctionMeter(cx)
952 #endif /* DEBUG && XP_UNIX */
954 #ifndef DUMP_EVAL_CACHE_METER
955 # define DUMP_EVAL_CACHE_METER(cx) ((void) 0)
956 #endif
958 #ifndef DUMP_FUNCTION_METER
959 # define DUMP_FUNCTION_METER(cx) ((void) 0)
960 #endif
962 void
963 js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
965 JSRuntime *rt;
966 JSContextCallback cxCallback;
967 JSBool last;
969 JS_ASSERT(!cx->enumerators);
971 rt = cx->runtime;
972 #ifdef JS_THREADSAFE
974      * For API compatibility we allow destroying contexts without a thread in
975 * optimized builds. We assume that the embedding knows that an OOM error
976 * cannot happen in JS_SetContextThread.
978 JS_ASSERT(cx->thread && CURRENT_THREAD_IS_ME(cx->thread));
979 if (!cx->thread)
980 JS_SetContextThread(cx);
982 JS_ASSERT_IF(rt->gcRunning, cx->outstandingRequests == 0);
983 #endif
985 if (mode != JSDCM_NEW_FAILED) {
986 cxCallback = rt->cxCallback;
987 if (cxCallback) {
989 * JSCONTEXT_DESTROY callback is not allowed to fail and must
990 * return true.
992 #ifdef DEBUG
993 JSBool callbackStatus =
994 #endif
995 cxCallback(cx, JSCONTEXT_DESTROY);
996 JS_ASSERT(callbackStatus);
1000 JS_LOCK_GC(rt);
1001 JS_ASSERT(rt->state == JSRTS_UP || rt->state == JSRTS_LAUNCHING);
1002 #ifdef JS_THREADSAFE
1004 * Typically we are called outside a request, so ensure that the GC is not
1005 * running before removing the context from rt->contextList, see bug 477021.
1007 if (cx->requestDepth == 0)
1008 js_WaitForGC(rt);
1009 #endif
1010 JS_REMOVE_LINK(&cx->link);
1011 last = (rt->contextList.next == &rt->contextList);
1012 if (last)
1013 rt->state = JSRTS_LANDING;
1014 if (last || mode == JSDCM_FORCE_GC || mode == JSDCM_MAYBE_GC
1015 #ifdef JS_THREADSAFE
1016 || cx->requestDepth != 0
1017 #endif
1019 JS_ASSERT(!rt->gcRunning);
1021 JS_UNLOCK_GC(rt);
1023 if (last) {
1024 #ifdef JS_THREADSAFE
1026 * If cx is not in a request already, begin one now so that we wait
1027 * for any racing GC started on a not-last context to finish, before
1028 * we plow ahead and unpin atoms. Note that even though we begin a
1029 * request here if necessary, we end all requests on cx below before
1030 * forcing a final GC. This lets any not-last context destruction
1031 * racing in another thread try to force or maybe run the GC, but by
1032 * that point, rt->state will not be JSRTS_UP, and that GC attempt
1033 * will return early.
1035 if (cx->requestDepth == 0)
1036 JS_BeginRequest(cx);
1037 #endif
1039 JSScope::finishRuntimeState(cx);
1040 js_FinishRuntimeNumberState(cx);
1042 /* Unpin all common atoms before final GC. */
1043 js_FinishCommonAtoms(cx);
1045 /* Clear debugging state to remove GC roots. */
1046 JS_ClearAllTraps(cx);
1047 JS_ClearAllWatchPoints(cx);
1050 /* Remove more GC roots in regExpStatics, then collect garbage. */
1051 JS_ClearRegExpRoots(cx);
1053 #ifdef JS_THREADSAFE
1055 * Destroying a context implicitly calls JS_EndRequest(). Also, we must
1056 * end our request here in case we are "last" -- in that event, another
1057 * js_DestroyContext that was not last might be waiting in the GC for our
1058 * request to end. We'll let it run below, just before we do the truly
1059 * final GC and then free atom state.
1061 while (cx->requestDepth != 0)
1062 JS_EndRequest(cx);
1063 #endif
1065 if (last) {
1066 js_GC(cx, GC_LAST_CONTEXT);
1067 DUMP_EVAL_CACHE_METER(cx);
1068 DUMP_FUNCTION_METER(cx);
1070 /* Take the runtime down, now that it has no contexts or atoms. */
1071 JS_LOCK_GC(rt);
1072 rt->state = JSRTS_DOWN;
1073 JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
1074 } else {
1075 if (mode == JSDCM_FORCE_GC)
1076 js_GC(cx, GC_NORMAL);
1077 else if (mode == JSDCM_MAYBE_GC)
1078 JS_MaybeGC(cx);
1079 JS_LOCK_GC(rt);
1080 js_WaitForGC(rt);
1083 #ifdef JS_THREADSAFE
1084 js_ClearContextThread(cx);
1085 #endif
1086 #ifdef JS_METER_DST_OFFSET_CACHING
1087 cx->dstOffsetCache.dumpStats();
1088 #endif
1089 JS_UNLOCK_GC(rt);
1090 FreeContext(cx);
1093 static void
1094 FreeContext(JSContext *cx)
1096 #ifdef JS_THREADSAFE
1097 JS_ASSERT(!cx->thread);
1098 #endif
1100 /* Free the stuff hanging off of cx. */
1101 cx->regExpStatics.clear();
1102 VOUCH_DOES_NOT_REQUIRE_STACK();
1103 JS_FinishArenaPool(&cx->tempPool);
1104 JS_FinishArenaPool(&cx->regExpPool);
1106 if (cx->lastMessage)
1107 js_free(cx->lastMessage);
1109 /* Remove any argument formatters. */
1110 JSArgumentFormatMap *map = cx->argumentFormatMap;
1111 while (map) {
1112 JSArgumentFormatMap *temp = map;
1113 map = map->next;
1114 cx->free(temp);
1117 /* Destroy the resolve recursion damper. */
1118 if (cx->resolvingTable) {
1119 JS_DHashTableDestroy(cx->resolvingTable);
1120 cx->resolvingTable = NULL;
1123 /* Finally, free cx itself. */
1124 cx->~JSContext();
1125 js_free(cx);
1128 JSBool
1129 js_ValidContextPointer(JSRuntime *rt, JSContext *cx)
1131 JSCList *cl;
1133 for (cl = rt->contextList.next; cl != &rt->contextList; cl = cl->next) {
1134 if (cl == &cx->link)
1135 return JS_TRUE;
1137 JS_RUNTIME_METER(rt, deadContexts);
1138 return JS_FALSE;
1141 JSContext *
1142 js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp)
1144 JSContext *cx = *iterp;
1146 Conditionally<AutoLockGC> lockIf(!!unlocked, rt);
1147 cx = js_ContextFromLinkField(cx ? cx->link.next : rt->contextList.next);
1148 if (&cx->link == &rt->contextList)
1149 cx = NULL;
1150 *iterp = cx;
1151 return cx;
1154 JS_FRIEND_API(JSContext *)
1155 js_NextActiveContext(JSRuntime *rt, JSContext *cx)
1157 JSContext *iter = cx;
1158 #ifdef JS_THREADSAFE
1159 while ((cx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
1160 if (cx->requestDepth)
1161 break;
1163 return cx;
1164 #else
1165 return js_ContextIterator(rt, JS_FALSE, &iter);
1166 #endif
1169 static JSDHashNumber
1170 resolving_HashKey(JSDHashTable *table, const void *ptr)
1172 const JSResolvingKey *key = (const JSResolvingKey *)ptr;
1174 return (JSDHashNumber(uintptr_t(key->obj)) >> JS_GCTHING_ALIGN) ^ JSID_BITS(key->id);
1177 static JSBool
1178 resolving_MatchEntry(JSDHashTable *table,
1179 const JSDHashEntryHdr *hdr,
1180 const void *ptr)
1182 const JSResolvingEntry *entry = (const JSResolvingEntry *)hdr;
1183 const JSResolvingKey *key = (const JSResolvingKey *)ptr;
1185 return entry->key.obj == key->obj && entry->key.id == key->id;
1188 static const JSDHashTableOps resolving_dhash_ops = {
1189 JS_DHashAllocTable,
1190 JS_DHashFreeTable,
1191 resolving_HashKey,
1192 resolving_MatchEntry,
1193 JS_DHashMoveEntryStub,
1194 JS_DHashClearEntryStub,
1195 JS_DHashFinalizeStub,
1196 NULL
1199 JSBool
1200 js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
1201 JSResolvingEntry **entryp)
1203 JSDHashTable *table;
1204 JSResolvingEntry *entry;
1206 table = cx->resolvingTable;
1207 if (!table) {
1208 table = JS_NewDHashTable(&resolving_dhash_ops, NULL,
1209 sizeof(JSResolvingEntry),
1210 JS_DHASH_MIN_SIZE);
1211 if (!table)
1212 goto outofmem;
1213 cx->resolvingTable = table;
1216 entry = (JSResolvingEntry *)
1217 JS_DHashTableOperate(table, key, JS_DHASH_ADD);
1218 if (!entry)
1219 goto outofmem;
1221 if (entry->flags & flag) {
1222 /* An entry for (key, flag) exists already -- dampen recursion. */
1223 entry = NULL;
1224 } else {
1225 /* Fill in key if we were the first to add entry, then set flag. */
1226 if (!entry->key.obj)
1227 entry->key = *key;
1228 entry->flags |= flag;
1230 *entryp = entry;
1231 return JS_TRUE;
1233 outofmem:
1234 JS_ReportOutOfMemory(cx);
1235 return JS_FALSE;
1238 void
1239 js_StopResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
1240 JSResolvingEntry *entry, uint32 generation)
1242 JSDHashTable *table;
1245 * Clear flag from entry->flags and return early if other flags remain.
1246 * We must take care to re-lookup entry if the table has changed since
1247 * it was found by js_StartResolving.
1249 table = cx->resolvingTable;
1250 if (!entry || table->generation != generation) {
1251 entry = (JSResolvingEntry *)
1252 JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
1254 JS_ASSERT(JS_DHASH_ENTRY_IS_BUSY(&entry->hdr));
1255 entry->flags &= ~flag;
1256 if (entry->flags)
1257 return;
1260 * Do a raw remove only if fewer entries were removed than would cause
1261 * alpha to be less than .5 (alpha is at most .75). Otherwise, we just
1262 * call JS_DHashTableOperate to re-lookup the key and remove its entry,
1263 * compressing or shrinking the table as needed.
1265 if (table->removedCount < JS_DHASH_TABLE_SIZE(table) >> 2)
1266 JS_DHashTableRawRemove(table, &entry->hdr);
1267 else
1268 JS_DHashTableOperate(table, key, JS_DHASH_REMOVE);
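/*
 * Worked example (not from the original source): for a table with
 * JS_DHASH_TABLE_SIZE(table) == 16, the threshold above is 16 >> 2 == 4, so
 * raw removes (which leave a "removed" sentinel in place) are used while
 * removedCount is 0..3; once 4 or more sentinels have accumulated, the full
 * JS_DHASH_REMOVE path is taken so the table can be compressed or shrunk.
 */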
1271 static void
1272 ReportError(JSContext *cx, const char *message, JSErrorReport *reportp,
1273 JSErrorCallback callback, void *userRef)
1276 * Check the error report, and set a JavaScript-catchable exception
1277 * if the error is defined to have an associated exception. If an
1278 * exception is thrown, then the JSREPORT_EXCEPTION flag will be set
1279 * on the error report, and exception-aware hosts should ignore it.
1281 JS_ASSERT(reportp);
1282 if ((!callback || callback == js_GetErrorMessage) &&
1283 reportp->errorNumber == JSMSG_UNCAUGHT_EXCEPTION)
1284 reportp->flags |= JSREPORT_EXCEPTION;
1287 * Call the error reporter only if an exception wasn't raised.
1289 * If an exception was raised, then we call the debugErrorHook
1290 * (if present) to give it a chance to see the error before it
1291      * propagates out of scope. This is needed for compatibility
1292 * with the old scheme.
1294 if (!JS_IsRunning(cx) ||
1295 !js_ErrorToException(cx, message, reportp, callback, userRef)) {
1296 js_ReportErrorAgain(cx, message, reportp);
1297 } else if (cx->debugHooks->debugErrorHook && cx->errorReporter) {
1298 JSDebugErrorHook hook = cx->debugHooks->debugErrorHook;
1299 /* test local in case debugErrorHook changed on another thread */
1300 if (hook)
1301 hook(cx, message, reportp, cx->debugHooks->debugErrorHookData);
1305 /* The report must be initially zeroed. */
1306 static void
1307 PopulateReportBlame(JSContext *cx, JSErrorReport *report)
1310 * Walk stack until we find a frame that is associated with some script
1311 * rather than a native frame.
1313 for (JSStackFrame *fp = js_GetTopStackFrame(cx); fp; fp = fp->down) {
1314 if (fp->pc(cx)) {
1315 report->filename = fp->script->filename;
1316 report->lineno = js_FramePCToLineNumber(cx, fp);
1317 break;
1323 * We don't post an exception in this case, since doing so runs into
1324  * complications of pre-allocating an exception object, which would require
1325  * running the Exception class initializer early, etc.
1326 * Instead we just invoke the errorReporter with an "Out Of Memory"
1327 * type message, and then hope the process ends swiftly.
1329 void
1330 js_ReportOutOfMemory(JSContext *cx)
1332 #ifdef JS_TRACER
1334 * If we are in a builtin called directly from trace, don't report an
1335 * error. We will retry in the interpreter instead.
1337 if (JS_ON_TRACE(cx) && !cx->bailExit)
1338 return;
1339 #endif
1341 JSErrorReport report;
1342 JSErrorReporter onError = cx->errorReporter;
1344 /* Get the message for this error, but we won't expand any arguments. */
1345 const JSErrorFormatString *efs =
1346 js_GetLocalizedErrorMessage(cx, NULL, NULL, JSMSG_OUT_OF_MEMORY);
1347 const char *msg = efs ? efs->format : "Out of memory";
1349 /* Fill out the report, but don't do anything that requires allocation. */
1350 PodZero(&report);
1351 report.flags = JSREPORT_ERROR;
1352 report.errorNumber = JSMSG_OUT_OF_MEMORY;
1353 PopulateReportBlame(cx, &report);
1356 * If debugErrorHook is present then we give it a chance to veto sending
1357 * the error on to the regular ErrorReporter. We also clear a pending
1358 * exception if any now so the hooks can replace the out-of-memory error
1359 * by a script-catchable exception.
1361 cx->throwing = JS_FALSE;
1362 if (onError) {
1363 JSDebugErrorHook hook = cx->debugHooks->debugErrorHook;
1364 if (hook &&
1365 !hook(cx, msg, &report, cx->debugHooks->debugErrorHookData)) {
1366 onError = NULL;
1370 if (onError)
1371 onError(cx, msg, &report);
1374 void
1375 js_ReportOutOfScriptQuota(JSContext *cx)
1377 JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
1378 JSMSG_SCRIPT_STACK_QUOTA);
1381 void
1382 js_ReportOverRecursed(JSContext *cx)
1384 JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
1387 void
1388 js_ReportAllocationOverflow(JSContext *cx)
1390 JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_ALLOC_OVERFLOW);
1394 * Given flags and the state of cx, decide whether we should report an
1395 * error, a warning, or just continue execution normally. Return
1396 * true if we should continue normally, without reporting anything;
1397 * otherwise, adjust *flags as appropriate and return false.
1399 static bool
1400 checkReportFlags(JSContext *cx, uintN *flags)
1402 if (JSREPORT_IS_STRICT_MODE_ERROR(*flags)) {
1404 * Error in strict code; warning with strict option; okay otherwise.
1405 * We assume that if the top frame is a native, then it is strict if
1406 * the nearest scripted frame is strict, see bug 536306.
1408 JSStackFrame *fp = js_GetScriptedCaller(cx, NULL);
1409 if (fp && fp->script->strictModeCode)
1410 *flags &= ~JSREPORT_WARNING;
1411 else if (JS_HAS_STRICT_OPTION(cx))
1412 *flags |= JSREPORT_WARNING;
1413 else
1414 return true;
1415 } else if (JSREPORT_IS_STRICT(*flags)) {
1416 /* Warning/error only when JSOPTION_STRICT is set. */
1417 if (!JS_HAS_STRICT_OPTION(cx))
1418 return true;
1421 /* Warnings become errors when JSOPTION_WERROR is set. */
1422 if (JSREPORT_IS_WARNING(*flags) && JS_HAS_WERROR_OPTION(cx))
1423 *flags &= ~JSREPORT_WARNING;
1425 return false;
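/*
 * Worked example (not part of the original source) of the policy above: a
 * report raised with JSREPORT_WARNING | JSREPORT_STRICT_MODE_ERROR from
 * non-strict code is dropped entirely unless JSOPTION_STRICT is set, in
 * which case it stays a warning; if JSOPTION_WERROR is also set, the
 * warning bit is cleared and it is reported as an error.
 */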
1428 JSBool
1429 js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap)
1431 char *message;
1432 jschar *ucmessage;
1433 size_t messagelen;
1434 JSErrorReport report;
1435 JSBool warning;
1437 if (checkReportFlags(cx, &flags))
1438 return JS_TRUE;
1440 message = JS_vsmprintf(format, ap);
1441 if (!message)
1442 return JS_FALSE;
1443 messagelen = strlen(message);
1445 PodZero(&report);
1446 report.flags = flags;
1447 report.errorNumber = JSMSG_USER_DEFINED_ERROR;
1448 report.ucmessage = ucmessage = js_InflateString(cx, message, &messagelen);
1449 PopulateReportBlame(cx, &report);
1451 warning = JSREPORT_IS_WARNING(report.flags);
1453 ReportError(cx, message, &report, NULL, NULL);
1454 js_free(message);
1455 cx->free(ucmessage);
1456 return warning;
1460 * The arguments from ap need to be packaged up into an array and stored
1461 * into the report struct.
1463 * The format string addressed by the error number may contain operands
1464 * identified by the format {N}, where N is a decimal digit. Each of these
1465 * is to be replaced by the Nth argument from the va_list. The complete
1466 * message is placed into reportp->ucmessage converted to a JSString.
1468 * Returns true if the expansion succeeds (can fail if out of memory).
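/*
 * Illustrative example (not from the original source), assuming a format
 * string of "{0} is not a function" and a single char argument "parseInt":
 * argCount == 1, len == 21 and totalArgsLength == 8, so the expansion below
 * sizes the output as expandedLength = 21 - 3*1 + 8 = 26 jschars and copies
 * "parseInt" over "{0}", yielding "parseInt is not a function".
 */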
1470 JSBool
1471 js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback,
1472 void *userRef, const uintN errorNumber,
1473 char **messagep, JSErrorReport *reportp,
1474 bool charArgs, va_list ap)
1476 const JSErrorFormatString *efs;
1477 int i;
1478 int argCount;
1480 *messagep = NULL;
1482 /* Most calls supply js_GetErrorMessage; if this is so, assume NULL. */
1483 if (!callback || callback == js_GetErrorMessage)
1484 efs = js_GetLocalizedErrorMessage(cx, userRef, NULL, errorNumber);
1485 else
1486 efs = callback(userRef, NULL, errorNumber);
1487 if (efs) {
1488 size_t totalArgsLength = 0;
1489 size_t argLengths[10]; /* only {0} thru {9} supported */
1490 argCount = efs->argCount;
1491 JS_ASSERT(argCount <= 10);
1492 if (argCount > 0) {
1494 * Gather the arguments into an array, and accumulate
1495 * their sizes. We allocate 1 more than necessary and
1496 * null it out to act as the caboose when we free the
1497 * pointers later.
1499 reportp->messageArgs = (const jschar **)
1500 cx->malloc(sizeof(jschar *) * (argCount + 1));
1501 if (!reportp->messageArgs)
1502 return JS_FALSE;
1503 reportp->messageArgs[argCount] = NULL;
1504 for (i = 0; i < argCount; i++) {
1505 if (charArgs) {
1506 char *charArg = va_arg(ap, char *);
1507 size_t charArgLength = strlen(charArg);
1508 reportp->messageArgs[i]
1509 = js_InflateString(cx, charArg, &charArgLength);
1510 if (!reportp->messageArgs[i])
1511 goto error;
1512 } else {
1513 reportp->messageArgs[i] = va_arg(ap, jschar *);
1515 argLengths[i] = js_strlen(reportp->messageArgs[i]);
1516 totalArgsLength += argLengths[i];
1518 /* NULL-terminate for easy copying. */
1519 reportp->messageArgs[i] = NULL;
1522 * Parse the error format, substituting the argument X
1523 * for {X} in the format.
1525 if (argCount > 0) {
1526 if (efs->format) {
1527 jschar *buffer, *fmt, *out;
1528 int expandedArgs = 0;
1529 size_t expandedLength;
1530 size_t len = strlen(efs->format);
1532 buffer = fmt = js_InflateString (cx, efs->format, &len);
1533 if (!buffer)
1534 goto error;
1535 expandedLength = len
1536 - (3 * argCount) /* exclude the {n} */
1537 + totalArgsLength;
1540 * Note - the above calculation assumes that each argument
1541 * is used once and only once in the expansion !!!
1543 reportp->ucmessage = out = (jschar *)
1544 cx->malloc((expandedLength + 1) * sizeof(jschar));
1545 if (!out) {
1546 cx->free(buffer);
1547 goto error;
1549 while (*fmt) {
1550 if (*fmt == '{') {
1551 if (isdigit(fmt[1])) {
1552 int d = JS7_UNDEC(fmt[1]);
1553 JS_ASSERT(d < argCount);
1554 js_strncpy(out, reportp->messageArgs[d],
1555 argLengths[d]);
1556 out += argLengths[d];
1557 fmt += 3;
1558 expandedArgs++;
1559 continue;
1562 *out++ = *fmt++;
1564 JS_ASSERT(expandedArgs == argCount);
1565 *out = 0;
1566 cx->free(buffer);
1567 *messagep =
1568 js_DeflateString(cx, reportp->ucmessage,
1569 (size_t)(out - reportp->ucmessage));
1570 if (!*messagep)
1571 goto error;
1573 } else {
1575 * Zero arguments: the format string (if it exists) is the
1576 * entire message.
1578 if (efs->format) {
1579 size_t len;
1580 *messagep = JS_strdup(cx, efs->format);
1581 if (!*messagep)
1582 goto error;
1583 len = strlen(*messagep);
1584 reportp->ucmessage = js_InflateString(cx, *messagep, &len);
1585 if (!reportp->ucmessage)
1586 goto error;
1590 if (*messagep == NULL) {
1591 /* where's the right place for this ??? */
1592 const char *defaultErrorMessage
1593 = "No error message available for error number %d";
1594 size_t nbytes = strlen(defaultErrorMessage) + 16;
1595 *messagep = (char *)cx->malloc(nbytes);
1596 if (!*messagep)
1597 goto error;
1598 JS_snprintf(*messagep, nbytes, defaultErrorMessage, errorNumber);
1600 return JS_TRUE;
1602 error:
1603 if (reportp->messageArgs) {
1604 /* free the arguments only if we allocated them */
1605 if (charArgs) {
1606 i = 0;
1607 while (reportp->messageArgs[i])
1608 cx->free((void *)reportp->messageArgs[i++]);
1610 cx->free((void *)reportp->messageArgs);
1611 reportp->messageArgs = NULL;
1613 if (reportp->ucmessage) {
1614 cx->free((void *)reportp->ucmessage);
1615 reportp->ucmessage = NULL;
1617 if (*messagep) {
1618 cx->free((void *)*messagep);
1619 *messagep = NULL;
1621 return JS_FALSE;
1624 JSBool
1625 js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
1626 void *userRef, const uintN errorNumber,
1627 JSBool charArgs, va_list ap)
1629 JSErrorReport report;
1630 char *message;
1631 JSBool warning;
1633 if (checkReportFlags(cx, &flags))
1634 return JS_TRUE;
1635 warning = JSREPORT_IS_WARNING(flags);
1637 PodZero(&report);
1638 report.flags = flags;
1639 report.errorNumber = errorNumber;
1640 PopulateReportBlame(cx, &report);
1642 if (!js_ExpandErrorArguments(cx, callback, userRef, errorNumber,
1643 &message, &report, !!charArgs, ap)) {
1644 return JS_FALSE;
1647 ReportError(cx, message, &report, callback, userRef);
1649 if (message)
1650 cx->free(message);
1651 if (report.messageArgs) {
1653 * js_ExpandErrorArguments owns its messageArgs only if it had to
1654 * inflate the arguments (from regular |char *|s).
1656 if (charArgs) {
1657 int i = 0;
1658 while (report.messageArgs[i])
1659 cx->free((void *)report.messageArgs[i++]);
1661 cx->free((void *)report.messageArgs);
1663 if (report.ucmessage)
1664 cx->free((void *)report.ucmessage);
1666 return warning;
1669 JS_FRIEND_API(void)
1670 js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *reportp)
1672 JSErrorReporter onError;
1674 if (!message)
1675 return;
1677 if (cx->lastMessage)
1678 js_free(cx->lastMessage);
1679 cx->lastMessage = JS_strdup(cx, message);
1680 if (!cx->lastMessage)
1681 return;
1682 onError = cx->errorReporter;
1685 * If debugErrorHook is present then we give it a chance to veto
1686 * sending the error on to the regular ErrorReporter.
1688 if (onError) {
1689 JSDebugErrorHook hook = cx->debugHooks->debugErrorHook;
1690 if (hook &&
1691 !hook(cx, cx->lastMessage, reportp,
1692 cx->debugHooks->debugErrorHookData)) {
1693 onError = NULL;
1696 if (onError)
1697 onError(cx, cx->lastMessage, reportp);
1700 void
1701 js_ReportIsNotDefined(JSContext *cx, const char *name)
1703 JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_DEFINED, name);
1706 JSBool
1707 js_ReportIsNullOrUndefined(JSContext *cx, intN spindex, const Value &v,
1708 JSString *fallback)
1710 char *bytes;
1711 JSBool ok;
1713 bytes = DecompileValueGenerator(cx, spindex, v, fallback);
1714 if (!bytes)
1715 return JS_FALSE;
1717 if (strcmp(bytes, js_undefined_str) == 0 ||
1718 strcmp(bytes, js_null_str) == 0) {
1719 ok = JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
1720 js_GetErrorMessage, NULL,
1721 JSMSG_NO_PROPERTIES, bytes,
1722 NULL, NULL);
1723 } else if (v.isUndefined()) {
1724 ok = JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
1725 js_GetErrorMessage, NULL,
1726 JSMSG_UNEXPECTED_TYPE, bytes,
1727 js_undefined_str, NULL);
1728 } else {
1729 JS_ASSERT(v.isNull());
1730 ok = JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
1731 js_GetErrorMessage, NULL,
1732 JSMSG_UNEXPECTED_TYPE, bytes,
1733 js_null_str, NULL);
1736 cx->free(bytes);
1737 return ok;
1740 void
1741 js_ReportMissingArg(JSContext *cx, const Value &v, uintN arg)
1743 char argbuf[11];
1744 char *bytes;
1745 JSAtom *atom;
1747 JS_snprintf(argbuf, sizeof argbuf, "%u", arg);
1748 bytes = NULL;
1749 if (IsFunctionObject(v)) {
1750 atom = GET_FUNCTION_PRIVATE(cx, &v.toObject())->atom;
1751 bytes = DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
1752 v, ATOM_TO_STRING(atom));
1753 if (!bytes)
1754 return;
1756 JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
1757 JSMSG_MISSING_FUN_ARG, argbuf,
1758 bytes ? bytes : "");
1759 cx->free(bytes);
1762 JSBool
1763 js_ReportValueErrorFlags(JSContext *cx, uintN flags, const uintN errorNumber,
1764 intN spindex, const Value &v, JSString *fallback,
1765 const char *arg1, const char *arg2)
1767 char *bytes;
1768 JSBool ok;
1770 JS_ASSERT(js_ErrorFormatString[errorNumber].argCount >= 1);
1771 JS_ASSERT(js_ErrorFormatString[errorNumber].argCount <= 3);
1772 bytes = DecompileValueGenerator(cx, spindex, v, fallback);
1773 if (!bytes)
1774 return JS_FALSE;
1776 ok = JS_ReportErrorFlagsAndNumber(cx, flags, js_GetErrorMessage,
1777 NULL, errorNumber, bytes, arg1, arg2);
1778 cx->free(bytes);
1779 return ok;
1782 #if defined DEBUG && defined XP_UNIX
1783 /* For gdb usage. */
1784 void js_traceon(JSContext *cx) { cx->tracefp = stderr; cx->tracePrevPc = NULL; }
1785 void js_traceoff(JSContext *cx) { cx->tracefp = NULL; }
1786 #endif
1788 JSErrorFormatString js_ErrorFormatString[JSErr_Limit] = {
1789 #define MSG_DEF(name, number, count, exception, format) \
1790 { format, count, exception } ,
1791 #include "js.msg"
1792 #undef MSG_DEF
1795 JS_FRIEND_API(const JSErrorFormatString *)
1796 js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
1798 if ((errorNumber > 0) && (errorNumber < JSErr_Limit))
1799 return &js_ErrorFormatString[errorNumber];
1800 return NULL;
1803 JSBool
1804 js_InvokeOperationCallback(JSContext *cx)
1806 JS_ASSERT_REQUEST_DEPTH(cx);
1807 JS_ASSERT(JS_THREAD_DATA(cx)->operationCallbackFlag);
1810 * Reset the callback flag first, then yield. If another thread is racing
1811 * us here we will accumulate another callback request which will be
1812 * serviced at the next opportunity.
1814 JS_THREAD_DATA(cx)->operationCallbackFlag = 0;
1817 * Unless we are going to run the GC, we automatically yield the current
1818 * context every time the operation callback is hit since we might be
1819 * called as a result of an impending GC, which would deadlock if we do
1820 * not yield. Operation callbacks are supposed to happen rarely (seconds,
1821 * not milliseconds) so it is acceptable to yield at every callback.
1823 JSRuntime *rt = cx->runtime;
1824 if (rt->gcIsNeeded) {
1825 js_GC(cx, GC_NORMAL);
1828 * On trace we can exceed the GC quota, see comments in NewGCArena. So
1829 * we check the quota and report OOM here when we are off trace.
1831 bool delayedOutOfMemory;
1832 JS_LOCK_GC(rt);
1833 delayedOutOfMemory = (rt->gcBytes > rt->gcMaxBytes);
1834 JS_UNLOCK_GC(rt);
1835 if (delayedOutOfMemory) {
1836 js_ReportOutOfMemory(cx);
1837 return false;
1840 #ifdef JS_THREADSAFE
1841 else {
1842 JS_YieldRequest(cx);
1844 #endif
1846 JSOperationCallback cb = cx->operationCallback;
1849 * Important: Additional callbacks can occur inside the callback handler
1850 * if it re-enters the JS engine. The embedding must ensure that the
1851 * callback is disconnected before attempting such re-entry.
1854 return !cb || cb(cx);
1857 void
1858 js_TriggerAllOperationCallbacks(JSRuntime *rt, JSBool gcLocked)
1860 #ifdef JS_THREADSAFE
1861 Conditionally<AutoLockGC> lockIf(!gcLocked, rt);
1862 #endif
1863 for (ThreadDataIter i(rt); !i.empty(); i.popFront())
1864 i.threadData()->triggerOperationCallback();
1867 JSStackFrame *
1868 js_GetScriptedCaller(JSContext *cx, JSStackFrame *fp)
1870 if (!fp)
1871 fp = js_GetTopStackFrame(cx);
1872 while (fp) {
1873 if (fp->script)
1874 return fp;
1875 fp = fp->down;
1877 return NULL;
1880 jsbytecode*
1881 js_GetCurrentBytecodePC(JSContext* cx)
1883 jsbytecode *pc, *imacpc;
1885 #ifdef JS_TRACER
1886 if (JS_ON_TRACE(cx)) {
1887 pc = cx->bailExit->pc;
1888 imacpc = cx->bailExit->imacpc;
1889 } else
1890 #endif
1892 JS_ASSERT_NOT_ON_TRACE(cx); /* for static analysis */
1893 pc = cx->regs ? cx->regs->pc : NULL;
1894 if (!pc)
1895 return NULL;
1896 imacpc = cx->fp->imacpc;
1900 * If we are inside GetProperty_tn or similar, return a pointer to the
1901 * current instruction in the script, not the CALL instruction in the
1902 * imacro, for the benefit of callers doing bytecode inspection.
1904 return (*pc == JSOP_CALL && imacpc) ? imacpc : pc;
1907 bool
1908 js_CurrentPCIsInImacro(JSContext *cx)
1910 #ifdef JS_TRACER
1911 VOUCH_DOES_NOT_REQUIRE_STACK();
1912 return (JS_ON_TRACE(cx) ? cx->bailExit->imacpc : cx->fp->imacpc) != NULL;
1913 #else
1914 return false;
1915 #endif
1918 void
1919 DSTOffsetCache::purge()
1922 * NB: The initial range values are carefully chosen to result in a cache
1923 * miss on first use given the range of possible values. Be careful
1924 * to keep these values and the caching algorithm in sync!
1926 offsetMilliseconds = 0;
1927 rangeStartSeconds = rangeEndSeconds = INT64_MIN;
1929 #ifdef JS_METER_DST_OFFSET_CACHING
1930 totalCalculations = 0;
1931 hit = 0;
1932 missIncreasing = missDecreasing = 0;
1933 missIncreasingOffsetChangeExpand = missIncreasingOffsetChangeUpper = 0;
1934 missDecreasingOffsetChangeExpand = missDecreasingOffsetChangeLower = 0;
1935 missLargeIncrease = missLargeDecrease = 0;
1936 #endif
1938 sanityCheck();
1942 * Since getDSTOffsetMilliseconds guarantees that all times seen will be
1943 * positive, we can initialize the range at construction time with large
1944 * negative numbers to ensure the first computation is always a cache miss and
1945 * doesn't return a bogus offset.
1947 DSTOffsetCache::DSTOffsetCache()
1949 purge();
1952 JSContext::JSContext(JSRuntime *rt)
1953 : runtime(rt),
1954 compartment(rt->defaultCompartment),
1955 fp(NULL),
1956 regs(NULL),
1957 regExpStatics(this),
1958 busyArrays(this)
1961 void
1962 JSContext::pushSegmentAndFrame(js::StackSegment *newseg, JSStackFrame *newfp,
1963 JSFrameRegs &newregs)
1965 if (hasActiveSegment()) {
1966 JS_ASSERT(fp->savedPC == JSStackFrame::sInvalidPC);
1967 fp->savedPC = regs->pc;
1968 currentSegment->suspend(fp, regs);
1970 newseg->setPreviousInContext(currentSegment);
1971 currentSegment = newseg;
1972 #ifdef DEBUG
1973 newfp->savedPC = JSStackFrame::sInvalidPC;
1974 #endif
1975 setCurrentFrame(newfp);
1976 setCurrentRegs(&newregs);
1977 newseg->joinContext(this, newfp);
1980 void
1981 JSContext::popSegmentAndFrame()
1983 JS_ASSERT(currentSegment->maybeContext() == this);
1984 JS_ASSERT(currentSegment->getInitialFrame() == fp);
1985 JS_ASSERT(fp->savedPC == JSStackFrame::sInvalidPC);
1986 currentSegment->leaveContext();
1987 currentSegment = currentSegment->getPreviousInContext();
1988 if (currentSegment) {
1989 if (currentSegment->isSaved()) {
1990 setCurrentFrame(NULL);
1991 setCurrentRegs(NULL);
1992 } else {
1993 setCurrentFrame(currentSegment->getSuspendedFrame());
1994 setCurrentRegs(currentSegment->getSuspendedRegs());
1995 currentSegment->resume();
1996 #ifdef DEBUG
1997 fp->savedPC = JSStackFrame::sInvalidPC;
1998 #endif
2000 } else {
2001 JS_ASSERT(fp->down == NULL);
2002 setCurrentFrame(NULL);
2003 setCurrentRegs(NULL);
2007 void
2008 JSContext::saveActiveSegment()
2010 JS_ASSERT(hasActiveSegment());
2011 currentSegment->save(fp, regs);
2012 JS_ASSERT(fp->savedPC == JSStackFrame::sInvalidPC);
2013 fp->savedPC = regs->pc;
2014 setCurrentFrame(NULL);
2015 setCurrentRegs(NULL);
2018 void
2019 JSContext::restoreSegment()
2021 js::StackSegment *ccs = currentSegment;
2022 setCurrentFrame(ccs->getSuspendedFrame());
2023 setCurrentRegs(ccs->getSuspendedRegs());
2024 ccs->restore();
2025 #ifdef DEBUG
2026 fp->savedPC = JSStackFrame::sInvalidPC;
2027 #endif
2030 JSGenerator *
2031 JSContext::generatorFor(JSStackFrame *fp) const
2033 JS_ASSERT(stack().contains(fp) && fp->isGenerator());
2034 JS_ASSERT(!fp->isFloatingGenerator());
2035 JS_ASSERT(!genStack.empty());
2037 if (JS_LIKELY(fp == genStack.back()->liveFrame))
2038 return genStack.back();
2040 /* General case; should only be needed for debug APIs. */
2041 for (size_t i = 0; i < genStack.length(); ++i) {
2042 if (genStack[i]->liveFrame == fp)
2043 return genStack[i];
2045 JS_NOT_REACHED("no matching generator");
2046 return NULL;
2049 StackSegment *
2050 JSContext::containingSegment(const JSStackFrame *target)
2052 /* The context may have nothing running. */
2053 StackSegment *seg = currentSegment;
2054 if (!seg)
2055 return NULL;
2057     /* The active segment's top frame is cx->fp. */
2058 if (fp) {
2059 JS_ASSERT(activeSegment() == seg);
2060 JSStackFrame *f = fp;
2061 JSStackFrame *stop = seg->getInitialFrame()->down;
2062 for (; f != stop; f = f->down) {
2063 if (f == target)
2064 return seg;
2066 seg = seg->getPreviousInContext();
2069 /* A suspended segment's top frame is its suspended frame. */
2070 for (; seg; seg = seg->getPreviousInContext()) {
2071 JSStackFrame *f = seg->getSuspendedFrame();
2072 JSStackFrame *stop = seg->getInitialFrame()->down;
2073 for (; f != stop; f = f->down) {
2074 if (f == target)
2075 return seg;
2079 return NULL;
2082 void
2083 JSContext::checkMallocGCPressure(void *p)
2085 if (!p) {
2086 js_ReportOutOfMemory(this);
2087 return;
2090 #ifdef JS_THREADSAFE
2091 JS_ASSERT(thread);
2092 JS_ASSERT(thread->gcThreadMallocBytes <= 0);
2093 ptrdiff_t n = JS_GC_THREAD_MALLOC_LIMIT - thread->gcThreadMallocBytes;
2094 thread->gcThreadMallocBytes = JS_GC_THREAD_MALLOC_LIMIT;
2096 AutoLockGC lock(runtime);
2097 runtime->gcMallocBytes -= n;
2100 * Trigger the GC on memory pressure but only if we are inside a request
2101 * and not inside a GC.
2103 if (runtime->isGCMallocLimitReached() && requestDepth != 0)
2104 #endif
2106 if (!runtime->gcRunning) {
2107 JS_ASSERT(runtime->isGCMallocLimitReached());
2108 runtime->gcMallocBytes = -1;
2111 * Empty the GC free lists to trigger a last-ditch GC when any GC
2112          * thing is allocated later on this thread. This makes it unnecessary
2113          * to check for memory pressure on the fast path of the GC
2114 * allocator. We cannot touch the free lists on other threads as
2115 * their manipulation is not thread-safe.
2117 JS_THREAD_DATA(this)->gcFreeLists.purge();
2118 js_TriggerGC(this, true);
2123 bool
2124 JSContext::isConstructing()
2126 #ifdef JS_TRACER
2127 if (JS_ON_TRACE(this)) {
2128 JS_ASSERT(bailExit);
2129 return *bailExit->pc == JSOP_NEW;
2131 #endif
2132 JSStackFrame *fp = js_GetTopStackFrame(this);
2133 return fp && (fp->flags & JSFRAME_CONSTRUCTING);
2138  * Release the pool's arenas if the pool has existed for longer than the
2139  * limit specified by gcEmptyArenaPoolLifespan.
2141 inline void
2142 FreeOldArenas(JSRuntime *rt, JSArenaPool *pool)
2144 JSArena *a = pool->current;
2145 if (a == pool->first.next && a->avail == a->base + sizeof(int64)) {
2146 int64 age = JS_Now() - *(int64 *) a->base;
2147 if (age > int64(rt->gcEmptyArenaPoolLifespan) * 1000)
2148 JS_FreeArenaPool(pool);
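/*
 * Illustrative note (not part of the original source): JS_Now() reports
 * microseconds, so the int64(...) * 1000 above treats
 * gcEmptyArenaPoolLifespan as a count of milliseconds; e.g. a lifespan of
 * 30000 corresponds to an arena age of 30,000,000 microseconds.  The
 * timestamp compared against is assumed to have been stored in the first
 * int64 of the arena by the code that allocates from the pool.
 */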
2152 void
2153 JSContext::purge()
2155 FreeOldArenas(runtime, &regExpPool);
2159 namespace js {
2161 void
2162 SetPendingException(JSContext *cx, const Value &v)
2164 cx->throwing = JS_TRUE;
2165 cx->exception = v;
2168 } /* namespace js */