/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=80:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * JS execution context.
 */

#include <new>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#include "jsstdint.h"

#include "jstypes.h"
#include "jsarena.h" /* Added by JSIFY */
#include "jsutil.h" /* Added by JSIFY */
#include "jsclist.h"
#include "jsprf.h"
#include "jsatom.h"
#include "jscntxt.h"
#include "jsversion.h"
#include "jsdbgapi.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
#include "jsiter.h"
#include "jslock.h"
#include "jsmath.h"
#include "jsnativestack.h"
#include "jsnum.h"
#include "jsobj.h"
#include "jsopcode.h"
#include "jspubtd.h"
#include "jsscan.h"
#include "jsscope.h"
#include "jsscript.h"
#include "jsstaticcheck.h"
#include "jsstr.h"
#include "jstracer.h"

#include "jscntxtinlines.h"

#ifdef XP_WIN
# include <windows.h>
#elif defined(XP_OS2)
# define INCL_DOSMEMMGR
# include <os2.h>
#else
# include <unistd.h>
# include <sys/mman.h>
# if !defined(MAP_ANONYMOUS)
#  if defined(MAP_ANON)
#   define MAP_ANONYMOUS MAP_ANON
#  else
#   define MAP_ANONYMOUS 0
#  endif
# endif
#endif

using namespace js;

static const size_t ARENA_HEADER_SIZE_HACK = 40;
static const size_t TEMP_POOL_CHUNK_SIZE = 4096 - ARENA_HEADER_SIZE_HACK;

static void
FreeContext(JSContext *cx);

#ifdef DEBUG
JS_REQUIRES_STACK bool
StackSegment::contains(const JSStackFrame *fp) const
{
    JS_ASSERT(inContext());
    JSStackFrame *start;
    JSStackFrame *stop;
    if (isActive()) {
        JS_ASSERT(cx->hasfp());
        start = cx->fp();
        stop = cx->activeSegment()->initialFrame->down;
    } else {
        JS_ASSERT(suspendedRegs && suspendedRegs->fp);
        start = suspendedRegs->fp;
        stop = initialFrame->down;
    }
    for (JSStackFrame *f = start; f != stop; f = f->down) {
        if (f == fp)
            return true;
    }
    return false;
}
#endif

bool
StackSpace::init()
{
    void *p;
#ifdef XP_WIN
    p = VirtualAlloc(NULL, CAPACITY_BYTES, MEM_RESERVE, PAGE_READWRITE);
    if (!p)
        return false;
    void *check = VirtualAlloc(p, COMMIT_BYTES, MEM_COMMIT, PAGE_READWRITE);
    if (p != check)
        return false;
    base = reinterpret_cast<Value *>(p);
    commitEnd = base + COMMIT_VALS;
    end = base + CAPACITY_VALS;
#elif defined(XP_OS2)
    if (DosAllocMem(&p, CAPACITY_BYTES, PAG_COMMIT | PAG_READ | PAG_WRITE | OBJ_ANY) &&
        DosAllocMem(&p, CAPACITY_BYTES, PAG_COMMIT | PAG_READ | PAG_WRITE))
        return false;
    base = reinterpret_cast<Value *>(p);
    end = base + CAPACITY_VALS;
#else
    JS_ASSERT(CAPACITY_BYTES % getpagesize() == 0);
    p = mmap(NULL, CAPACITY_BYTES, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return false;
    base = reinterpret_cast<Value *>(p);
    end = base + CAPACITY_VALS;
#endif
    return true;
}

void
StackSpace::finish()
{
#ifdef XP_WIN
    VirtualFree(base, (commitEnd - base) * sizeof(Value), MEM_DECOMMIT);
    VirtualFree(base, 0, MEM_RELEASE);
#elif defined(XP_OS2)
    DosFreeMem(base);
#else
#ifdef SOLARIS
    munmap((caddr_t)base, CAPACITY_BYTES);
#else
    munmap(base, CAPACITY_BYTES);
#endif
#endif
}

#ifdef XP_WIN
JS_FRIEND_API(bool)
StackSpace::bumpCommit(Value *from, ptrdiff_t nvals) const
{
    JS_ASSERT(end - from >= nvals);
    Value *newCommit = commitEnd;
    Value *request = from + nvals;

    /* Use a dumb loop; will probably execute once. */
    JS_ASSERT((end - newCommit) % COMMIT_VALS == 0);
    do {
        newCommit += COMMIT_VALS;
        JS_ASSERT((end - newCommit) >= 0);
    } while (newCommit < request);

    /* The cast is safe because CAPACITY_BYTES is small. */
    int32 size = static_cast<int32>(newCommit - commitEnd) * sizeof(Value);

    if (!VirtualAlloc(commitEnd, size, MEM_COMMIT, PAGE_READWRITE))
        return false;
    commitEnd = newCommit;
    return true;
}
#endif

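/*
 * [Editorial note, not part of the original tree] StackSpace reserves
 * CAPACITY_BYTES of address space up front and, on Windows, commits pages
 * lazily: growth past commitEnd triggers bumpCommit above, which commits in
 * COMMIT_VALS-sized steps. A rough sketch of the growth check performed by
 * callers (assuming ensureSpace is the only caller):
 *
 *   if (end - from < nvals)              // beyond the reservation: fail
 *       return false;
 *   if (commitEnd - from < nvals)        // beyond committed pages: commit more
 *       return bumpCommit(from, nvals);
 *   return true;
 */
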
JS_REQUIRES_STACK void
StackSpace::mark(JSTracer *trc)
{
    /*
     * The correctness/completeness of marking depends on the continuity
     * invariants described by the StackSegment and StackSpace definitions.
     */
    Value *end = firstUnused();
    for (StackSegment *seg = currentSegment; seg; seg = seg->getPreviousInMemory()) {
        if (seg->inContext()) {
            /* This may be the only pointer to the initialVarObj. */
            if (JSObject *varobj = seg->getInitialVarObj())
                JS_CALL_OBJECT_TRACER(trc, varobj, "varobj");

            /* Mark slots/args trailing off of the last stack frame. */
            JSStackFrame *fp = seg->getCurrentFrame();
            MarkValueRange(trc, fp->slots(), end, "stack");

            /* Mark stack frames and slots/args between stack frames. */
            JSStackFrame *initialFrame = seg->getInitialFrame();
            for (JSStackFrame *f = fp; f != initialFrame; f = f->down) {
                js_TraceStackFrame(trc, f);
                MarkValueRange(trc, f->down->slots(), f->argEnd(), "stack");
            }

            /* Mark initialFrame stack frame and leading args. */
            js_TraceStackFrame(trc, initialFrame);
            MarkValueRange(trc, seg->getInitialArgBegin(), initialFrame->argEnd(), "stack");
        } else {
            /* Mark slots/args trailing off segment. */
            MarkValueRange(trc, seg->getInitialArgBegin(), end, "stack");
        }
        end = seg->previousSegmentEnd();
    }
}

JS_REQUIRES_STACK bool
StackSpace::pushSegmentForInvoke(JSContext *cx, uintN argc, InvokeArgsGuard &ag)
{
    Value *start = firstUnused();
    ptrdiff_t nvals = VALUES_PER_STACK_SEGMENT + 2 + argc;
    if (!ensureSpace(cx, start, nvals))
        return false;

    StackSegment *seg = new(start) StackSegment;
    seg->setPreviousInMemory(currentSegment);
    currentSegment = seg;

    ag.cx = cx;
    ag.seg = seg;
    ag.argv_ = seg->getInitialArgBegin() + 2;
    ag.argc_ = argc;

    /* Use invokeArgEnd to root [vp, vpend) until the frame is pushed. */
#ifdef DEBUG
    ag.prevInvokeSegment = invokeSegment;
    invokeSegment = seg;
    ag.prevInvokeFrame = invokeFrame;
    invokeFrame = NULL;
#endif
    ag.prevInvokeArgEnd = invokeArgEnd;
    invokeArgEnd = ag.argv() + ag.argc();
    return true;
}

JS_REQUIRES_STACK void
StackSpace::popSegmentForInvoke(const InvokeArgsGuard &ag)
{
    JS_ASSERT(!currentSegment->inContext());
    JS_ASSERT(ag.seg == currentSegment);
    JS_ASSERT(invokeSegment == currentSegment);
    JS_ASSERT(invokeArgEnd == ag.argv() + ag.argc());

    currentSegment = currentSegment->getPreviousInMemory();

#ifdef DEBUG
    invokeSegment = ag.prevInvokeSegment;
    invokeFrame = ag.prevInvokeFrame;
#endif
    invokeArgEnd = ag.prevInvokeArgEnd;
}

/*
 * Always push a segment when starting a new execute frame since segments
 * provide initialVarObj, which may change.
 */
JS_REQUIRES_STACK bool
StackSpace::getExecuteFrame(JSContext *cx, JSStackFrame *down,
                            uintN vplen, uintN nfixed,
                            FrameGuard &fg) const
{
    Value *start = firstUnused();
    ptrdiff_t nvals = VALUES_PER_STACK_SEGMENT + vplen + VALUES_PER_STACK_FRAME + nfixed;
    if (!ensureSpace(cx, start, nvals))
        return false;

    fg.seg = new(start) StackSegment;
    fg.vp = start + VALUES_PER_STACK_SEGMENT;
    fg.fp = reinterpret_cast<JSStackFrame *>(fg.vp + vplen);
    fg.down = down;
    return true;
}

JS_REQUIRES_STACK void
StackSpace::pushExecuteFrame(JSContext *cx, FrameGuard &fg,
                             JSFrameRegs &regs, JSObject *initialVarObj)
{
    fg.fp->down = fg.down;
    StackSegment *seg = fg.seg;
    seg->setPreviousInMemory(currentSegment);
    currentSegment = seg;

    regs.fp = fg.fp;
    cx->pushSegmentAndFrame(seg, regs);
    seg->setInitialVarObj(initialVarObj);
    fg.cx = cx;
}

JS_REQUIRES_STACK void
StackSpace::popFrame(JSContext *cx)
{
    JS_ASSERT(isCurrentAndActive(cx));
    JS_ASSERT(cx->hasActiveSegment());
    cx->popSegmentAndFrame();
    currentSegment = currentSegment->getPreviousInMemory();
}

JS_REQUIRES_STACK
FrameGuard::~FrameGuard()
{
    if (!pushed())
        return;
    JS_ASSERT(cx->activeSegment() == seg);
    JS_ASSERT(cx->maybefp() == fp);
    cx->stack().popFrame(cx);
}

JS_REQUIRES_STACK void
StackSpace::getSynthesizedSlowNativeFrame(JSContext *cx, StackSegment *&seg, JSStackFrame *&fp)
{
    Value *start = firstUnused();
    JS_ASSERT(size_t(end - start) >= VALUES_PER_STACK_SEGMENT + VALUES_PER_STACK_FRAME);
    seg = new(start) StackSegment;
    fp = reinterpret_cast<JSStackFrame *>(seg + 1);
}

JS_REQUIRES_STACK void
StackSpace::pushSynthesizedSlowNativeFrame(JSContext *cx, StackSegment *seg, JSFrameRegs &regs)
{
    JS_ASSERT(!regs.fp->hasScript() && FUN_SLOW_NATIVE(regs.fp->getFunction()));
    regs.fp->down = cx->maybefp();
    seg->setPreviousInMemory(currentSegment);
    currentSegment = seg;
    cx->pushSegmentAndFrame(seg, regs);
    seg->setInitialVarObj(NULL);
}

JS_REQUIRES_STACK void
StackSpace::popSynthesizedSlowNativeFrame(JSContext *cx)
{
    JS_ASSERT(isCurrentAndActive(cx));
    JS_ASSERT(cx->hasActiveSegment());
    JS_ASSERT(currentSegment->getInitialFrame() == cx->fp());
    JS_ASSERT(!cx->fp()->hasScript() && FUN_SLOW_NATIVE(cx->fp()->getFunction()));
    cx->popSegmentAndFrame();
    currentSegment = currentSegment->getPreviousInMemory();
}

JS_REQUIRES_STACK bool
StackSpace::pushDummyFrame(JSContext *cx, FrameGuard &fg, JSFrameRegs &regs, JSObject *scopeChain)
{
    if (!getExecuteFrame(cx, cx->maybefp(), 0, 0, fg))
        return false;

    JSStackFrame *fp = fg.getFrame();
    PodZero(fp);
    fp->setScopeChain(scopeChain);
    fp->flags = JSFRAME_DUMMY;

    regs.pc = NULL;
    regs.sp = fp->slots();

    pushExecuteFrame(cx, fg, regs, NULL);
    return true;
}

void
FrameRegsIter::initSlow()
{
    if (!curseg) {
        curfp = NULL;
        cursp = NULL;
        curpc = NULL;
        return;
    }

    JS_ASSERT(curseg->isSuspended());
    curfp = curseg->getSuspendedFrame();
    cursp = curseg->getSuspendedRegs()->sp;
    curpc = curseg->getSuspendedRegs()->pc;
}

/*
 * Using the invariant described in the js::StackSegment comment, we know that,
 * when a pair of down-linked stack frames are in the same segment, the
 * up-frame's address is the top of the down-frame's stack, modulo missing
 * arguments.
 */
void
FrameRegsIter::incSlow(JSStackFrame *up, JSStackFrame *down)
{
    JS_ASSERT(down);
    JS_ASSERT(curpc == down->savedPC);
    JS_ASSERT(up == curseg->getInitialFrame());

    /*
     * If the up-frame is in csup and the down-frame is in csdown, it is not
     * necessarily the case that |csup->getPreviousInContext == csdown| or that
     * |csdown->getSuspendedFrame == down| (because of indirect eval and
     * JS_EvaluateInStackFrame). To compute down's sp, we need to do a linear
     * scan, keeping track of what is immediately after down in memory.
     */
    curseg = curseg->getPreviousInContext();
    cursp = curseg->getSuspendedRegs()->sp;
    JSStackFrame *f = curseg->getSuspendedFrame();
    while (f != down) {
        if (f == curseg->getInitialFrame()) {
            curseg = curseg->getPreviousInContext();
            cursp = curseg->getSuspendedRegs()->sp;
            f = curseg->getSuspendedFrame();
        } else {
            cursp = contiguousDownFrameSP(f);
            f = f->down;
        }
    }
}

bool
JSThreadData::init()
{
#ifdef DEBUG
    /* The data must be already zeroed. */
    for (size_t i = 0; i != sizeof(*this); ++i)
        JS_ASSERT(reinterpret_cast<uint8*>(this)[i] == 0);
#endif
    if (!stackSpace.init())
        return false;
#ifdef JS_TRACER
    InitJIT(&traceMonitor);
#endif
    dtoaState = js_NewDtoaState();
    if (!dtoaState) {
        finish();
        return false;
    }
    nativeStackBase = GetNativeStackBase();
    return true;
}

void
JSThreadData::finish()
{
#ifdef DEBUG
    /* All GC-related things must be already removed at this point. */
    JS_ASSERT(gcFreeLists.isEmpty());
    for (size_t i = 0; i != JS_ARRAY_LENGTH(scriptsToGC); ++i)
        JS_ASSERT(!scriptsToGC[i]);
    JS_ASSERT(!conservativeGC.isEnabled());
#endif

    if (dtoaState)
        js_DestroyDtoaState(dtoaState);

    js_FinishGSNCache(&gsnCache);
    propertyCache.~PropertyCache();
#if defined JS_TRACER
    FinishJIT(&traceMonitor);
#endif
    stackSpace.finish();
}

void
JSThreadData::mark(JSTracer *trc)
{
    stackSpace.mark(trc);
#ifdef JS_TRACER
    traceMonitor.mark(trc);
#endif
}

void
JSThreadData::purge(JSContext *cx)
{
    gcFreeLists.purge();

    js_PurgeGSNCache(&gsnCache);

    /* FIXME: bug 506341. */
    propertyCache.purge(cx);

#ifdef JS_TRACER
    /*
     * If we are about to regenerate shapes, we have to flush the JIT cache,
     * which will eventually abort any current recording.
     */
    if (cx->runtime->gcRegenShapes)
        traceMonitor.needFlush = JS_TRUE;
#endif

    /* Destroy eval'ed scripts. */
    js_DestroyScriptsToGC(cx, this);

    /* Purge cached native iterators. */
    memset(cachedNativeIterators, 0, sizeof(cachedNativeIterators));

    dtoaCache.s = NULL;
}

#ifdef JS_THREADSAFE

static JSThread *
NewThread(void *id)
{
    JS_ASSERT(js_CurrentThreadId() == id);
    JSThread *thread = (JSThread *) js_calloc(sizeof(JSThread));
    if (!thread)
        return NULL;
    JS_INIT_CLIST(&thread->contextList);
    thread->id = id;
    if (!thread->data.init()) {
        js_free(thread);
        return NULL;
    }
    return thread;
}

static void
DestroyThread(JSThread *thread)
{
    /* The thread must have zero contexts. */
    JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList));
    JS_ASSERT(!thread->titleToShare);
    thread->data.finish();
    js_free(thread);
}

JSThread *
js_CurrentThread(JSRuntime *rt)
{
    void *id = js_CurrentThreadId();
    JS_LOCK_GC(rt);

    /*
     * We must not race with a GC that accesses cx->thread for JSContext
     * instances on all threads, see bug 476934.
     */
    js_WaitForGC(rt);

    JSThread *thread;
    JSThread::Map::AddPtr p = rt->threads.lookupForAdd(id);
    if (p) {
        thread = p->value;
    } else {
        JS_UNLOCK_GC(rt);
        thread = NewThread(id);
        if (!thread)
            return NULL;
        JS_LOCK_GC(rt);
        js_WaitForGC(rt);
        if (!rt->threads.relookupOrAdd(p, id, thread)) {
            JS_UNLOCK_GC(rt);
            DestroyThread(thread);
            return NULL;
        }

        /* Another thread cannot add an entry for the current thread id. */
        JS_ASSERT(p->value == thread);
    }
    JS_ASSERT(thread->id == id);

    return thread;
}

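/*
 * [Editorial note, not part of the original tree] The unlock/relock dance
 * above is deliberate: NewThread allocates and calls JSThreadData::init, so
 * the GC lock is dropped around it and the table slot is re-looked-up with
 * relookupOrAdd afterwards. Since only the current thread can insert an entry
 * for its own thread id, a successful relookup must still yield the thread we
 * just created, which the assertion checks.
 */
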
JSBool
js_InitContextThread(JSContext *cx)
{
    JSThread *thread = js_CurrentThread(cx->runtime);
    if (!thread)
        return false;

    JS_APPEND_LINK(&cx->threadLinks, &thread->contextList);
    cx->thread = thread;
    return true;
}

void
js_ClearContextThread(JSContext *cx)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
    JS_REMOVE_AND_INIT_LINK(&cx->threadLinks);
    cx->thread = NULL;
}

#endif /* JS_THREADSAFE */

JSThreadData *
js_CurrentThreadData(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
    JSThread *thread = js_CurrentThread(rt);
    if (!thread)
        return NULL;

    return &thread->data;
#else
    return &rt->threadData;
#endif
}

JSBool
js_InitThreads(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
    if (!rt->threads.init(4))
        return false;
#else
    if (!rt->threadData.init())
        return false;
#endif
    return true;
}

void
js_FinishThreads(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
    if (!rt->threads.initialized())
        return;
    for (JSThread::Map::Range r = rt->threads.all(); !r.empty(); r.popFront()) {
        JSThread *thread = r.front().value;
        JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList));
        DestroyThread(thread);
    }
    rt->threads.clear();
#else
    rt->threadData.finish();
#endif
}

void
js_PurgeThreads(JSContext *cx)
{
#ifdef JS_THREADSAFE
    for (JSThread::Map::Enum e(cx->runtime->threads);
         !e.empty();
         e.popFront()) {
        JSThread *thread = e.front().value;

        if (JS_CLIST_IS_EMPTY(&thread->contextList)) {
            JS_ASSERT(cx->thread != thread);
            js_DestroyScriptsToGC(cx, &thread->data);

            /*
             * The following is potentially suboptimal as it also zeros the
             * caches in data, but the code simplicity wins here.
             */
            thread->data.gcFreeLists.purge();
            DestroyThread(thread);
            e.removeFront();
        } else {
            thread->data.purge(cx);
            thread->gcThreadMallocBytes = JS_GC_THREAD_MALLOC_LIMIT;
        }
    }
#else
    cx->runtime->threadData.purge(cx);
#endif
}

/*
 * JSOPTION_XML and JSOPTION_ANONFUNFIX must be part of the JS version
 * associated with scripts, so in addition to storing them in cx->options we
 * duplicate them in cx->version (script->version, etc.) and ensure each bit
 * remains synchronized between the two through these two functions.
 */
void
js_SyncOptionsToVersion(JSContext* cx)
{
    if (cx->options & JSOPTION_XML)
        cx->version |= JSVERSION_HAS_XML;
    else
        cx->version &= ~JSVERSION_HAS_XML;
    if (cx->options & JSOPTION_ANONFUNFIX)
        cx->version |= JSVERSION_ANONFUNFIX;
    else
        cx->version &= ~JSVERSION_ANONFUNFIX;
}

inline void
js_SyncVersionToOptions(JSContext* cx)
{
    if (cx->version & JSVERSION_HAS_XML)
        cx->options |= JSOPTION_XML;
    else
        cx->options &= ~JSOPTION_XML;
    if (cx->version & JSVERSION_ANONFUNFIX)
        cx->options |= JSOPTION_ANONFUNFIX;
    else
        cx->options &= ~JSOPTION_ANONFUNFIX;
}

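/*
 * [Editorial note, not part of the original tree] Illustrative use of the
 * pair above: after flipping an option bit, the version word must be resynced,
 * e.g.
 *
 *   cx->options |= JSOPTION_XML;
 *   js_SyncOptionsToVersion(cx);   // JSVERSION_HAS_XML is now set in cx->version
 *
 * js_SetVersion below performs the reverse sync via js_SyncVersionToOptions.
 */
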
void
js_OnVersionChange(JSContext *cx)
{
#ifdef DEBUG
    JSVersion version = JSVERSION_NUMBER(cx);

    JS_ASSERT(version == JSVERSION_DEFAULT || version >= JSVERSION_ECMA_3);
#endif
}

void
js_SetVersion(JSContext *cx, JSVersion version)
{
    cx->version = version;
    js_SyncVersionToOptions(cx);
    js_OnVersionChange(cx);
}

JSContext *
js_NewContext(JSRuntime *rt, size_t stackChunkSize)
{
    JSContext *cx;
    JSBool ok, first;
    JSContextCallback cxCallback;

    /*
     * We need to initialize the new context fully before adding it to the
     * runtime list. After that it can be accessed from another thread via
     * js_ContextIterator.
     */
    void *mem = js_calloc(sizeof *cx);
    if (!mem)
        return NULL;

    cx = new (mem) JSContext(rt);
    cx->debugHooks = &rt->globalDebugHooks;
#if JS_STACK_GROWTH_DIRECTION > 0
    cx->stackLimit = (jsuword) -1;
#endif
    cx->scriptStackQuota = JS_DEFAULT_SCRIPT_STACK_QUOTA;
    JS_STATIC_ASSERT(JSVERSION_DEFAULT == 0);
    JS_ASSERT(cx->version == JSVERSION_DEFAULT);
    VOUCH_DOES_NOT_REQUIRE_STACK();

    JS_InitArenaPool(&cx->tempPool, "temp", TEMP_POOL_CHUNK_SIZE, sizeof(jsdouble),
                     &cx->scriptStackQuota);
    JS_InitArenaPool(&cx->regExpPool, "regExp", TEMP_POOL_CHUNK_SIZE, sizeof(int),
                     &cx->scriptStackQuota);

    JS_ASSERT(cx->resolveFlags == 0);

#ifdef JS_THREADSAFE
    if (!js_InitContextThread(cx)) {
        FreeContext(cx);
        return NULL;
    }
#endif

    /*
     * Here the GC lock is still held after js_InitContextThread took it and
     * the GC is not running on another thread.
     */
    for (;;) {
        if (rt->state == JSRTS_UP) {
            JS_ASSERT(!JS_CLIST_IS_EMPTY(&rt->contextList));
            first = JS_FALSE;
            break;
        }
        if (rt->state == JSRTS_DOWN) {
            JS_ASSERT(JS_CLIST_IS_EMPTY(&rt->contextList));
            first = JS_TRUE;
            rt->state = JSRTS_LAUNCHING;
            break;
        }
        JS_WAIT_CONDVAR(rt->stateChange, JS_NO_TIMEOUT);

        /*
         * During the above wait after we are notified about the state change
         * but before we wake up, another thread could enter the GC from
         * js_DestroyContext, bug 478336. So we must wait here to ensure that
         * when we exit the loop with the first flag set to true, that GC is
         * finished.
         */
        js_WaitForGC(rt);
    }
    JS_APPEND_LINK(&cx->link, &rt->contextList);
    JS_UNLOCK_GC(rt);

    js_InitRandom(cx);

    /*
     * If cx is the first context on this runtime, initialize well-known atoms,
     * keywords, numbers, and strings. If one of these steps should fail, the
     * runtime will be left in a partially initialized state, with zeroes and
     * nulls stored in the default-initialized remainder of the struct. We'll
     * clean the runtime up under js_DestroyContext, because cx will be "last"
     * as well as "first".
     */
    if (first) {
#ifdef JS_THREADSAFE
        JS_BeginRequest(cx);
#endif
        ok = js_InitCommonAtoms(cx);

        /*
         * scriptFilenameTable may be left over from a previous episode of
         * non-zero contexts alive in rt, so don't re-init the table if it's
         * not necessary.
         */
        if (ok && !rt->scriptFilenameTable)
            ok = js_InitRuntimeScriptState(rt);
        if (ok)
            ok = js_InitRuntimeNumberState(cx);
        if (ok) {
            /*
             * Ensure that the empty scopes initialized by
             * JSScope::initRuntimeState get the desired special shapes.
             * (The rt->state dance above guarantees that this abuse of
             * rt->shapeGen is thread-safe.)
             */
            uint32 shapeGen = rt->shapeGen;
            rt->shapeGen = 0;
            ok = JSScope::initRuntimeState(cx);
            if (rt->shapeGen < shapeGen)
                rt->shapeGen = shapeGen;
        }

#ifdef JS_THREADSAFE
        JS_EndRequest(cx);
#endif
        if (!ok) {
            js_DestroyContext(cx, JSDCM_NEW_FAILED);
            return NULL;
        }

        AutoLockGC lock(rt);
        rt->state = JSRTS_UP;
        JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
    }

    cxCallback = rt->cxCallback;
    if (cxCallback && !cxCallback(cx, JSCONTEXT_NEW)) {
        js_DestroyContext(cx, JSDCM_NEW_FAILED);
        return NULL;
    }

    /* Using ContextAllocPolicy, so init after JSContext is ready. */
    if (!cx->busyArrays.init()) {
        FreeContext(cx);
        return NULL;
    }

    return cx;
}

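/*
 * [Editorial note, not part of the original tree] Sketch of how an embedding
 * pairs these entry points (normally through the public JS_NewContext and
 * JS_DestroyContext wrappers):
 *
 *   JSContext *cx = js_NewContext(rt, 8192);
 *   if (!cx)
 *       return NULL;
 *   ...use cx...
 *   js_DestroyContext(cx, JSDCM_FORCE_GC);
 *
 * The last context destroyed on a runtime also tears down runtime-wide state
 * (common atoms, number state, scope runtime state), as js_DestroyContext
 * below shows.
 */
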
#if defined DEBUG && defined XP_UNIX
# include <stdio.h>

class JSAutoFile {
  public:
    JSAutoFile() : mFile(NULL) {}

    ~JSAutoFile() {
        if (mFile)
            fclose(mFile);
    }

    FILE *open(const char *fname, const char *mode) {
        return mFile = fopen(fname, mode);
    }
    operator FILE *() {
        return mFile;
    }

  private:
    FILE *mFile;
};

static void
DumpEvalCacheMeter(JSContext *cx)
{
    if (const char *filename = getenv("JS_EVALCACHE_STATFILE")) {
        struct {
            const char *name;
            ptrdiff_t offset;
        } table[] = {
#define frob(x) { #x, offsetof(JSEvalCacheMeter, x) }
            EVAL_CACHE_METER_LIST(frob)
#undef frob
        };
        JSEvalCacheMeter *ecm = &JS_THREAD_DATA(cx)->evalCacheMeter;

        static JSAutoFile fp;
        if (!fp && !fp.open(filename, "w"))
            return;

        fprintf(fp, "eval cache meter (%p):\n",
#ifdef JS_THREADSAFE
                (void *) cx->thread
#else
                (void *) cx->runtime
#endif
                );
        for (uintN i = 0; i < JS_ARRAY_LENGTH(table); ++i) {
            fprintf(fp, "%-8.8s %llu\n",
                    table[i].name,
                    (unsigned long long int) *(uint64 *)((uint8 *)ecm + table[i].offset));
        }
        fprintf(fp, "hit ratio %g%%\n", ecm->hit * 100. / ecm->probe);
        fprintf(fp, "avg steps %g\n", double(ecm->step) / ecm->probe);
        fflush(fp);
    }
}
# define DUMP_EVAL_CACHE_METER(cx) DumpEvalCacheMeter(cx)

static void
DumpFunctionCountMap(const char *title, JSRuntime::FunctionCountMap &map, FILE *fp)
{
    fprintf(fp, "\n%s count map:\n", title);

    for (JSRuntime::FunctionCountMap::Range r = map.all(); !r.empty(); r.popFront()) {
        JSFunction *fun = r.front().key;
        int32 count = r.front().value;

        fprintf(fp, "%10d %s:%u\n", count, fun->u.i.script->filename, fun->u.i.script->lineno);
    }
}

static void
DumpFunctionMeter(JSContext *cx)
{
    if (const char *filename = cx->runtime->functionMeterFilename) {
        struct {
            const char *name;
            ptrdiff_t offset;
        } table[] = {
#define frob(x) { #x, offsetof(JSFunctionMeter, x) }
            FUNCTION_KIND_METER_LIST(frob)
#undef frob
        };
        JSFunctionMeter *fm = &cx->runtime->functionMeter;

        static JSAutoFile fp;
        if (!fp && !fp.open(filename, "w"))
            return;

        fprintf(fp, "function meter (%s):\n", cx->runtime->lastScriptFilename);
        for (uintN i = 0; i < JS_ARRAY_LENGTH(table); ++i)
            fprintf(fp, "%-19.19s %d\n", table[i].name, *(int32 *)((uint8 *)fm + table[i].offset));

        DumpFunctionCountMap("method read barrier", cx->runtime->methodReadBarrierCountMap, fp);
        DumpFunctionCountMap("unjoined function", cx->runtime->unjoinedFunctionCountMap, fp);

        putc('\n', fp);
        fflush(fp);
    }
}

# define DUMP_FUNCTION_METER(cx) DumpFunctionMeter(cx)

#endif /* DEBUG && XP_UNIX */

#ifndef DUMP_EVAL_CACHE_METER
# define DUMP_EVAL_CACHE_METER(cx) ((void) 0)
#endif

#ifndef DUMP_FUNCTION_METER
# define DUMP_FUNCTION_METER(cx) ((void) 0)
#endif

void
js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
{
    JSRuntime *rt;
    JSContextCallback cxCallback;
    JSBool last;

    JS_ASSERT(!cx->enumerators);

    rt = cx->runtime;
#ifdef JS_THREADSAFE
    /*
     * For API compatibility we allow destroying contexts without a thread in
     * optimized builds. We assume that the embedding knows that an OOM error
     * cannot happen in JS_SetContextThread.
     */
    JS_ASSERT(cx->thread && CURRENT_THREAD_IS_ME(cx->thread));
    if (!cx->thread)
        JS_SetContextThread(cx);

    JS_ASSERT_IF(rt->gcRunning, cx->outstandingRequests == 0);
#endif

    if (mode != JSDCM_NEW_FAILED) {
        cxCallback = rt->cxCallback;
        if (cxCallback) {
            /*
             * JSCONTEXT_DESTROY callback is not allowed to fail and must
             * return true.
             */
#ifdef DEBUG
            JSBool callbackStatus =
#endif
            cxCallback(cx, JSCONTEXT_DESTROY);
            JS_ASSERT(callbackStatus);
        }
    }

    JS_LOCK_GC(rt);
    JS_ASSERT(rt->state == JSRTS_UP || rt->state == JSRTS_LAUNCHING);
#ifdef JS_THREADSAFE
    /*
     * Typically we are called outside a request, so ensure that the GC is not
     * running before removing the context from rt->contextList, see bug 477021.
     */
    if (cx->requestDepth == 0)
        js_WaitForGC(rt);
#endif
    JS_REMOVE_LINK(&cx->link);
    last = (rt->contextList.next == &rt->contextList);
    if (last)
        rt->state = JSRTS_LANDING;
    if (last || mode == JSDCM_FORCE_GC || mode == JSDCM_MAYBE_GC
#ifdef JS_THREADSAFE
        || cx->requestDepth != 0
#endif
        ) {
        JS_ASSERT(!rt->gcRunning);

        JS_UNLOCK_GC(rt);

        if (last) {
#ifdef JS_THREADSAFE
            /*
             * If cx is not in a request already, begin one now so that we wait
             * for any racing GC started on a not-last context to finish, before
             * we plow ahead and unpin atoms. Note that even though we begin a
             * request here if necessary, we end all requests on cx below before
             * forcing a final GC. This lets any not-last context destruction
             * racing in another thread try to force or maybe run the GC, but by
             * that point, rt->state will not be JSRTS_UP, and that GC attempt
             * will return early.
             */
            if (cx->requestDepth == 0)
                JS_BeginRequest(cx);
#endif

            JSScope::finishRuntimeState(cx);
            js_FinishRuntimeNumberState(cx);

            /* Unpin all common atoms before final GC. */
            js_FinishCommonAtoms(cx);

            /* Clear debugging state to remove GC roots. */
            JS_ClearAllTraps(cx);
            JS_ClearAllWatchPoints(cx);
        }

        /* Remove more GC roots in regExpStatics, then collect garbage. */
        JS_ClearRegExpRoots(cx);

#ifdef JS_THREADSAFE
        /*
         * Destroying a context implicitly calls JS_EndRequest(). Also, we must
         * end our request here in case we are "last" -- in that event, another
         * js_DestroyContext that was not last might be waiting in the GC for our
         * request to end. We'll let it run below, just before we do the truly
         * final GC and then free atom state.
         */
        while (cx->requestDepth != 0)
            JS_EndRequest(cx);
#endif

        if (last) {
            js_GC(cx, GC_LAST_CONTEXT);
            DUMP_EVAL_CACHE_METER(cx);
            DUMP_FUNCTION_METER(cx);

            /* Take the runtime down, now that it has no contexts or atoms. */
            JS_LOCK_GC(rt);
            rt->state = JSRTS_DOWN;
            JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
        } else {
            if (mode == JSDCM_FORCE_GC)
                js_GC(cx, GC_NORMAL);
            else if (mode == JSDCM_MAYBE_GC)
                JS_MaybeGC(cx);
            JS_LOCK_GC(rt);
            js_WaitForGC(rt);
        }
    }
#ifdef JS_THREADSAFE
    js_ClearContextThread(cx);
#endif
#ifdef JS_METER_DST_OFFSET_CACHING
    cx->dstOffsetCache.dumpStats();
#endif
    JS_UNLOCK_GC(rt);
    FreeContext(cx);
}

static void
FreeContext(JSContext *cx)
{
#ifdef JS_THREADSAFE
    JS_ASSERT(!cx->thread);
#endif

    /* Free the stuff hanging off of cx. */
    cx->regExpStatics.clear();
    VOUCH_DOES_NOT_REQUIRE_STACK();
    JS_FinishArenaPool(&cx->tempPool);
    JS_FinishArenaPool(&cx->regExpPool);

    if (cx->lastMessage)
        js_free(cx->lastMessage);

    /* Remove any argument formatters. */
    JSArgumentFormatMap *map = cx->argumentFormatMap;
    while (map) {
        JSArgumentFormatMap *temp = map;
        map = map->next;
        cx->free(temp);
    }

    /* Destroy the resolve recursion damper. */
    if (cx->resolvingTable) {
        JS_DHashTableDestroy(cx->resolvingTable);
        cx->resolvingTable = NULL;
    }

    /* Finally, free cx itself. */
    cx->~JSContext();
    js_free(cx);
}

JSBool
js_ValidContextPointer(JSRuntime *rt, JSContext *cx)
{
    JSCList *cl;

    for (cl = rt->contextList.next; cl != &rt->contextList; cl = cl->next) {
        if (cl == &cx->link)
            return JS_TRUE;
    }
    JS_RUNTIME_METER(rt, deadContexts);
    return JS_FALSE;
}

JSContext *
js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp)
{
    JSContext *cx = *iterp;

    Conditionally<AutoLockGC> lockIf(!!unlocked, rt);
    cx = js_ContextFromLinkField(cx ? cx->link.next : rt->contextList.next);
    if (&cx->link == &rt->contextList)
        cx = NULL;
    *iterp = cx;
    return cx;
}

JS_FRIEND_API(JSContext *)
js_NextActiveContext(JSRuntime *rt, JSContext *cx)
{
    JSContext *iter = cx;
#ifdef JS_THREADSAFE
    while ((cx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
        if (cx->requestDepth)
            break;
    }
    return cx;
#else
    return js_ContextIterator(rt, JS_FALSE, &iter);
#endif
}

static JSDHashNumber
resolving_HashKey(JSDHashTable *table, const void *ptr)
{
    const JSResolvingKey *key = (const JSResolvingKey *)ptr;

    return (JSDHashNumber(uintptr_t(key->obj)) >> JS_GCTHING_ALIGN) ^ JSID_BITS(key->id);
}

static JSBool
resolving_MatchEntry(JSDHashTable *table,
                     const JSDHashEntryHdr *hdr,
                     const void *ptr)
{
    const JSResolvingEntry *entry = (const JSResolvingEntry *)hdr;
    const JSResolvingKey *key = (const JSResolvingKey *)ptr;

    return entry->key.obj == key->obj && entry->key.id == key->id;
}

static const JSDHashTableOps resolving_dhash_ops = {
    JS_DHashAllocTable,
    JS_DHashFreeTable,
    resolving_HashKey,
    resolving_MatchEntry,
    JS_DHashMoveEntryStub,
    JS_DHashClearEntryStub,
    JS_DHashFinalizeStub,
    NULL
};

JSBool
js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
                  JSResolvingEntry **entryp)
{
    JSDHashTable *table;
    JSResolvingEntry *entry;

    table = cx->resolvingTable;
    if (!table) {
        table = JS_NewDHashTable(&resolving_dhash_ops, NULL,
                                 sizeof(JSResolvingEntry),
                                 JS_DHASH_MIN_SIZE);
        if (!table)
            goto outofmem;
        cx->resolvingTable = table;
    }

    entry = (JSResolvingEntry *)
            JS_DHashTableOperate(table, key, JS_DHASH_ADD);
    if (!entry)
        goto outofmem;

    if (entry->flags & flag) {
        /* An entry for (key, flag) exists already -- dampen recursion. */
        entry = NULL;
    } else {
        /* Fill in key if we were the first to add entry, then set flag. */
        if (!entry->key.obj)
            entry->key = *key;
        entry->flags |= flag;
    }
    *entryp = entry;
    return JS_TRUE;

  outofmem:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}

void
js_StopResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
                 JSResolvingEntry *entry, uint32 generation)
{
    JSDHashTable *table;

    /*
     * Clear flag from entry->flags and return early if other flags remain.
     * We must take care to re-lookup entry if the table has changed since
     * it was found by js_StartResolving.
     */
    table = cx->resolvingTable;
    if (!entry || table->generation != generation) {
        entry = (JSResolvingEntry *)
                JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
    }
    JS_ASSERT(JS_DHASH_ENTRY_IS_BUSY(&entry->hdr));
    entry->flags &= ~flag;
    if (entry->flags)
        return;

    /*
     * Do a raw remove only if fewer entries were removed than would cause
     * alpha to be less than .5 (alpha is at most .75). Otherwise, we just
     * call JS_DHashTableOperate to re-lookup the key and remove its entry,
     * compressing or shrinking the table as needed.
     */
    if (table->removedCount < JS_DHASH_TABLE_SIZE(table) >> 2)
        JS_DHashTableRawRemove(table, &entry->hdr);
    else
        JS_DHashTableOperate(table, key, JS_DHASH_REMOVE);
}

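/*
 * [Editorial note, not part of the original tree] js_StartResolving and
 * js_StopResolving implement the "resolve recursion damper" mentioned in
 * FreeContext: a (obj, id, flag) entry is added before running a resolve hook
 * and removed afterwards, so a hook that re-triggers resolution of the same
 * property sees an existing entry (entry == NULL from js_StartResolving) and
 * can bail out instead of recursing. Sketch of the typical call pattern, under
 * that assumption:
 *
 *   JSResolvingKey key = { obj, id };
 *   JSResolvingEntry *entry;
 *   if (!js_StartResolving(cx, &key, JSRESFLAG_LOOKUP, &entry))
 *       return JS_FALSE;          // OOM
 *   if (!entry)
 *       return JS_TRUE;           // already resolving: dampen recursion
 *   uint32 generation = cx->resolvingTable->generation;
 *   ...run the class resolve hook...
 *   js_StopResolving(cx, &key, JSRESFLAG_LOOKUP, entry, generation);
 */
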
static void
ReportError(JSContext *cx, const char *message, JSErrorReport *reportp,
            JSErrorCallback callback, void *userRef)
{
    /*
     * Check the error report, and set a JavaScript-catchable exception
     * if the error is defined to have an associated exception. If an
     * exception is thrown, then the JSREPORT_EXCEPTION flag will be set
     * on the error report, and exception-aware hosts should ignore it.
     */
    JS_ASSERT(reportp);
    if ((!callback || callback == js_GetErrorMessage) &&
        reportp->errorNumber == JSMSG_UNCAUGHT_EXCEPTION)
        reportp->flags |= JSREPORT_EXCEPTION;

    /*
     * Call the error reporter only if an exception wasn't raised.
     *
     * If an exception was raised, then we call the debugErrorHook
     * (if present) to give it a chance to see the error before it
     * propagates out of scope. This is needed for compatibility
     * with the old scheme.
     */
    if (!JS_IsRunning(cx) ||
        !js_ErrorToException(cx, message, reportp, callback, userRef)) {
        js_ReportErrorAgain(cx, message, reportp);
    } else if (cx->debugHooks->debugErrorHook && cx->errorReporter) {
        JSDebugErrorHook hook = cx->debugHooks->debugErrorHook;
        /* test local in case debugErrorHook changed on another thread */
        if (hook)
            hook(cx, message, reportp, cx->debugHooks->debugErrorHookData);
    }
}

/* The report must be initially zeroed. */
static void
PopulateReportBlame(JSContext *cx, JSErrorReport *report)
{
    /*
     * Walk stack until we find a frame that is associated with some script
     * rather than a native frame.
     */
    for (JSStackFrame *fp = js_GetTopStackFrame(cx); fp; fp = fp->down) {
        if (fp->pc(cx)) {
            report->filename = fp->getScript()->filename;
            report->lineno = js_FramePCToLineNumber(cx, fp);
            break;
        }
    }
}

/*
 * We don't post an exception in this case, since doing so runs into
 * complications of pre-allocating an exception object which required
 * running the Exception class initializer early etc.
 * Instead we just invoke the errorReporter with an "Out Of Memory"
 * type message, and then hope the process ends swiftly.
 */
void
js_ReportOutOfMemory(JSContext *cx)
{
#ifdef JS_TRACER
    /*
     * If we are in a builtin called directly from trace, don't report an
     * error. We will retry in the interpreter instead.
     */
    if (JS_ON_TRACE(cx) && !cx->bailExit)
        return;
#endif

    JSErrorReport report;
    JSErrorReporter onError = cx->errorReporter;

    /* Get the message for this error, but we won't expand any arguments. */
    const JSErrorFormatString *efs =
        js_GetLocalizedErrorMessage(cx, NULL, NULL, JSMSG_OUT_OF_MEMORY);
    const char *msg = efs ? efs->format : "Out of memory";

    /* Fill out the report, but don't do anything that requires allocation. */
    PodZero(&report);
    report.flags = JSREPORT_ERROR;
    report.errorNumber = JSMSG_OUT_OF_MEMORY;
    PopulateReportBlame(cx, &report);

    /*
     * If debugErrorHook is present then we give it a chance to veto sending
     * the error on to the regular ErrorReporter. We also clear a pending
     * exception if any now so the hooks can replace the out-of-memory error
     * by a script-catchable exception.
     */
    cx->throwing = JS_FALSE;
    if (onError) {
        JSDebugErrorHook hook = cx->debugHooks->debugErrorHook;
        if (hook &&
            !hook(cx, msg, &report, cx->debugHooks->debugErrorHookData)) {
            onError = NULL;
        }
    }

    if (onError)
        onError(cx, msg, &report);
}

void
js_ReportOutOfScriptQuota(JSContext *cx)
{
    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
                         JSMSG_SCRIPT_STACK_QUOTA);
}

JS_FRIEND_API(void)
js_ReportOverRecursed(JSContext *cx)
{
    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
}

void
js_ReportAllocationOverflow(JSContext *cx)
{
    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_ALLOC_OVERFLOW);
}

/*
 * Given flags and the state of cx, decide whether we should report an
 * error, a warning, or just continue execution normally. Return
 * true if we should continue normally, without reporting anything;
 * otherwise, adjust *flags as appropriate and return false.
 */
static bool
checkReportFlags(JSContext *cx, uintN *flags)
{
    if (JSREPORT_IS_STRICT_MODE_ERROR(*flags)) {
        /*
         * Error in strict code; warning with strict option; okay otherwise.
         * We assume that if the top frame is a native, then it is strict if
         * the nearest scripted frame is strict, see bug 536306.
         */
        JSStackFrame *fp = js_GetScriptedCaller(cx, NULL);
        if (fp && fp->getScript()->strictModeCode)
            *flags &= ~JSREPORT_WARNING;
        else if (JS_HAS_STRICT_OPTION(cx))
            *flags |= JSREPORT_WARNING;
        else
            return true;
    } else if (JSREPORT_IS_STRICT(*flags)) {
        /* Warning/error only when JSOPTION_STRICT is set. */
        if (!JS_HAS_STRICT_OPTION(cx))
            return true;
    }

    /* Warnings become errors when JSOPTION_WERROR is set. */
    if (JSREPORT_IS_WARNING(*flags) && JS_HAS_WERROR_OPTION(cx))
        *flags &= ~JSREPORT_WARNING;

    return false;
}

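/*
 * [Editorial note, not part of the original tree] Worked example of the policy
 * checkReportFlags (above) implements for a JSREPORT_STRICT_MODE_ERROR flag:
 *
 *   caller is strict mode code   -> reported as an error
 *   JSOPTION_STRICT is set       -> reported as a warning
 *   neither                      -> not reported at all (returns true)
 *
 * Independently, JSOPTION_WERROR upgrades any remaining warning to an error
 * by clearing JSREPORT_WARNING.
 */
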
JSBool
js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap)
{
    char *message;
    jschar *ucmessage;
    size_t messagelen;
    JSErrorReport report;
    JSBool warning;

    if (checkReportFlags(cx, &flags))
        return JS_TRUE;

    message = JS_vsmprintf(format, ap);
    if (!message)
        return JS_FALSE;
    messagelen = strlen(message);

    PodZero(&report);
    report.flags = flags;
    report.errorNumber = JSMSG_USER_DEFINED_ERROR;
    report.ucmessage = ucmessage = js_InflateString(cx, message, &messagelen);
    PopulateReportBlame(cx, &report);

    warning = JSREPORT_IS_WARNING(report.flags);

    ReportError(cx, message, &report, NULL, NULL);
    js_free(message);
    cx->free(ucmessage);
    return warning;
}

/*
 * The arguments from ap need to be packaged up into an array and stored
 * into the report struct.
 *
 * The format string addressed by the error number may contain operands
 * identified by the format {N}, where N is a decimal digit. Each of these
 * is to be replaced by the Nth argument from the va_list. The complete
 * message is placed into reportp->ucmessage converted to a JSString.
 *
 * Returns true if the expansion succeeds (can fail if out of memory).
 */
JSBool
js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback,
                        void *userRef, const uintN errorNumber,
                        char **messagep, JSErrorReport *reportp,
                        bool charArgs, va_list ap)
{
    const JSErrorFormatString *efs;
    int i;
    int argCount;

    *messagep = NULL;

    /* Most calls supply js_GetErrorMessage; if this is so, assume NULL. */
    if (!callback || callback == js_GetErrorMessage)
        efs = js_GetLocalizedErrorMessage(cx, userRef, NULL, errorNumber);
    else
        efs = callback(userRef, NULL, errorNumber);
    if (efs) {
        size_t totalArgsLength = 0;
        size_t argLengths[10]; /* only {0} thru {9} supported */
        argCount = efs->argCount;
        JS_ASSERT(argCount <= 10);
        if (argCount > 0) {
            /*
             * Gather the arguments into an array, and accumulate
             * their sizes. We allocate 1 more than necessary and
             * null it out to act as the caboose when we free the
             * pointers later.
             */
            reportp->messageArgs = (const jschar **)
                cx->malloc(sizeof(jschar *) * (argCount + 1));
            if (!reportp->messageArgs)
                return JS_FALSE;
            reportp->messageArgs[argCount] = NULL;
            for (i = 0; i < argCount; i++) {
                if (charArgs) {
                    char *charArg = va_arg(ap, char *);
                    size_t charArgLength = strlen(charArg);
                    reportp->messageArgs[i]
                        = js_InflateString(cx, charArg, &charArgLength);
                    if (!reportp->messageArgs[i])
                        goto error;
                } else {
                    reportp->messageArgs[i] = va_arg(ap, jschar *);
                }
                argLengths[i] = js_strlen(reportp->messageArgs[i]);
                totalArgsLength += argLengths[i];
            }
            /* NULL-terminate for easy copying. */
            reportp->messageArgs[i] = NULL;
        }
        /*
         * Parse the error format, substituting the argument X
         * for {X} in the format.
         */
        if (argCount > 0) {
            if (efs->format) {
                jschar *buffer, *fmt, *out;
                int expandedArgs = 0;
                size_t expandedLength;
                size_t len = strlen(efs->format);

                buffer = fmt = js_InflateString(cx, efs->format, &len);
                if (!buffer)
                    goto error;
                expandedLength = len
                                 - (3 * argCount) /* exclude the {n} */
                                 + totalArgsLength;

                /*
                 * Note - the above calculation assumes that each argument
                 * is used once and only once in the expansion !!!
                 */
                reportp->ucmessage = out = (jschar *)
                    cx->malloc((expandedLength + 1) * sizeof(jschar));
                if (!out) {
                    cx->free(buffer);
                    goto error;
                }
                while (*fmt) {
                    if (*fmt == '{') {
                        if (isdigit(fmt[1])) {
                            int d = JS7_UNDEC(fmt[1]);
                            JS_ASSERT(d < argCount);
                            js_strncpy(out, reportp->messageArgs[d],
                                       argLengths[d]);
                            out += argLengths[d];
                            fmt += 3;
                            expandedArgs++;
                            continue;
                        }
                    }
                    *out++ = *fmt++;
                }
                JS_ASSERT(expandedArgs == argCount);
                *out = 0;
                cx->free(buffer);
                *messagep =
                    js_DeflateString(cx, reportp->ucmessage,
                                     (size_t)(out - reportp->ucmessage));
                if (!*messagep)
                    goto error;
            }
        } else {
            /*
             * Zero arguments: the format string (if it exists) is the
             * entire message.
             */
            if (efs->format) {
                size_t len;
                *messagep = JS_strdup(cx, efs->format);
                if (!*messagep)
                    goto error;
                len = strlen(*messagep);
                reportp->ucmessage = js_InflateString(cx, *messagep, &len);
                if (!reportp->ucmessage)
                    goto error;
            }
        }
    }
    if (*messagep == NULL) {
        /* where's the right place for this ??? */
        const char *defaultErrorMessage
            = "No error message available for error number %d";
        size_t nbytes = strlen(defaultErrorMessage) + 16;
        *messagep = (char *)cx->malloc(nbytes);
        if (!*messagep)
            goto error;
        JS_snprintf(*messagep, nbytes, defaultErrorMessage, errorNumber);
    }
    return JS_TRUE;

  error:
    if (reportp->messageArgs) {
        /* free the arguments only if we allocated them */
        if (charArgs) {
            i = 0;
            while (reportp->messageArgs[i])
                cx->free((void *)reportp->messageArgs[i++]);
        }
        cx->free((void *)reportp->messageArgs);
        reportp->messageArgs = NULL;
    }
    if (reportp->ucmessage) {
        cx->free((void *)reportp->ucmessage);
        reportp->ucmessage = NULL;
    }
    if (*messagep) {
        cx->free((void *)*messagep);
        *messagep = NULL;
    }
    return JS_FALSE;
}

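/*
 * [Editorial note, not part of the original tree] Example of the {N}
 * expansion described above, assuming a hypothetical js.msg entry such as
 *
 *   MSG_DEF(JSMSG_EXAMPLE, ..., 2, JSEXN_TYPEERR, "{0} is not a {1}")
 *
 * then expanding JSMSG_EXAMPLE with the char* arguments "obj.foo" and
 * "function" yields reportp->ucmessage == "obj.foo is not a function". The
 * length arithmetic (len - 3 * argCount + totalArgsLength) relies on each
 * {N} being three characters and appearing exactly once.
 */
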
JSBool
js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
                       void *userRef, const uintN errorNumber,
                       JSBool charArgs, va_list ap)
{
    JSErrorReport report;
    char *message;
    JSBool warning;

    if (checkReportFlags(cx, &flags))
        return JS_TRUE;
    warning = JSREPORT_IS_WARNING(flags);

    PodZero(&report);
    report.flags = flags;
    report.errorNumber = errorNumber;
    PopulateReportBlame(cx, &report);

    if (!js_ExpandErrorArguments(cx, callback, userRef, errorNumber,
                                 &message, &report, !!charArgs, ap)) {
        return JS_FALSE;
    }

    ReportError(cx, message, &report, callback, userRef);

    if (message)
        cx->free(message);
    if (report.messageArgs) {
        /*
         * js_ExpandErrorArguments owns its messageArgs only if it had to
         * inflate the arguments (from regular |char *|s).
         */
        if (charArgs) {
            int i = 0;
            while (report.messageArgs[i])
                cx->free((void *)report.messageArgs[i++]);
        }
        cx->free((void *)report.messageArgs);
    }
    if (report.ucmessage)
        cx->free((void *)report.ucmessage);

    return warning;
}

JS_FRIEND_API(void)
js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *reportp)
{
    JSErrorReporter onError;

    if (!message)
        return;

    if (cx->lastMessage)
        js_free(cx->lastMessage);
    cx->lastMessage = JS_strdup(cx, message);
    if (!cx->lastMessage)
        return;
    onError = cx->errorReporter;

    /*
     * If debugErrorHook is present then we give it a chance to veto
     * sending the error on to the regular ErrorReporter.
     */
    if (onError) {
        JSDebugErrorHook hook = cx->debugHooks->debugErrorHook;
        if (hook &&
            !hook(cx, cx->lastMessage, reportp,
                  cx->debugHooks->debugErrorHookData)) {
            onError = NULL;
        }
    }
    if (onError)
        onError(cx, cx->lastMessage, reportp);
}

void
js_ReportIsNotDefined(JSContext *cx, const char *name)
{
    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_DEFINED, name);
}

JSBool
js_ReportIsNullOrUndefined(JSContext *cx, intN spindex, const Value &v,
                           JSString *fallback)
{
    char *bytes;
    JSBool ok;

    bytes = DecompileValueGenerator(cx, spindex, v, fallback);
    if (!bytes)
        return JS_FALSE;

    if (strcmp(bytes, js_undefined_str) == 0 ||
        strcmp(bytes, js_null_str) == 0) {
        ok = JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
                                          js_GetErrorMessage, NULL,
                                          JSMSG_NO_PROPERTIES, bytes,
                                          NULL, NULL);
    } else if (v.isUndefined()) {
        ok = JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
                                          js_GetErrorMessage, NULL,
                                          JSMSG_UNEXPECTED_TYPE, bytes,
                                          js_undefined_str, NULL);
    } else {
        JS_ASSERT(v.isNull());
        ok = JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
                                          js_GetErrorMessage, NULL,
                                          JSMSG_UNEXPECTED_TYPE, bytes,
                                          js_null_str, NULL);
    }

    cx->free(bytes);
    return ok;
}

void
js_ReportMissingArg(JSContext *cx, const Value &v, uintN arg)
{
    char argbuf[11];
    char *bytes;
    JSAtom *atom;

    JS_snprintf(argbuf, sizeof argbuf, "%u", arg);
    bytes = NULL;
    if (IsFunctionObject(v)) {
        atom = GET_FUNCTION_PRIVATE(cx, &v.toObject())->atom;
        bytes = DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
                                        v, ATOM_TO_STRING(atom));
        if (!bytes)
            return;
    }
    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
                         JSMSG_MISSING_FUN_ARG, argbuf,
                         bytes ? bytes : "");
    cx->free(bytes);
}

JSBool
js_ReportValueErrorFlags(JSContext *cx, uintN flags, const uintN errorNumber,
                         intN spindex, const Value &v, JSString *fallback,
                         const char *arg1, const char *arg2)
{
    char *bytes;
    JSBool ok;

    JS_ASSERT(js_ErrorFormatString[errorNumber].argCount >= 1);
    JS_ASSERT(js_ErrorFormatString[errorNumber].argCount <= 3);
    bytes = DecompileValueGenerator(cx, spindex, v, fallback);
    if (!bytes)
        return JS_FALSE;

    ok = JS_ReportErrorFlagsAndNumber(cx, flags, js_GetErrorMessage,
                                      NULL, errorNumber, bytes, arg1, arg2);
    cx->free(bytes);
    return ok;
}

#if defined DEBUG && defined XP_UNIX
/* For gdb usage. */
void js_traceon(JSContext *cx)  { cx->tracefp = stderr; cx->tracePrevPc = NULL; }
void js_traceoff(JSContext *cx) { cx->tracefp = NULL; }
#endif

JSErrorFormatString js_ErrorFormatString[JSErr_Limit] = {
#define MSG_DEF(name, number, count, exception, format) \
    { format, count, exception } ,
#include "js.msg"
#undef MSG_DEF
};

JS_FRIEND_API(const JSErrorFormatString *)
js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
{
    if ((errorNumber > 0) && (errorNumber < JSErr_Limit))
        return &js_ErrorFormatString[errorNumber];
    return NULL;
}

JSBool
js_InvokeOperationCallback(JSContext *cx)
{
    JS_ASSERT_REQUEST_DEPTH(cx);
    JS_ASSERT(JS_THREAD_DATA(cx)->operationCallbackFlag);

    /*
     * Reset the callback flag first, then yield. If another thread is racing
     * us here we will accumulate another callback request which will be
     * serviced at the next opportunity.
     */
    JS_THREAD_DATA(cx)->operationCallbackFlag = 0;

    /*
     * Unless we are going to run the GC, we automatically yield the current
     * context every time the operation callback is hit since we might be
     * called as a result of an impending GC, which would deadlock if we do
     * not yield. Operation callbacks are supposed to happen rarely (seconds,
     * not milliseconds) so it is acceptable to yield at every callback.
     */
    JSRuntime *rt = cx->runtime;
    if (rt->gcIsNeeded) {
        js_GC(cx, GC_NORMAL);

        /*
         * On trace we can exceed the GC quota, see comments in NewGCArena. So
         * we check the quota and report OOM here when we are off trace.
         */
        bool delayedOutOfMemory;
        JS_LOCK_GC(rt);
        delayedOutOfMemory = (rt->gcBytes > rt->gcMaxBytes);
        JS_UNLOCK_GC(rt);
        if (delayedOutOfMemory) {
            js_ReportOutOfMemory(cx);
            return false;
        }
    }
#ifdef JS_THREADSAFE
    else {
        JS_YieldRequest(cx);
    }
#endif

    JSOperationCallback cb = cx->operationCallback;

    /*
     * Important: Additional callbacks can occur inside the callback handler
     * if it re-enters the JS engine. The embedding must ensure that the
     * callback is disconnected before attempting such re-entry.
     */

    return !cb || cb(cx);
}

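/*
 * [Editorial note, not part of the original tree] The interpreter and JIT
 * call js_InvokeOperationCallback when the per-thread operationCallbackFlag
 * is set; embeddings typically install the callback itself with the public
 * JS_SetOperationCallback and request a call via JS_TriggerOperationCallback
 * (or, engine-internally, js_TriggerAllOperationCallbacks below). Returning
 * false from the callback aborts the running script.
 */
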
void
js_TriggerAllOperationCallbacks(JSRuntime *rt, JSBool gcLocked)
{
#ifdef JS_THREADSAFE
    Conditionally<AutoLockGC> lockIf(!gcLocked, rt);
#endif
    for (ThreadDataIter i(rt); !i.empty(); i.popFront())
        i.threadData()->triggerOperationCallback();
}

JSStackFrame *
js_GetScriptedCaller(JSContext *cx, JSStackFrame *fp)
{
    if (!fp)
        fp = js_GetTopStackFrame(cx);
    while (fp) {
        if (fp->hasScript())
            return fp;
        fp = fp->down;
    }
    return NULL;
}

jsbytecode*
js_GetCurrentBytecodePC(JSContext* cx)
{
    jsbytecode *pc, *imacpc;

#ifdef JS_TRACER
    if (JS_ON_TRACE(cx)) {
        pc = cx->bailExit->pc;
        imacpc = cx->bailExit->imacpc;
    } else
#endif
    {
        JS_ASSERT_NOT_ON_TRACE(cx);  /* for static analysis */
        pc = cx->regs ? cx->regs->pc : NULL;
        if (!pc)
            return NULL;
        imacpc = cx->fp()->maybeIMacroPC();
    }

    /*
     * If we are inside GetProperty_tn or similar, return a pointer to the
     * current instruction in the script, not the CALL instruction in the
     * imacro, for the benefit of callers doing bytecode inspection.
     */
    return (*pc == JSOP_CALL && imacpc) ? imacpc : pc;
}

bool
js_CurrentPCIsInImacro(JSContext *cx)
{
#ifdef JS_TRACER
    VOUCH_DOES_NOT_REQUIRE_STACK();
    if (JS_ON_TRACE(cx))
        return cx->bailExit->imacpc != NULL;
    return cx->fp()->hasIMacroPC();
#else
    return false;
#endif
}

void
DSTOffsetCache::purge()
{
    /*
     * NB: The initial range values are carefully chosen to result in a cache
     *     miss on first use given the range of possible values. Be careful
     *     to keep these values and the caching algorithm in sync!
     */
    offsetMilliseconds = 0;
    rangeStartSeconds = rangeEndSeconds = INT64_MIN;
    oldOffsetMilliseconds = 0;
    oldRangeStartSeconds = oldRangeEndSeconds = INT64_MIN;

#ifdef JS_METER_DST_OFFSET_CACHING
    totalCalculations = 0;
    hit = 0;
    missIncreasing = missDecreasing = 0;
    missIncreasingOffsetChangeExpand = missIncreasingOffsetChangeUpper = 0;
    missDecreasingOffsetChangeExpand = missDecreasingOffsetChangeLower = 0;
    missLargeIncrease = missLargeDecrease = 0;
#endif

    sanityCheck();
}

/*
 * Since getDSTOffsetMilliseconds guarantees that all times seen will be
 * positive, we can initialize the range at construction time with large
 * negative numbers to ensure the first computation is always a cache miss and
 * doesn't return a bogus offset.
 */
DSTOffsetCache::DSTOffsetCache()
{
    purge();
}

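/*
 * [Editorial note, not part of the original tree] The cache keeps a primary
 * and an "old" [rangeStartSeconds, rangeEndSeconds] interval of UTC seconds
 * whose DST offset is known; initializing both ranges to INT64_MIN guarantees
 * that the first lookup misses, since getDSTOffsetMilliseconds only ever sees
 * non-negative times, and so never returns a stale offsetMilliseconds value.
 */
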
JSContext::JSContext(JSRuntime *rt)
  : runtime(rt),
    compartment(rt->defaultCompartment),
    regs(NULL),
    regExpStatics(this),
    busyArrays(this)
{}

void
JSContext::pushSegmentAndFrame(js::StackSegment *newseg, JSFrameRegs &newregs)
{
    if (hasActiveSegment()) {
        JS_ASSERT(regs->fp->savedPC == JSStackFrame::sInvalidPC);
        regs->fp->savedPC = regs->pc;
        currentSegment->suspend(regs);
    }
    newseg->setPreviousInContext(currentSegment);
    currentSegment = newseg;
#ifdef DEBUG
    newregs.fp->savedPC = JSStackFrame::sInvalidPC;
#endif
    setCurrentRegs(&newregs);
    newseg->joinContext(this, newregs.fp);
}

void
JSContext::popSegmentAndFrame()
{
    JS_ASSERT(currentSegment->maybeContext() == this);
    JS_ASSERT(currentSegment->getInitialFrame() == regs->fp);
    JS_ASSERT(regs->fp->savedPC == JSStackFrame::sInvalidPC);
    currentSegment->leaveContext();
    currentSegment = currentSegment->getPreviousInContext();
    if (currentSegment) {
        if (currentSegment->isSaved()) {
            setCurrentRegs(NULL);
        } else {
            setCurrentRegs(currentSegment->getSuspendedRegs());
            currentSegment->resume();
#ifdef DEBUG
            regs->fp->savedPC = JSStackFrame::sInvalidPC;
#endif
        }
    } else {
        JS_ASSERT(regs->fp->down == NULL);
        setCurrentRegs(NULL);
    }
}

void
JSContext::saveActiveSegment()
{
    JS_ASSERT(hasActiveSegment());
    currentSegment->save(regs);
    JS_ASSERT(regs->fp->savedPC == JSStackFrame::sInvalidPC);
    regs->fp->savedPC = regs->pc;
    setCurrentRegs(NULL);
}

void
JSContext::restoreSegment()
{
    js::StackSegment *ccs = currentSegment;
    setCurrentRegs(ccs->getSuspendedRegs());
    ccs->restore();
#ifdef DEBUG
    regs->fp->savedPC = JSStackFrame::sInvalidPC;
#endif
}

JSGenerator *
JSContext::generatorFor(JSStackFrame *fp) const
{
    JS_ASSERT(stack().contains(fp) && fp->isGenerator());
    JS_ASSERT(!fp->isFloatingGenerator());
    JS_ASSERT(!genStack.empty());

    if (JS_LIKELY(fp == genStack.back()->liveFrame))
        return genStack.back();

    /* General case; should only be needed for debug APIs. */
    for (size_t i = 0; i < genStack.length(); ++i) {
        if (genStack[i]->liveFrame == fp)
            return genStack[i];
    }
    JS_NOT_REACHED("no matching generator");
    return NULL;
}

StackSegment *
JSContext::containingSegment(const JSStackFrame *target)
{
    /* The context may have nothing running. */
    StackSegment *seg = currentSegment;
    if (!seg)
        return NULL;

    /* The active segment's top frame is cx->regs->fp. */
    if (regs) {
        JS_ASSERT(regs->fp);
        JS_ASSERT(activeSegment() == seg);
        JSStackFrame *f = regs->fp;
        JSStackFrame *stop = seg->getInitialFrame()->down;
        for (; f != stop; f = f->down) {
            if (f == target)
                return seg;
        }
        seg = seg->getPreviousInContext();
    }

    /* A suspended segment's top frame is its suspended frame. */
    for (; seg; seg = seg->getPreviousInContext()) {
        JSStackFrame *f = seg->getSuspendedFrame();
        JSStackFrame *stop = seg->getInitialFrame()->down;
        for (; f != stop; f = f->down) {
            if (f == target)
                return seg;
        }
    }

    return NULL;
}

void
JSContext::checkMallocGCPressure(void *p)
{
    if (!p) {
        js_ReportOutOfMemory(this);
        return;
    }

#ifdef JS_THREADSAFE
    JS_ASSERT(thread);
    JS_ASSERT(thread->gcThreadMallocBytes <= 0);
    ptrdiff_t n = JS_GC_THREAD_MALLOC_LIMIT - thread->gcThreadMallocBytes;
    thread->gcThreadMallocBytes = JS_GC_THREAD_MALLOC_LIMIT;

    AutoLockGC lock(runtime);
    runtime->gcMallocBytes -= n;

    /*
     * Trigger the GC on memory pressure but only if we are inside a request
     * and not inside a GC.
     */
    if (runtime->isGCMallocLimitReached() && requestDepth != 0)
#endif
    {
        if (!runtime->gcRunning) {
            JS_ASSERT(runtime->isGCMallocLimitReached());
            runtime->gcMallocBytes = -1;

            /*
             * Empty the GC free lists to trigger a last-ditch GC when any GC
             * thing is allocated later on this thread. This makes it
             * unnecessary to check for memory pressure on the fast path of
             * the GC allocator. We cannot touch the free lists on other
             * threads as their manipulation is not thread-safe.
             */
            JS_THREAD_DATA(this)->gcFreeLists.purge();
            js_TriggerGC(this, true);
        }
    }
}

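/*
 * [Editorial note, not part of the original tree] checkMallocGCPressure
 * (above) is the slow path of the per-thread malloc accounting: once the
 * thread's byte budget (gcThreadMallocBytes) is exhausted, it is refilled
 * here, the debt is folded into the runtime-wide gcMallocBytes counter under
 * the GC lock, and a GC is triggered if the runtime limit has been reached
 * while inside a request and not already collecting.
 */
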
bool
JSContext::isConstructing()
{
#ifdef JS_TRACER
    if (JS_ON_TRACE(this)) {
        JS_ASSERT(bailExit);
        return *bailExit->pc == JSOP_NEW;
    }
#endif
    JSStackFrame *fp = js_GetTopStackFrame(this);
    return fp && (fp->flags & JSFRAME_CONSTRUCTING);
}

/*
 * Release pool's arenas if the stackPool has existed for longer than the
 * limit specified by gcEmptyArenaPoolLifespan.
 */
inline void
FreeOldArenas(JSRuntime *rt, JSArenaPool *pool)
{
    JSArena *a = pool->current;
    if (a == pool->first.next && a->avail == a->base + sizeof(int64)) {
        int64 age = JS_Now() - *(int64 *) a->base;
        if (age > int64(rt->gcEmptyArenaPoolLifespan) * 1000)
            JS_FreeArenaPool(pool);
    }
}

void
JSContext::purge()
{
    FreeOldArenas(runtime, &regExpPool);
}

namespace js {

void
SetPendingException(JSContext *cx, const Value &v)
{
    cx->throwing = JS_TRUE;
    cx->exception = v;
}

} /* namespace js */