Get rid of TreeInfo and inline its members into TreeFragment (bug 525371, r=gal,lw)
[mozilla-central.git] / js / src / jscntxt.h
blob 5618301244da5f84c16f4d08f1288ed45110aa14
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=78:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
17 * The Original Code is Mozilla Communicator client code, released
18 * March 31, 1998.
20 * The Initial Developer of the Original Code is
21 * Netscape Communications Corporation.
22 * Portions created by the Initial Developer are Copyright (C) 1998
23 * the Initial Developer. All Rights Reserved.
25 * Contributor(s):
27 * Alternatively, the contents of this file may be used under the terms of
28 * either of the GNU General Public License Version 2 or later (the "GPL"),
29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
41 #ifndef jscntxt_h___
42 #define jscntxt_h___
44 * JS execution context.
46 #include "jsarena.h" /* Added by JSIFY */
47 #include "jsclist.h"
48 #include "jslong.h"
49 #include "jsatom.h"
50 #include "jsversion.h"
51 #include "jsdhash.h"
52 #include "jsgc.h"
53 #include "jsinterp.h"
54 #include "jsobj.h"
55 #include "jsprvtd.h"
56 #include "jspubtd.h"
57 #include "jsregexp.h"
58 #include "jsutil.h"
59 #include "jsarray.h"
60 #include "jstask.h"
61 #include "jsvector.h"
64 * js_GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
65 * given pc in a script. We use the script->code pointer to tag the cache,
66 * instead of the script address itself, so that source notes are always found
67 * by offset from the bytecode with which they were generated.
69 typedef struct JSGSNCache {
70 jsbytecode *code;
71 JSDHashTable table;
72 #ifdef JS_GSNMETER
73 uint32 hits;
74 uint32 misses;
75 uint32 fills;
76 uint32 purges;
77 # define GSN_CACHE_METER(cache,cnt) (++(cache)->cnt)
78 #else
79 # define GSN_CACHE_METER(cache,cnt) /* nothing */
80 #endif
81 } JSGSNCache;
83 #define js_FinishGSNCache(cache) js_PurgeGSNCache(cache)
85 extern void
86 js_PurgeGSNCache(JSGSNCache *cache);
88 /* These helper macros take a cx as parameter and operate on its GSN cache. */
89 #define JS_PURGE_GSN_CACHE(cx) js_PurgeGSNCache(&JS_GSN_CACHE(cx))
90 #define JS_METER_GSN_CACHE(cx,cnt) GSN_CACHE_METER(&JS_GSN_CACHE(cx), cnt)
92 /* Forward declarations of nanojit types. */
93 namespace nanojit
95 class Assembler;
96 class CodeAlloc;
97 class Fragment;
98 class LirBuffer;
99 #ifdef DEBUG
100 class LabelMap;
101 #endif
102 template<typename K> struct DefaultHash;
103 template<typename K, typename V, typename H> class HashMap;
104 template<typename T> class Seq;
107 /* Tracer constants. */
108 static const size_t MONITOR_N_GLOBAL_STATES = 4;
109 static const size_t FRAGMENT_TABLE_SIZE = 512;
110 static const size_t MAX_NATIVE_STACK_SLOTS = 4096;
111 static const size_t MAX_CALL_STACK_ENTRIES = 500;
112 static const size_t MAX_GLOBAL_SLOTS = 4096;
113 static const size_t GLOBAL_SLOTS_BUFFER_SIZE = MAX_GLOBAL_SLOTS + 1;
115 /* Forward declarations of tracer types. */
116 class VMAllocator;
117 class TraceRecorder;
118 class FrameInfoCache;
119 struct REHashFn;
120 struct REHashKey;
121 struct FrameInfo;
122 struct VMSideExit;
123 struct TreeFragment;
124 struct InterpState;
125 template<typename T> class Queue;
126 typedef Queue<uint16> SlotList;
127 struct REFragment;
128 typedef nanojit::HashMap<REHashKey, REFragment*, REHashFn> REHashMap;
130 #if defined(JS_JIT_SPEW) || defined(DEBUG)
131 struct FragPI;
132 typedef nanojit::HashMap<uint32, FragPI, nanojit::DefaultHash<uint32> > FragStatsMap;
133 #endif
135 /* Holds the execution state during trace execution. */
136 struct InterpState
138 JSContext* cx; // current VM context handle
139 double* stackBase; // native stack base
140 double* sp; // native stack pointer, stack[0] is spbase[0]
141 double* eos; // first unusable word after the native stack / begin of globals
142 FrameInfo** callstackBase; // call stack base
143 void* sor; // start of rp stack
144 FrameInfo** rp; // call stack pointer
145 void* eor; // first unusable word after the call stack
146 VMSideExit* lastTreeExitGuard; // guard we exited on during a tree call
147 VMSideExit* lastTreeCallGuard; // guard we want to grow from if the tree
148 // call exit guard mismatched
149 void* rpAtLastTreeCall; // value of rp at innermost tree call guard
150 VMSideExit* outermostTreeExitGuard; // the last side exit returned by js_CallTree
151 TreeFragment* outermostTree; // the outermost tree we initially invoked
152 uintN* inlineCallCountp; // inline call count counter
153 VMSideExit** innermostNestedGuardp;
154 VMSideExit* innermost;
155 uint64 startTime;
156 InterpState* prev;
158 // Used by _FAIL builtins; see jsbuiltins.h. The builtin sets the
159 // JSBUILTIN_BAILED bit if it bails off trace and the JSBUILTIN_ERROR bit
160 // if an error or exception occurred.
161 uint32 builtinStatus;
163 // Used to communicate the location of the return value in case of a deep bail.
164 double* deepBailSp;
166 // Used when calling natives from trace to root the vp vector.
167 uintN nativeVpLen;
168 jsval* nativeVp;
170 InterpState(JSContext *cx, JSTraceMonitor *tm, TreeFragment *ti,
171 uintN &inlineCallCountp, VMSideExit** innermostNestedGuardp);
172 ~InterpState();
176 * Storage for the execution state and store during trace execution. Generated
177 * code depends on the fact that the globals begin |MAX_NATIVE_STACK_SLOTS|
178 * doubles after the stack begins. Thus, on trace, |InterpState::eos| holds a
179 * pointer to the first global.
181 struct TraceNativeStorage
183 double stack_global_buf[MAX_NATIVE_STACK_SLOTS + GLOBAL_SLOTS_BUFFER_SIZE];
184 FrameInfo *callstack_buf[MAX_CALL_STACK_ENTRIES];
186 double *stack() { return stack_global_buf; }
187 double *global() { return stack_global_buf + MAX_NATIVE_STACK_SLOTS; }
188 FrameInfo **callstack() { return callstack_buf; }
191 /* Holds data to track a single global object. */
192 struct GlobalState {
193 JSObject* globalObj;
194 uint32 globalShape;
195 SlotList* globalSlots;
199 * Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
200 * JS_THREADSAFE) has an associated trace monitor that keeps track of loop
201 * frequencies for all JavaScript code loaded into that runtime.
203 struct JSTraceMonitor {
205 * The context currently executing JIT-compiled code on this thread, or
206 * NULL if none. Among other things, this can in certain cases prevent
207 * last-ditch GC and suppress calls to JS_ReportOutOfMemory.
209 * !tracecx && !recorder: not on trace
210 * !tracecx && recorder: recording
211 * tracecx && !recorder: executing a trace
212 * tracecx && recorder: executing inner loop, recording outer loop
214 JSContext *tracecx;
217 * Cached storage to use when executing on trace. While we may enter nested
218      * traces, we always reuse the outer trace's storage, so we never need more
219      * than one of these.
221 TraceNativeStorage storage;
224 * There are 3 allocators here. This might seem like overkill, but they
225 * have different lifecycles, and by keeping them separate we keep the
226 * amount of retained memory down significantly.
228 * The dataAlloc has the lifecycle of the monitor. It's flushed only
229 * when the monitor is flushed.
231 * The traceAlloc has the same flush lifecycle as the dataAlloc, but
232 * it is also *marked* when a recording starts and rewinds to the mark
233 * point if recording aborts. So you can put things in it that are only
234 * reachable on a successful record/compile cycle.
236 * The tempAlloc is flushed after each recording, successful or not.
239 VMAllocator* dataAlloc; /* A chunk allocator for fragments. */
240 VMAllocator* traceAlloc; /* An allocator for trace metadata. */
241 VMAllocator* tempAlloc; /* A temporary chunk allocator. */
242 nanojit::CodeAlloc* codeAlloc; /* An allocator for native code. */
243 nanojit::Assembler* assembler;
244 nanojit::LirBuffer* lirbuf;
245 nanojit::LirBuffer* reLirBuf;
246 FrameInfoCache* frameCache;
247 #ifdef DEBUG
248 nanojit::LabelMap* labels;
249 #endif
251 TraceRecorder* recorder;
253 struct GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
254 struct TreeFragment* vmfragments[FRAGMENT_TABLE_SIZE];
255 JSDHashTable recordAttempts;
258      * Maximum size of the code cache before we start flushing. 1/16 of this
259      * size is used as the threshold for the regular expression code cache.
261 uint32 maxCodeCacheBytes;
264 * If nonzero, do not flush the JIT cache after a deep bail. That would
265 * free JITted code pages that we will later return to. Instead, set the
266 * needFlush flag so that it can be flushed later.
268 JSBool needFlush;
271 * reservedObjects is a linked list (via fslots[0]) of preallocated JSObjects.
272 * The JIT uses this to ensure that leaving a trace tree can't fail.
274 JSBool useReservedObjects;
275 JSObject *reservedObjects;
278 * Fragment map for the regular expression compiler.
280 REHashMap* reFragments;
283 * A temporary allocator for RE recording.
285 VMAllocator* reTempAlloc;
287 #ifdef DEBUG
288 /* Fields needed for fragment/guard profiling. */
289 nanojit::Seq<nanojit::Fragment*>* branches;
290 uint32 lastFragID;
292 * profAlloc has a lifetime which spans exactly from js_InitJIT to
293 * js_FinishJIT.
295 VMAllocator* profAlloc;
296 FragStatsMap* profTab;
297 #endif
299 /* Flush the JIT cache. */
300 void flush();
302 /* Mark all objects baked into native code in the code cache. */
303 void mark(JSTracer *trc);
305 bool outOfMemory() const;
308 typedef struct InterpStruct InterpStruct;
311 * N.B. JS_ON_TRACE(cx) is true if JIT code is on the stack in the current
312 * thread, regardless of whether cx is the context in which that trace is
313 * executing. cx must be a context on the current thread.
315 #ifdef JS_TRACER
316 # define JS_ON_TRACE(cx) (JS_TRACE_MONITOR(cx).tracecx != NULL)
317 #else
318 # define JS_ON_TRACE(cx) JS_FALSE
319 #endif
321 #ifdef DEBUG
322 # define JS_EVAL_CACHE_METERING 1
323 # define JS_FUNCTION_METERING 1
324 #endif
326 /* Number of potentially reusable scriptsToGC to search for the eval cache. */
327 #ifndef JS_EVAL_CACHE_SHIFT
328 # define JS_EVAL_CACHE_SHIFT 6
329 #endif
330 #define JS_EVAL_CACHE_SIZE JS_BIT(JS_EVAL_CACHE_SHIFT)
332 #ifdef JS_EVAL_CACHE_METERING
333 # define EVAL_CACHE_METER_LIST(_) _(probe), _(hit), _(step), _(noscope)
334 # define identity(x) x
336 struct JSEvalCacheMeter {
337 uint64 EVAL_CACHE_METER_LIST(identity);
340 # undef identity
341 #endif
343 #ifdef JS_FUNCTION_METERING
344 # define FUNCTION_KIND_METER_LIST(_) \
345 _(allfun), _(heavy), _(nofreeupvar), _(onlyfreevar), \
346 _(display), _(flat), _(setupvar), _(badfunarg)
347 # define identity(x) x
349 struct JSFunctionMeter {
350 int32 FUNCTION_KIND_METER_LIST(identity);
353 # undef identity
354 #endif
356 struct JSLocalRootChunk;
358 #define JSLRS_CHUNK_SHIFT 8
359 #define JSLRS_CHUNK_SIZE JS_BIT(JSLRS_CHUNK_SHIFT)
360 #define JSLRS_CHUNK_MASK JS_BITMASK(JSLRS_CHUNK_SHIFT)
362 struct JSLocalRootChunk {
363 jsval roots[JSLRS_CHUNK_SIZE];
364 JSLocalRootChunk *down;
367 struct JSLocalRootStack {
368 uint32 scopeMark;
369 uint32 rootCount;
370 JSLocalRootChunk *topChunk;
371 JSLocalRootChunk firstChunk;
373 /* See comments in js_NewFinalizableGCThing. */
374 JSGCFreeLists gcFreeLists;
377 const uint32 JSLRS_NULL_MARK = uint32(-1);
379 struct JSThreadData {
380 JSGCFreeLists gcFreeLists;
383 * Flag indicating that we are waiving any soft limits on the GC heap
384 * because we want allocations to be infallible (except when we hit
385 * a hard quota).
387 bool waiveGCQuota;
390 * The GSN cache is per thread since even multi-cx-per-thread embeddings
391 * do not interleave js_GetSrcNote calls.
393 JSGSNCache gsnCache;
395 /* Property cache for faster call/get/set invocation. */
396 JSPropertyCache propertyCache;
398 /* Random number generator state, used by jsmath.cpp. */
399 int64 rngSeed;
401 /* Optional stack of heap-allocated scoped local GC roots. */
402 JSLocalRootStack *localRootStack;
404 #ifdef JS_TRACER
405 /* Trace-tree JIT recorder/interpreter state. */
406 JSTraceMonitor traceMonitor;
407 #endif
409 /* Lock-free hashed lists of scripts created by eval to garbage-collect. */
410 JSScript *scriptsToGC[JS_EVAL_CACHE_SIZE];
412 #ifdef JS_EVAL_CACHE_METERING
413 JSEvalCacheMeter evalCacheMeter;
414 #endif
417 * Cache of reusable JSNativeEnumerators mapped by shape identifiers (as
418 * stored in scope->shape). This cache is nulled by the GC and protected
419 * by gcLock.
421 #define NATIVE_ENUM_CACHE_LOG2 8
422 #define NATIVE_ENUM_CACHE_MASK JS_BITMASK(NATIVE_ENUM_CACHE_LOG2)
423 #define NATIVE_ENUM_CACHE_SIZE JS_BIT(NATIVE_ENUM_CACHE_LOG2)
425 #define NATIVE_ENUM_CACHE_HASH(shape) \
426 ((((shape) >> NATIVE_ENUM_CACHE_LOG2) ^ (shape)) & NATIVE_ENUM_CACHE_MASK)
428 jsuword nativeEnumCache[NATIVE_ENUM_CACHE_SIZE];
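/*
 * Illustrative sketch (hypothetical caller): indexing the native enumerator
 * cache by a scope's shape. |shape| is assumed to come from the object's
 * scope; the accessor used to obtain it is not shown here.
 *
 *   jsuword *cachep =
 *       &JS_THREAD_DATA(cx)->nativeEnumCache[NATIVE_ENUM_CACHE_HASH(shape)];
 */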
430 void init();
431 void finish();
432 void mark(JSTracer *trc);
433 void purge(JSContext *cx);
434 void purgeGCFreeLists();
437 #ifdef JS_THREADSAFE
440 * Structure uniquely representing a thread. It holds thread-private data
441 * that can be accessed without a global lock.
443 struct JSThread {
444 /* Linked list of all contexts in use on this thread. */
445 JSCList contextList;
447 /* Opaque thread-id, from NSPR's PR_GetCurrentThread(). */
448 jsword id;
450 /* Indicates that the thread is waiting in ClaimTitle from jslock.cpp. */
451 JSTitle *titleToShare;
454 * Thread-local version of JSRuntime.gcMallocBytes to avoid taking
455 * locks on each JS_malloc.
457 ptrdiff_t gcThreadMallocBytes;
460 * Deallocator task for this thread.
462 JSFreePointerListTask *deallocatorTask;
464 /* Factored out of JSThread for !JS_THREADSAFE embedding in JSRuntime. */
465 JSThreadData data;
469  * We update JSRuntime::gcMallocBytes only when JSThread::gcThreadMallocBytes
470  * exhausts the following limit.
473 const size_t JS_GC_THREAD_MALLOC_LIMIT = 1 << 19;
475 #define JS_THREAD_DATA(cx) (&(cx)->thread->data)
477 struct JSThreadsHashEntry {
478 JSDHashEntryHdr base;
479 JSThread *thread;
482 extern JSThread *
483 js_CurrentThread(JSRuntime *rt);
486  * The function takes the GC lock and does not release it on successful return.
487 * On error (out of memory) the function releases the lock but delegates
488 * the error reporting to the caller.
490 extern JSBool
491 js_InitContextThread(JSContext *cx);
494 * On entrance the GC lock must be held and it will be held on exit.
496 extern void
497 js_ClearContextThread(JSContext *cx);
499 #endif /* JS_THREADSAFE */
501 typedef enum JSDestroyContextMode {
502 JSDCM_NO_GC,
503 JSDCM_MAYBE_GC,
504 JSDCM_FORCE_GC,
505 JSDCM_NEW_FAILED
506 } JSDestroyContextMode;
508 typedef enum JSRuntimeState {
509 JSRTS_DOWN,
510 JSRTS_LAUNCHING,
511 JSRTS_UP,
512 JSRTS_LANDING
513 } JSRuntimeState;
515 typedef enum JSBuiltinFunctionId {
516 JSBUILTIN_ObjectToIterator,
517 JSBUILTIN_CallIteratorNext,
518 JSBUILTIN_LIMIT
519 } JSBuiltinFunctionId;
521 typedef struct JSPropertyTreeEntry {
522 JSDHashEntryHdr hdr;
523 JSScopeProperty *child;
524 } JSPropertyTreeEntry;
526 typedef struct JSSetSlotRequest JSSetSlotRequest;
528 struct JSSetSlotRequest {
529 JSObject *obj; /* object containing slot to set */
530 JSObject *pobj; /* new proto or parent reference */
531 uint16 slot; /* which to set, proto or parent */
532 JSPackedBool cycle; /* true if a cycle was detected */
533 JSSetSlotRequest *next; /* next request in GC worklist */
536 struct JSRuntime {
537 /* Runtime state, synchronized by the stateChange/gcLock condvar/lock. */
538 JSRuntimeState state;
540 /* Context create/destroy callback. */
541 JSContextCallback cxCallback;
544 * Shape regenerated whenever a prototype implicated by an "add property"
545 * property cache fill and induced trace guard has a readonly property or a
546 * setter defined on it. This number proxies for the shapes of all objects
547 * along the prototype chain of all objects in the runtime on which such an
548 * add-property result has been cached/traced.
550 * See bug 492355 for more details.
552 * This comes early in JSRuntime to minimize the immediate format used by
553 * trace-JITted code that reads it.
555 uint32 protoHazardShape;
557 /* Garbage collector state, used by jsgc.c. */
558 JSGCChunkInfo *gcChunkList;
559 JSGCArenaList gcArenaList[FINALIZE_LIMIT];
560 JSGCDoubleArenaList gcDoubleArenaList;
561 JSDHashTable gcRootsHash;
562 JSDHashTable *gcLocksHash;
563 jsrefcount gcKeepAtoms;
564 size_t gcBytes;
565 size_t gcLastBytes;
566 size_t gcMaxBytes;
567 size_t gcMaxMallocBytes;
568 uint32 gcEmptyArenaPoolLifespan;
569 uint32 gcLevel;
570 uint32 gcNumber;
571 JSTracer *gcMarkingTracer;
572 uint32 gcTriggerFactor;
573 size_t gcTriggerBytes;
574 volatile JSBool gcIsNeeded;
575 volatile JSBool gcFlushCodeCaches;
578 * NB: do not pack another flag here by claiming gcPadding unless the new
579 * flag is written only by the GC thread. Atomic updates to packed bytes
580 * are not guaranteed, so stores issued by one thread may be lost due to
581 * unsynchronized read-modify-write cycles on other threads.
583 JSPackedBool gcPoke;
584 JSPackedBool gcRunning;
585 JSPackedBool gcRegenShapes;
588 * During gc, if rt->gcRegenShapes &&
589 * (scope->flags & JSScope::SHAPE_REGEN) == rt->gcRegenShapesScopeFlag,
590 * then the scope's shape has already been regenerated during this GC.
591 * To avoid having to sweep JSScopes, the bit's meaning toggles with each
592 * shape-regenerating GC.
594 * FIXME Once scopes are GC'd (bug 505004), this will be obsolete.
596 uint8 gcRegenShapesScopeFlag;
598 #ifdef JS_GC_ZEAL
599 jsrefcount gcZeal;
600 #endif
602 JSGCCallback gcCallback;
605 * Malloc counter to measure memory pressure for GC scheduling. It runs
606 * from gcMaxMallocBytes down to zero.
608 ptrdiff_t gcMallocBytes;
611 * Stack of GC arenas containing things that the GC marked, where children
612 * reached from those things have not yet been marked. This helps avoid
613 * using too much native stack during recursive GC marking.
615 JSGCArenaInfo *gcUntracedArenaStackTop;
616 #ifdef DEBUG
617 size_t gcTraceLaterCount;
618 #endif
621      * Table for tracking iterators to ensure that we close the iterator's state
622 * before finalizing the iterable object.
624 js::Vector<JSObject*, 0, js::SystemAllocPolicy> gcIteratorTable;
627 * The trace operation and its data argument to trace embedding-specific
628 * GC roots.
630 JSTraceDataOp gcExtraRootsTraceOp;
631 void *gcExtraRootsData;
634 * Used to serialize cycle checks when setting __proto__ or __parent__ by
635 * requesting the GC handle the required cycle detection. If the GC hasn't
636 * been poked, it won't scan for garbage. This member is protected by
637 * rt->gcLock.
639 JSSetSlotRequest *setSlotRequests;
641 /* Well-known numbers held for use by this runtime's contexts. */
642 jsval NaNValue;
643 jsval negativeInfinityValue;
644 jsval positiveInfinityValue;
646 #ifdef JS_THREADSAFE
647 JSLock *deflatedStringCacheLock;
648 #endif
649 JSHashTable *deflatedStringCache;
650 #ifdef DEBUG
651 uint32 deflatedStringCacheBytes;
652 #endif
654 JSString *emptyString;
657 * Builtin functions, lazily created and held for use by the trace recorder.
659 * This field would be #ifdef JS_TRACER, but XPConnect is compiled without
660 * -DJS_TRACER and includes this header.
662 JSObject *builtinFunctions[JSBUILTIN_LIMIT];
664 /* List of active contexts sharing this runtime; protected by gcLock. */
665 JSCList contextList;
667 /* Per runtime debug hooks -- see jsprvtd.h and jsdbgapi.h. */
668 JSDebugHooks globalDebugHooks;
670 #ifdef JS_TRACER
671 /* True if any debug hooks not supported by the JIT are enabled. */
672 bool debuggerInhibitsJIT() const {
673 return (globalDebugHooks.interruptHandler ||
674 globalDebugHooks.callHook ||
675 globalDebugHooks.objectHook);
677 #endif
679 /* More debugging state, see jsdbgapi.c. */
680 JSCList trapList;
681 JSCList watchPointList;
683 /* Client opaque pointers */
684 void *data;
686 #ifdef JS_THREADSAFE
687 /* These combine to interlock the GC and new requests. */
688 PRLock *gcLock;
689 PRCondVar *gcDone;
690 PRCondVar *requestDone;
691 uint32 requestCount;
692 JSThread *gcThread;
694 /* Lock and owning thread pointer for JS_LOCK_RUNTIME. */
695 PRLock *rtLock;
696 #ifdef DEBUG
697 jsword rtLockOwner;
698 #endif
700 /* Used to synchronize down/up state change; protected by gcLock. */
701 PRCondVar *stateChange;
704 * State for sharing single-threaded titles, once a second thread tries to
705 * lock a title. The titleSharingDone condvar is protected by rt->gcLock
706 * to minimize number of locks taken in JS_EndRequest.
708 * The titleSharingTodo linked list is likewise "global" per runtime, not
709 * one-list-per-context, to conserve space over all contexts, optimizing
710 * for the likely case that titles become shared rarely, and among a very
711 * small set of threads (contexts).
713 PRCondVar *titleSharingDone;
714 JSTitle *titleSharingTodo;
717 * Magic terminator for the rt->titleSharingTodo linked list, threaded through
718 * title->u.link. This hack allows us to test whether a title is on the list
719 * by asking whether title->u.link is non-null. We use a large, likely bogus
720 * pointer here to distinguish this value from any valid u.count (small int)
721 * value.
723 #define NO_TITLE_SHARING_TODO ((JSTitle *) 0xfeedbeef)
726 * Lock serializing trapList and watchPointList accesses, and count of all
727 * mutations to trapList and watchPointList made by debugger threads. To
728 * keep the code simple, we define debuggerMutations for the thread-unsafe
729 * case too.
731 PRLock *debuggerLock;
733 JSDHashTable threads;
734 #endif /* JS_THREADSAFE */
735 uint32 debuggerMutations;
738 * Security callbacks set on the runtime are used by each context unless
739 * an override is set on the context.
741 JSSecurityCallbacks *securityCallbacks;
744 * Shared scope property tree, and arena-pool for allocating its nodes.
745 * The propertyRemovals counter is incremented for every JSScope::clear,
746 * and for each JSScope::remove method call that frees a slot in an object.
747 * See js_NativeGet and js_NativeSet in jsobj.c.
749 JSDHashTable propertyTreeHash;
750 JSScopeProperty *propertyFreeList;
751 JSArenaPool propertyArenaPool;
752 int32 propertyRemovals;
754 /* Script filename table. */
755 struct JSHashTable *scriptFilenameTable;
756 JSCList scriptFilenamePrefixes;
757 #ifdef JS_THREADSAFE
758 PRLock *scriptFilenameTableLock;
759 #endif
761 /* Number localization, used by jsnum.c */
762 const char *thousandsSeparator;
763 const char *decimalSeparator;
764 const char *numGrouping;
767 * Weak references to lazily-created, well-known XML singletons.
769 * NB: Singleton objects must be carefully disconnected from the rest of
770 * the object graph usually associated with a JSContext's global object,
771 * including the set of standard class objects. See jsxml.c for details.
773 JSObject *anynameObject;
774 JSObject *functionNamespaceObject;
776 #ifndef JS_THREADSAFE
777 JSThreadData threadData;
779 #define JS_THREAD_DATA(cx) (&(cx)->runtime->threadData)
780 #endif
783 * Object shape (property cache structural type) identifier generator.
785 * Type 0 stands for the empty scope, and must not be regenerated due to
786 * uint32 wrap-around. Since js_GenerateShape (in jsinterp.cpp) uses
787 * atomic pre-increment, the initial value for the first typed non-empty
788 * scope will be 1.
790 * If this counter overflows into SHAPE_OVERFLOW_BIT (in jsinterp.h), the
791 * cache is disabled, to avoid aliasing two different types. It stays
792 * disabled until a triggered GC at some later moment compresses live
793 * types, minimizing rt->shapeGen in the process.
795 volatile uint32 shapeGen;
797 /* Literal table maintained by jsatom.c functions. */
798 JSAtomState atomState;
800 #ifdef JS_THREADSAFE
801 JSBackgroundThread *deallocatorThread;
802 #endif
805 * Various metering fields are defined at the end of JSRuntime. In this
806 * way there is no need to recompile all the code that refers to other
807 * fields of JSRuntime after enabling the corresponding metering macro.
809 #ifdef JS_DUMP_ENUM_CACHE_STATS
810 int32 nativeEnumProbes;
811 int32 nativeEnumMisses;
812 # define ENUM_CACHE_METER(name) JS_ATOMIC_INCREMENT(&cx->runtime->name)
813 #else
814 # define ENUM_CACHE_METER(name) ((void) 0)
815 #endif
817 #ifdef JS_DUMP_LOOP_STATS
818 /* Loop statistics, to trigger trace recording and compiling. */
819 JSBasicStats loopStats;
820 #endif
822 #ifdef DEBUG
823 /* Function invocation metering. */
824 jsrefcount inlineCalls;
825 jsrefcount nativeCalls;
826 jsrefcount nonInlineCalls;
827 jsrefcount constructs;
829 /* Title lock and scope property metering. */
830 jsrefcount claimAttempts;
831 jsrefcount claimedTitles;
832 jsrefcount deadContexts;
833 jsrefcount deadlocksAvoided;
834 jsrefcount liveScopes;
835 jsrefcount sharedTitles;
836 jsrefcount totalScopes;
837 jsrefcount liveScopeProps;
838 jsrefcount liveScopePropsPreSweep;
839 jsrefcount totalScopeProps;
840 jsrefcount livePropTreeNodes;
841 jsrefcount duplicatePropTreeNodes;
842 jsrefcount totalPropTreeNodes;
843 jsrefcount propTreeKidsChunks;
845 /* String instrumentation. */
846 jsrefcount liveStrings;
847 jsrefcount totalStrings;
848 jsrefcount liveDependentStrings;
849 jsrefcount totalDependentStrings;
850 jsrefcount badUndependStrings;
851 double lengthSum;
852 double lengthSquaredSum;
853 double strdepLengthSum;
854 double strdepLengthSquaredSum;
856 /* Script instrumentation. */
857 jsrefcount liveScripts;
858 jsrefcount totalScripts;
859 jsrefcount liveEmptyScripts;
860 jsrefcount totalEmptyScripts;
861 #endif /* DEBUG */
863 #ifdef JS_SCOPE_DEPTH_METER
865 * Stats on runtime prototype chain lookups and scope chain depths, i.e.,
866 * counts of objects traversed on a chain until the wanted id is found.
868 JSBasicStats protoLookupDepthStats;
869 JSBasicStats scopeSearchDepthStats;
872 * Stats on compile-time host environment and lexical scope chain lengths
873 * (maximum depths).
875 JSBasicStats hostenvScopeDepthStats;
876 JSBasicStats lexicalScopeDepthStats;
877 #endif
879 #ifdef JS_GCMETER
880 JSGCStats gcStats;
881 #endif
883 #ifdef JS_FUNCTION_METERING
884 JSFunctionMeter functionMeter;
885 char lastScriptFilename[1024];
886 #endif
888 JSRuntime();
889 ~JSRuntime();
891 bool init(uint32 maxbytes);
893 void setGCTriggerFactor(uint32 factor);
894 void setGCLastBytes(size_t lastBytes);
896 void* malloc(size_t bytes) { return ::js_malloc(bytes); }
898 void* calloc(size_t bytes) { return ::js_calloc(bytes); }
900 void* realloc(void* p, size_t bytes) { return ::js_realloc(p, bytes); }
902 void free(void* p) { ::js_free(p); }
904 bool isGCMallocLimitReached() const { return gcMallocBytes <= 0; }
906 void resetGCMallocBytes() { gcMallocBytes = ptrdiff_t(gcMaxMallocBytes); }
908 void setGCMaxMallocBytes(size_t value) {
910          * For compatibility, treat any value that exceeds PTRDIFF_T_MAX as
911          * meaning PTRDIFF_T_MAX.
913 gcMaxMallocBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
914 resetGCMallocBytes();
918 /* Common macros to access thread-local caches in JSThread or JSRuntime. */
919 #define JS_GSN_CACHE(cx) (JS_THREAD_DATA(cx)->gsnCache)
920 #define JS_PROPERTY_CACHE(cx) (JS_THREAD_DATA(cx)->propertyCache)
921 #define JS_TRACE_MONITOR(cx) (JS_THREAD_DATA(cx)->traceMonitor)
922 #define JS_SCRIPTS_TO_GC(cx) (JS_THREAD_DATA(cx)->scriptsToGC)
924 #ifdef JS_EVAL_CACHE_METERING
925 # define EVAL_CACHE_METER(x) (JS_THREAD_DATA(cx)->evalCacheMeter.x++)
926 #else
927 # define EVAL_CACHE_METER(x) ((void) 0)
928 #endif
930 #ifdef DEBUG
931 # define JS_RUNTIME_METER(rt, which) JS_ATOMIC_INCREMENT(&(rt)->which)
932 # define JS_RUNTIME_UNMETER(rt, which) JS_ATOMIC_DECREMENT(&(rt)->which)
933 #else
934 # define JS_RUNTIME_METER(rt, which) /* nothing */
935 # define JS_RUNTIME_UNMETER(rt, which) /* nothing */
936 #endif
938 #define JS_KEEP_ATOMS(rt) JS_ATOMIC_INCREMENT(&(rt)->gcKeepAtoms);
939 #define JS_UNKEEP_ATOMS(rt) JS_ATOMIC_DECREMENT(&(rt)->gcKeepAtoms);
941 #ifdef JS_ARGUMENT_FORMATTER_DEFINED
943 * Linked list mapping format strings for JS_{Convert,Push}Arguments{,VA} to
944 * formatter functions. Elements are sorted in non-increasing format string
945 * length order.
947 struct JSArgumentFormatMap {
948 const char *format;
949 size_t length;
950 JSArgumentFormatter formatter;
951 JSArgumentFormatMap *next;
953 #endif
955 struct JSStackHeader {
956 uintN nslots;
957 JSStackHeader *down;
960 #define JS_STACK_SEGMENT(sh) ((jsval *)(sh) + 2)
963 * Key and entry types for the JSContext.resolvingTable hash table, typedef'd
964 * here because all consumers need to see these declarations (and not just the
965 * typedef names, as would be the case for an opaque pointer-to-typedef'd-type
966 * declaration), along with cx->resolvingTable.
968 typedef struct JSResolvingKey {
969 JSObject *obj;
970 jsid id;
971 } JSResolvingKey;
973 typedef struct JSResolvingEntry {
974 JSDHashEntryHdr hdr;
975 JSResolvingKey key;
976 uint32 flags;
977 } JSResolvingEntry;
979 #define JSRESFLAG_LOOKUP 0x1 /* resolving id from lookup */
980 #define JSRESFLAG_WATCH 0x2 /* resolving id from watch */
983 * Macros to push/pop JSTempValueRooter instances to context-linked stack of
984 * temporary GC roots. If you need to protect a result value that flows out of
985 * a C function across several layers of other functions, use the
986 * js_LeaveLocalRootScopeWithResult internal API (see further below) instead.
988 * The macros also provide a simple way to get a single rooted pointer via
989 * JS_PUSH_TEMP_ROOT_<KIND>(cx, NULL, &tvr). Then &tvr.u.<kind> gives the
990 * necessary pointer.
992 * JSTempValueRooter.count defines the type of the rooted value referenced by
993 * JSTempValueRooter.u union of type JSTempValueUnion. When count is positive
994 * or zero, u.array points to a vector of jsvals. Otherwise it must be one of
995 * the following constants:
997 #define JSTVU_SINGLE (-1) /* u.value or u.<gcthing> is single jsval
998 or non-JSString GC-thing pointer */
999 #define JSTVU_TRACE (-2) /* u.trace is a hook to trace a custom
1000 * structure */
1001 #define JSTVU_SPROP (-3) /* u.sprop roots property tree node */
1002 #define JSTVU_WEAK_ROOTS (-4) /* u.weakRoots points to saved weak roots */
1003 #define JSTVU_COMPILER (-5) /* u.compiler roots JSCompiler* */
1004 #define JSTVU_SCRIPT (-6) /* u.script roots JSScript* */
1005 #define JSTVU_ENUMERATOR (-7) /* a pointer to JSTempValueRooter points
1006 to an instance of JSAutoEnumStateRooter
1007 with u.object storing the enumeration
1008 object */
1011  * Here a single JSTVU_SINGLE covers both jsval and pointers to almost (see note
1012 * below) any GC-thing via reinterpreting the thing as JSVAL_OBJECT. This works
1013 * because the GC-thing is aligned on a 0 mod 8 boundary, and object has the 0
1014 * jsval tag. So any GC-heap-allocated thing pointer may be tagged as if it
1015 * were an object and untagged, if it's then used only as an opaque pointer
1016 * until discriminated by other means than tag bits. This is how, for example,
1017 * js_GetGCThingTraceKind uses its |thing| parameter -- it consults GC-thing
1018 * flags stored separately from the thing to decide the kind of thing.
1020 * Note well that JSStrings may be statically allocated (see the intStringTable
1021 * and unitStringTable static arrays), so this hack does not work for arbitrary
1022 * GC-thing pointers.
1024 #define JS_PUSH_TEMP_ROOT_COMMON(cx,x,tvr,cnt,kind) \
1025 JS_BEGIN_MACRO \
1026 JS_ASSERT((cx)->tempValueRooters != (tvr)); \
1027 (tvr)->count = (cnt); \
1028 (tvr)->u.kind = (x); \
1029 (tvr)->down = (cx)->tempValueRooters; \
1030 (cx)->tempValueRooters = (tvr); \
1031 JS_END_MACRO
1033 #define JS_POP_TEMP_ROOT(cx,tvr) \
1034 JS_BEGIN_MACRO \
1035 JS_ASSERT((cx)->tempValueRooters == (tvr)); \
1036 (cx)->tempValueRooters = (tvr)->down; \
1037 JS_END_MACRO
1039 #define JS_PUSH_TEMP_ROOT(cx,cnt,arr,tvr) \
1040 JS_BEGIN_MACRO \
1041 JS_ASSERT((int)(cnt) >= 0); \
1042 JS_PUSH_TEMP_ROOT_COMMON(cx, arr, tvr, (ptrdiff_t) (cnt), array); \
1043 JS_END_MACRO
1045 #define JS_PUSH_SINGLE_TEMP_ROOT(cx,val,tvr) \
1046 JS_PUSH_TEMP_ROOT_COMMON(cx, val, tvr, JSTVU_SINGLE, value)
1048 #define JS_PUSH_TEMP_ROOT_OBJECT(cx,obj,tvr) \
1049 JS_PUSH_TEMP_ROOT_COMMON(cx, obj, tvr, JSTVU_SINGLE, object)
1051 #define JS_PUSH_TEMP_ROOT_STRING(cx,str,tvr) \
1052 JS_PUSH_SINGLE_TEMP_ROOT(cx, str ? STRING_TO_JSVAL(str) : JSVAL_NULL, tvr)
1054 #define JS_PUSH_TEMP_ROOT_XML(cx,xml_,tvr) \
1055 JS_PUSH_TEMP_ROOT_COMMON(cx, xml_, tvr, JSTVU_SINGLE, xml)
1057 #define JS_PUSH_TEMP_ROOT_TRACE(cx,trace_,tvr) \
1058 JS_PUSH_TEMP_ROOT_COMMON(cx, trace_, tvr, JSTVU_TRACE, trace)
1060 #define JS_PUSH_TEMP_ROOT_SPROP(cx,sprop_,tvr) \
1061 JS_PUSH_TEMP_ROOT_COMMON(cx, sprop_, tvr, JSTVU_SPROP, sprop)
1063 #define JS_PUSH_TEMP_ROOT_WEAK_COPY(cx,weakRoots_,tvr) \
1064 JS_PUSH_TEMP_ROOT_COMMON(cx, weakRoots_, tvr, JSTVU_WEAK_ROOTS, weakRoots)
1066 #define JS_PUSH_TEMP_ROOT_COMPILER(cx,pc,tvr) \
1067 JS_PUSH_TEMP_ROOT_COMMON(cx, pc, tvr, JSTVU_COMPILER, compiler)
1069 #define JS_PUSH_TEMP_ROOT_SCRIPT(cx,script_,tvr) \
1070 JS_PUSH_TEMP_ROOT_COMMON(cx, script_, tvr, JSTVU_SCRIPT, script)
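/*
 * Illustrative sketch: the push/pop pairing these macros expect. The fallible
 * helper called in between is hypothetical.
 *
 *   JSTempValueRooter tvr;
 *   JS_PUSH_TEMP_ROOT_STRING(cx, str, &tvr);
 *   ok = DoSomethingThatCanGC(cx, str);    // str stays rooted across a GC
 *   JS_POP_TEMP_ROOT(cx, &tvr);
 */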
1072 #define JSRESOLVE_INFER 0xffff /* infer bits from current bytecode */
1074 struct JSContext {
1076 * If this flag is set, we were asked to call back the operation callback
1077 * as soon as possible.
1079 volatile jsint operationCallbackFlag;
1081 /* JSRuntime contextList linkage. */
1082 JSCList link;
1084 #if JS_HAS_XML_SUPPORT
1086 * Bit-set formed from binary exponentials of the XML_* tiny-ids defined
1087 * for boolean settings in jsxml.c, plus an XSF_CACHE_VALID bit. Together
1088 * these act as a cache of the boolean XML.ignore* and XML.prettyPrinting
1089 * property values associated with this context's global object.
1091 uint8 xmlSettingFlags;
1092 uint8 padding;
1093 #else
1094 uint16 padding;
1095 #endif
1098 * Classic Algol "display" static link optimization.
1100 #define JS_DISPLAY_SIZE 16U
1102 JSStackFrame *display[JS_DISPLAY_SIZE];
1104 /* Runtime version control identifier. */
1105 uint16 version;
1107 /* Per-context options. */
1108 uint32 options; /* see jsapi.h for JSOPTION_* */
1110 /* Locale specific callbacks for string conversion. */
1111 JSLocaleCallbacks *localeCallbacks;
1114 * cx->resolvingTable is non-null and non-empty if we are initializing
1115 * standard classes lazily, or if we are otherwise recursing indirectly
1116 * from js_LookupProperty through a JSClass.resolve hook. It is used to
1117 * limit runaway recursion (see jsapi.c and jsobj.c).
1119 JSDHashTable *resolvingTable;
1122 * True if generating an error, to prevent runaway recursion.
1123 * NB: generatingError packs with insideGCMarkCallback and throwing below.
1125 JSPackedBool generatingError;
1127 /* Flag to indicate that we run inside gcCallback(cx, JSGC_MARK_END). */
1128 JSPackedBool insideGCMarkCallback;
1130 /* Exception state -- the exception member is a GC root by definition. */
1131 JSPackedBool throwing; /* is there a pending exception? */
1132 jsval exception; /* most-recently-thrown exception */
1134 /* Limit pointer for checking native stack consumption during recursion. */
1135 jsuword stackLimit;
1137 /* Quota on the size of arenas used to compile and execute scripts. */
1138 size_t scriptStackQuota;
1140 /* Data shared by threads in an address space. */
1141 JSRuntime * const runtime;
1143 explicit JSContext(JSRuntime *rt) : runtime(rt) {}
1145 /* Stack arena pool and frame pointer register. */
1146 JS_REQUIRES_STACK
1147 JSArenaPool stackPool;
1149 JS_REQUIRES_STACK
1150 JSStackFrame *fp;
1152 /* Temporary arena pool used while compiling and decompiling. */
1153 JSArenaPool tempPool;
1155 /* Top-level object and pointer to top stack frame's scope chain. */
1156 JSObject *globalObject;
1158 /* Storage to root recently allocated GC things and script result. */
1159 JSWeakRoots weakRoots;
1161 /* Regular expression class statics (XXX not shared globally). */
1162 JSRegExpStatics regExpStatics;
1164 /* State for object and array toSource conversion. */
1165 JSSharpObjectMap sharpObjectMap;
1166 JSHashTable *busyArrayTable;
1168 /* Argument formatter support for JS_{Convert,Push}Arguments{,VA}. */
1169 JSArgumentFormatMap *argumentFormatMap;
1171 /* Last message string and trace file for debugging. */
1172 char *lastMessage;
1173 #ifdef DEBUG
1174 void *tracefp;
1175 jsbytecode *tracePrevPc;
1176 #endif
1178 /* Per-context optional error reporter. */
1179 JSErrorReporter errorReporter;
1181 /* Branch callback. */
1182 JSOperationCallback operationCallback;
1184 /* Interpreter activation count. */
1185 uintN interpLevel;
1187 /* Client opaque pointers. */
1188 void *data;
1189 void *data2;
1191 /* GC and thread-safe state. */
1192 JSStackFrame *dormantFrameChain; /* dormant stack frame to scan */
1193 #ifdef JS_THREADSAFE
1194 JSThread *thread;
1195 jsrefcount requestDepth;
1196 /* Same as requestDepth but ignoring JS_SuspendRequest/JS_ResumeRequest */
1197 jsrefcount outstandingRequests;
1198 JSTitle *lockedSealedTitle; /* weak ref, for low-cost sealed
1199 title locking */
1200 JSCList threadLinks; /* JSThread contextList linkage */
1202 #define CX_FROM_THREAD_LINKS(tl) \
1203 ((JSContext *)((char *)(tl) - offsetof(JSContext, threadLinks)))
1204 #endif
1206 /* PDL of stack headers describing stack slots not rooted by argv, etc. */
1207 JSStackHeader *stackHeaders;
1209 /* Stack of thread-stack-allocated temporary GC roots. */
1210 JSTempValueRooter *tempValueRooters;
1212 /* Debug hooks associated with the current context. */
1213 const JSDebugHooks *debugHooks;
1215 /* Security callbacks that override any defined on the runtime. */
1216 JSSecurityCallbacks *securityCallbacks;
1218 /* Pinned regexp pool used for regular expressions. */
1219 JSArenaPool regexpPool;
1221 /* Stored here to avoid passing it around as a parameter. */
1222 uintN resolveFlags;
1224 #ifdef JS_TRACER
1226 * State for the current tree execution. bailExit is valid if the tree has
1227 * called back into native code via a _FAIL builtin and has not yet bailed,
1228 * else garbage (NULL in debug builds).
1230 InterpState *interpState;
1231 VMSideExit *bailExit;
1234 * True if traces may be executed. Invariant: The value of jitEnabled is
1235 * always equal to the expression in updateJITEnabled below.
1237 * This flag and the fields accessed by updateJITEnabled are written only
1238 * in runtime->gcLock, to avoid race conditions that would leave the wrong
1239 * value in jitEnabled. (But the interpreter reads this without
1240 * locking. That can race against another thread setting debug hooks, but
1241 * we always read cx->debugHooks without locking anyway.)
1243 bool jitEnabled;
1244 #endif
1246 /* Caller must be holding runtime->gcLock. */
1247 void updateJITEnabled() {
1248 #ifdef JS_TRACER
1249 jitEnabled = ((options & JSOPTION_JIT) &&
1250 !runtime->debuggerInhibitsJIT() &&
1251 debugHooks == &runtime->globalDebugHooks);
1252 #endif
1255 #ifdef JS_THREADSAFE
1256 inline void createDeallocatorTask() {
1257 JS_ASSERT(!thread->deallocatorTask);
1258 if (runtime->deallocatorThread && !runtime->deallocatorThread->busy())
1259 thread->deallocatorTask = new JSFreePointerListTask();
1262 inline void submitDeallocatorTask() {
1263 if (thread->deallocatorTask) {
1264 runtime->deallocatorThread->schedule(thread->deallocatorTask);
1265 thread->deallocatorTask = NULL;
1268 #endif
1270 ptrdiff_t &getMallocCounter() {
1271 #ifdef JS_THREADSAFE
1272 return thread->gcThreadMallocBytes;
1273 #else
1274 return runtime->gcMallocBytes;
1275 #endif
1279 * Call this after allocating memory held by GC things, to update memory
1280 * pressure counters or report the OOM error if necessary.
1282 inline void updateMallocCounter(void *p, size_t nbytes) {
1283 JS_ASSERT(ptrdiff_t(nbytes) >= 0);
1284 ptrdiff_t &counter = getMallocCounter();
1285 counter -= ptrdiff_t(nbytes);
1286 if (!p || counter <= 0)
1287 checkMallocGCPressure(p);
1291 * Call this after successfully allocating memory held by GC things, to
1292 * update memory pressure counters.
1294 inline void updateMallocCounter(size_t nbytes) {
1295 JS_ASSERT(ptrdiff_t(nbytes) >= 0);
1296 ptrdiff_t &counter = getMallocCounter();
1297 counter -= ptrdiff_t(nbytes);
1298 if (counter <= 0) {
1300 * Use 1 as an arbitrary non-null pointer indicating successful
1301 * allocation.
1303 checkMallocGCPressure(reinterpret_cast<void *>(jsuword(1)));
1307 inline void* malloc(size_t bytes) {
1308 JS_ASSERT(bytes != 0);
1309 void *p = runtime->malloc(bytes);
1310 updateMallocCounter(p, bytes);
1311 return p;
1314 inline void* mallocNoReport(size_t bytes) {
1315 JS_ASSERT(bytes != 0);
1316 void *p = runtime->malloc(bytes);
1317 if (!p)
1318 return NULL;
1319 updateMallocCounter(bytes);
1320 return p;
1323 inline void* calloc(size_t bytes) {
1324 JS_ASSERT(bytes != 0);
1325 void *p = runtime->calloc(bytes);
1326 updateMallocCounter(p, bytes);
1327 return p;
1330 inline void* realloc(void* p, size_t bytes) {
1331 void *orig = p;
1332 p = runtime->realloc(p, bytes);
1335 * For compatibility we do not account for realloc that increases
1336 * previously allocated memory.
1338 updateMallocCounter(p, orig ? 0 : bytes);
1339 return p;
1342 #ifdef JS_THREADSAFE
1343 inline void free(void* p) {
1344 if (!p)
1345 return;
1346 if (thread) {
1347 JSFreePointerListTask* task = thread->deallocatorTask;
1348 if (task) {
1349 task->add(p);
1350 return;
1353 runtime->free(p);
1355 #else
1356 inline void free(void* p) {
1357 if (!p)
1358 return;
1359 runtime->free(p);
1361 #endif
1364 * In the common case that we'd like to allocate the memory for an object
1365 * with cx->malloc/free, we cannot use overloaded C++ operators (no
1366 * placement delete). Factor the common workaround into one place.
1368 #define CREATE_BODY(parms) \
1369 void *memory = this->malloc(sizeof(T)); \
1370 if (!memory) \
1371 return NULL; \
1372 return new(memory) T parms;
1374 template <class T>
1375 JS_ALWAYS_INLINE T *create() {
1376 CREATE_BODY(())
1379 template <class T, class P1>
1380 JS_ALWAYS_INLINE T *create(const P1 &p1) {
1381 CREATE_BODY((p1))
1384 template <class T, class P1, class P2>
1385 JS_ALWAYS_INLINE T *create(const P1 &p1, const P2 &p2) {
1386 CREATE_BODY((p1, p2))
1389 template <class T, class P1, class P2, class P3>
1390 JS_ALWAYS_INLINE T *create(const P1 &p1, const P2 &p2, const P3 &p3) {
1391 CREATE_BODY((p1, p2, p3))
1393 #undef CREATE_BODY
1395 template <class T>
1396 JS_ALWAYS_INLINE void destroy(T *p) {
1397 p->~T();
1398 this->free(p);
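/*
 * Illustrative sketch: pairing create<>() and destroy() so the object's memory
 * is charged to this context. Foo and its constructor arguments are
 * hypothetical.
 *
 *   Foo *foo = cx->create<Foo>(bar, 42);   // NULL on OOM, error reported
 *   if (!foo)
 *       return JS_FALSE;
 *   ...
 *   cx->destroy(foo);
 */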
1401 private:
1404 * The allocation code calls the function to indicate either OOM failure
1405 * when p is null or that a memory pressure counter has reached some
1406 * threshold when p is not null. The function takes the pointer and not
1407 * a boolean flag to minimize the amount of code in its inlined callers.
1409 void checkMallocGCPressure(void *p);
1412 #ifdef JS_THREADSAFE
1413 # define JS_THREAD_ID(cx) ((cx)->thread ? (cx)->thread->id : 0)
1414 #endif
1416 #ifdef __cplusplus
1418 static inline JSAtom **
1419 FrameAtomBase(JSContext *cx, JSStackFrame *fp)
1421 return fp->imacpc
1422 ? COMMON_ATOMS_START(&cx->runtime->atomState)
1423 : fp->script->atomMap.vector;
1426 /* FIXME(bug 332648): Move this into a public header. */
1427 class JSAutoTempValueRooter
1429 public:
1430 JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec
1431 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1432 : mContext(cx) {
1433 JS_GUARD_OBJECT_NOTIFIER_INIT;
1434 JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr);
1436 explicit JSAutoTempValueRooter(JSContext *cx, jsval v = JSVAL_NULL
1437 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1438 : mContext(cx) {
1439 JS_GUARD_OBJECT_NOTIFIER_INIT;
1440 JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr);
1442 JSAutoTempValueRooter(JSContext *cx, JSString *str
1443 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1444 : mContext(cx) {
1445 JS_GUARD_OBJECT_NOTIFIER_INIT;
1446 JS_PUSH_TEMP_ROOT_STRING(mContext, str, &mTvr);
1448 JSAutoTempValueRooter(JSContext *cx, JSObject *obj
1449 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1450 : mContext(cx) {
1451 JS_GUARD_OBJECT_NOTIFIER_INIT;
1452 JS_PUSH_TEMP_ROOT_OBJECT(mContext, obj, &mTvr);
1454 JSAutoTempValueRooter(JSContext *cx, JSScopeProperty *sprop
1455 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1456 : mContext(cx) {
1457 JS_GUARD_OBJECT_NOTIFIER_INIT;
1458 JS_PUSH_TEMP_ROOT_SPROP(mContext, sprop, &mTvr);
1461 ~JSAutoTempValueRooter() {
1462 JS_POP_TEMP_ROOT(mContext, &mTvr);
1465 jsval value() { return mTvr.u.value; }
1466 jsval *addr() { return &mTvr.u.value; }
1468 protected:
1469 JSContext *mContext;
1471 private:
1472 #ifndef AIX
1473 static void *operator new(size_t);
1474 static void operator delete(void *, size_t);
1475 #endif
1477 JSTempValueRooter mTvr;
1478 JS_DECL_USE_GUARD_OBJECT_NOTIFIER
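/*
 * Illustrative sketch: rooting a freshly created object for the rest of the
 * enclosing scope. MyFallibleHelper is hypothetical and is assumed to be able
 * to allocate (and thus trigger a GC).
 *
 *   JSObject *obj = JS_NewObject(cx, NULL, NULL, NULL);
 *   if (!obj)
 *       return JS_FALSE;
 *   JSAutoTempValueRooter tvr(cx, obj);
 *   if (!MyFallibleHelper(cx, obj))
 *       return JS_FALSE;
 */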
1481 class JSAutoTempIdRooter
1483 public:
1484 explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0)
1485 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1486 : mContext(cx) {
1487 JS_GUARD_OBJECT_NOTIFIER_INIT;
1488 JS_PUSH_SINGLE_TEMP_ROOT(mContext, ID_TO_VALUE(id), &mTvr);
1491 ~JSAutoTempIdRooter() {
1492 JS_POP_TEMP_ROOT(mContext, &mTvr);
1495 jsid id() { return (jsid) mTvr.u.value; }
1496 jsid * addr() { return (jsid *) &mTvr.u.value; }
1498 private:
1499 JSContext *mContext;
1500 JSTempValueRooter mTvr;
1501 JS_DECL_USE_GUARD_OBJECT_NOTIFIER
1504 class JSAutoIdArray {
1505 public:
1506 JSAutoIdArray(JSContext *cx, JSIdArray *ida
1507 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1508 : cx(cx), idArray(ida) {
1509 JS_GUARD_OBJECT_NOTIFIER_INIT;
1510 if (ida)
1511 JS_PUSH_TEMP_ROOT(cx, ida->length, ida->vector, &tvr);
1513 ~JSAutoIdArray() {
1514 if (idArray) {
1515 JS_POP_TEMP_ROOT(cx, &tvr);
1516 JS_DestroyIdArray(cx, idArray);
1519 bool operator!() {
1520 return idArray == NULL;
1522 jsid operator[](size_t i) const {
1523 JS_ASSERT(idArray);
1524 JS_ASSERT(i < size_t(idArray->length));
1525 return idArray->vector[i];
1527 size_t length() const {
1528 return idArray->length;
1530 private:
1531 JSContext * const cx;
1532 JSIdArray * const idArray;
1533 JSTempValueRooter tvr;
1534 JS_DECL_USE_GUARD_OBJECT_NOTIFIER
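/*
 * Illustrative sketch: enumerating an object's ids with automatic rooting and
 * JS_DestroyIdArray cleanup on scope exit.
 *
 *   JSAutoIdArray ida(cx, JS_Enumerate(cx, obj));
 *   if (!ida)
 *       return JS_FALSE;
 *   for (size_t i = 0; i < ida.length(); i++) {
 *       jsid id = ida[i];
 *       ...
 *   }
 */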
1537 /* The auto-root for enumeration object and its state. */
1538 class JSAutoEnumStateRooter : public JSTempValueRooter
1540 public:
1541 JSAutoEnumStateRooter(JSContext *cx, JSObject *obj, jsval *statep
1542 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1543 : mContext(cx), mStatep(statep)
1545 JS_GUARD_OBJECT_NOTIFIER_INIT;
1546 JS_ASSERT(obj);
1547 JS_ASSERT(statep);
1548 JS_PUSH_TEMP_ROOT_COMMON(cx, obj, this, JSTVU_ENUMERATOR, object);
1551 ~JSAutoEnumStateRooter() {
1552 JS_POP_TEMP_ROOT(mContext, this);
1555 void mark(JSTracer *trc) {
1556 JS_CALL_OBJECT_TRACER(trc, u.object, "enumerator_obj");
1557 js_MarkEnumeratorState(trc, u.object, *mStatep);
1560 private:
1561 JSContext *mContext;
1562 jsval *mStatep;
1563 JS_DECL_USE_GUARD_OBJECT_NOTIFIER
1566 class JSAutoResolveFlags
1568 public:
1569 JSAutoResolveFlags(JSContext *cx, uintN flags
1570 JS_GUARD_OBJECT_NOTIFIER_PARAM)
1571 : mContext(cx), mSaved(cx->resolveFlags) {
1572 JS_GUARD_OBJECT_NOTIFIER_INIT;
1573 cx->resolveFlags = flags;
1576 ~JSAutoResolveFlags() { mContext->resolveFlags = mSaved; }
1578 private:
1579 JSContext *mContext;
1580 uintN mSaved;
1581 JS_DECL_USE_GUARD_OBJECT_NOTIFIER
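/*
 * Illustrative sketch: overriding cx->resolveFlags for the duration of a
 * lookup; the previous flags are restored when |rf| goes out of scope. The
 * lookup call shown is hypothetical.
 *
 *   {
 *       JSAutoResolveFlags rf(cx, JSRESOLVE_QUALIFIED);
 *       if (!LookupPropertyWithFlags(cx, obj, id, &obj2, &prop))
 *           return JS_FALSE;
 *   }
 */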
1584 #endif /* __cplusplus */
1587 * Slightly more readable macros for testing per-context option settings (also
1588 * to hide bitset implementation detail).
1590 * JSOPTION_XML must be handled specially in order to propagate from compile-
1591 * to run-time (from cx->options to script->version/cx->version). To do that,
1592 * we copy JSOPTION_XML from cx->options into cx->version as JSVERSION_HAS_XML
1593 * whenever options are set, and preserve this XML flag across version number
1594 * changes done via the JS_SetVersion API.
1596 * But when executing a script or scripted function, the interpreter changes
1597 * cx->version, including the XML flag, to script->version. Thus JSOPTION_XML
1598 * is a compile-time option that causes a run-time version change during each
1599 * activation of the compiled script. That version change has the effect of
1600 * changing JS_HAS_XML_OPTION, so that any compiling done via eval enables XML
1601 * support. If an XML-enabled script or function calls a non-XML function,
1602 * the flag bit will be cleared during the callee's activation.
1604 * Note that JS_SetVersion API calls never pass JSVERSION_HAS_XML or'd into
1605 * that API's version parameter.
1607 * Note also that script->version must contain this XML option flag in order
1608 * for XDR'ed scripts to serialize and deserialize with that option preserved
1609 * for detection at run-time. We can't copy other compile-time options into
1610 * script->version because that would break backward compatibility (certain
1611 * other options, e.g. JSOPTION_VAROBJFIX, are analogous to JSOPTION_XML).
1613 #define JS_HAS_OPTION(cx,option) (((cx)->options & (option)) != 0)
1614 #define JS_HAS_STRICT_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_STRICT)
1615 #define JS_HAS_WERROR_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_WERROR)
1616 #define JS_HAS_COMPILE_N_GO_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_COMPILE_N_GO)
1617 #define JS_HAS_ATLINE_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_ATLINE)
1619 #define JSVERSION_MASK 0x0FFF /* see JSVersion in jspubtd.h */
1620 #define JSVERSION_HAS_XML 0x1000 /* flag induced by XML option */
1621 #define JSVERSION_ANONFUNFIX 0x2000 /* see jsapi.h, the comments
1622 for JSOPTION_ANONFUNFIX */
1624 #define JSVERSION_NUMBER(cx) ((JSVersion)((cx)->version & \
1625 JSVERSION_MASK))
1626 #define JS_HAS_XML_OPTION(cx) ((cx)->version & JSVERSION_HAS_XML || \
1627 JSVERSION_NUMBER(cx) >= JSVERSION_1_6)
1629 extern JSThreadData *
1630 js_CurrentThreadData(JSRuntime *rt);
1632 extern JSBool
1633 js_InitThreads(JSRuntime *rt);
1635 extern void
1636 js_FinishThreads(JSRuntime *rt);
1638 extern void
1639 js_PurgeThreads(JSContext *cx);
1641 extern void
1642 js_TraceThreads(JSRuntime *rt, JSTracer *trc);
1645 * Ensures the JSOPTION_XML and JSOPTION_ANONFUNFIX bits of cx->options are
1646 * reflected in cx->version, since each bit must travel with a script that has
1647 * it set.
1649 extern void
1650 js_SyncOptionsToVersion(JSContext *cx);
1653 * Common subroutine of JS_SetVersion and js_SetVersion, to update per-context
1654 * data that depends on version.
1656 extern void
1657 js_OnVersionChange(JSContext *cx);
1660 * Unlike the JS_SetVersion API, this function stores JSVERSION_HAS_XML and
1661 * any future non-version-number flags induced by compiler options.
1663 extern void
1664 js_SetVersion(JSContext *cx, JSVersion version);
1667 * Create and destroy functions for JSContext, which is manually allocated
1668 * and exclusively owned.
1670 extern JSContext *
1671 js_NewContext(JSRuntime *rt, size_t stackChunkSize);
1673 extern void
1674 js_DestroyContext(JSContext *cx, JSDestroyContextMode mode);
1677 * Return true if cx points to a context in rt->contextList, else return false.
1678 * NB: the caller (see jslock.c:ClaimTitle) must hold rt->gcLock.
1680 extern JSBool
1681 js_ValidContextPointer(JSRuntime *rt, JSContext *cx);
1683 static JS_INLINE JSContext *
1684 js_ContextFromLinkField(JSCList *link)
1686 JS_ASSERT(link);
1687 return (JSContext *) ((uint8 *) link - offsetof(JSContext, link));
1691 * If unlocked, acquire and release rt->gcLock around *iterp update; otherwise
1692 * the caller must be holding rt->gcLock.
1694 extern JSContext *
1695 js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp);
1698 * Iterate through contexts with active requests. The caller must be holding
1699 * rt->gcLock in case of a thread-safe build, or otherwise guarantee that the
1700  * context list is not altered asynchronously.
1702 extern JS_FRIEND_API(JSContext *)
1703 js_NextActiveContext(JSRuntime *, JSContext *);
1705 #ifdef JS_THREADSAFE
1708  * Count the number of contexts that have entered requests on the current thread.
1710 uint32
1711 js_CountThreadRequests(JSContext *cx);
1714  * This is a helper for code that can potentially run outside a JS request, to
1715  * ensure that the GC is not running when the function returns.
1717 * This function must be called with the GC lock held.
1719 extern void
1720 js_WaitForGC(JSRuntime *rt);
1723 * If we're in one or more requests (possibly on more than one context)
1724 * running on the current thread, indicate, temporarily, that all these
1725 * requests are inactive so a possible GC can proceed on another thread.
1726 * This function returns the number of discounted requests. The number must
1727  * be passed later to js_RecountRequestsAfterGC to reactivate the requests.
1729 * This function must be called with the GC lock held.
1731 uint32
1732 js_DiscountRequestsForGC(JSContext *cx);
1735 * This function must be called with the GC lock held.
1737 void
1738 js_RecountRequestsAfterGC(JSRuntime *rt, uint32 requestDebit);
1740 #else /* !JS_THREADSAFE */
1742 # define js_WaitForGC(rt) ((void) 0)
1744 #endif
1747 * JSClass.resolve and watchpoint recursion damping machinery.
1749 extern JSBool
1750 js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
1751 JSResolvingEntry **entryp);
1753 extern void
1754 js_StopResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
1755 JSResolvingEntry *entry, uint32 generation);
1758 * Local root set management.
1760 * NB: the jsval parameters below may be properly tagged jsvals, or GC-thing
1761 * pointers cast to (jsval). This relies on JSObject's tag being zero, but
1762 * on the up side it lets us push int-jsval-encoded scopeMark values on the
1763 * local root stack.
1765 extern JSBool
1766 js_EnterLocalRootScope(JSContext *cx);
1768 #define js_LeaveLocalRootScope(cx) \
1769 js_LeaveLocalRootScopeWithResult(cx, JSVAL_NULL)
1771 extern void
1772 js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval);
1774 extern void
1775 js_ForgetLocalRoot(JSContext *cx, jsval v);
1777 extern int
1778 js_PushLocalRoot(JSContext *cx, JSLocalRootStack *lrs, jsval v);
1781 * Report an exception, which is currently realized as a printf-style format
1782 * string and its arguments.
1784 typedef enum JSErrNum {
1785 #define MSG_DEF(name, number, count, exception, format) \
1786 name = number,
1787 #include "js.msg"
1788 #undef MSG_DEF
1789 JSErr_Limit
1790 } JSErrNum;
1792 extern JS_FRIEND_API(const JSErrorFormatString *)
1793 js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber);
1795 #ifdef va_start
1796 extern JSBool
1797 js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap);
1799 extern JSBool
1800 js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
1801 void *userRef, const uintN errorNumber,
1802 JSBool charArgs, va_list ap);
1804 extern JSBool
1805 js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback,
1806 void *userRef, const uintN errorNumber,
1807 char **message, JSErrorReport *reportp,
1808 bool charArgs, va_list ap);
1809 #endif
1811 extern void
1812 js_ReportOutOfMemory(JSContext *cx);
1815 * Report that cx->scriptStackQuota is exhausted.
1817 extern void
1818 js_ReportOutOfScriptQuota(JSContext *cx);
1820 extern void
1821 js_ReportOverRecursed(JSContext *cx);
1823 extern void
1824 js_ReportAllocationOverflow(JSContext *cx);
1826 #define JS_CHECK_RECURSION(cx, onerror) \
1827 JS_BEGIN_MACRO \
1828 int stackDummy_; \
1830 if (!JS_CHECK_STACK_SIZE(cx, stackDummy_)) { \
1831 js_ReportOverRecursed(cx); \
1832 onerror; \
1834 JS_END_MACRO
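/*
 * Illustrative sketch: guarding a recursive native against C stack overflow.
 * MyRecursiveNative is hypothetical.
 *
 *   static JSBool
 *   MyRecursiveNative(JSContext *cx, uintN argc, jsval *vp)
 *   {
 *       JS_CHECK_RECURSION(cx, return JS_FALSE);
 *       ...
 *   }
 */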
1837 * Report an exception using a previously composed JSErrorReport.
1838 * XXXbe remove from "friend" API
1840 extern JS_FRIEND_API(void)
1841 js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *report);
1843 extern void
1844 js_ReportIsNotDefined(JSContext *cx, const char *name);
1847 * Report an attempt to access the property of a null or undefined value (v).
1849 extern JSBool
1850 js_ReportIsNullOrUndefined(JSContext *cx, intN spindex, jsval v,
1851 JSString *fallback);
1853 extern void
1854 js_ReportMissingArg(JSContext *cx, jsval *vp, uintN arg);
1857 * Report error using js_DecompileValueGenerator(cx, spindex, v, fallback) as
1858  * the first argument for the error message. If the error message has fewer
1859  * than 3 arguments, use null for arg1 or arg2.
1861 extern JSBool
1862 js_ReportValueErrorFlags(JSContext *cx, uintN flags, const uintN errorNumber,
1863 intN spindex, jsval v, JSString *fallback,
1864 const char *arg1, const char *arg2);
1866 #define js_ReportValueError(cx,errorNumber,spindex,v,fallback) \
1867 ((void)js_ReportValueErrorFlags(cx, JSREPORT_ERROR, errorNumber, \
1868 spindex, v, fallback, NULL, NULL))
1870 #define js_ReportValueError2(cx,errorNumber,spindex,v,fallback,arg1) \
1871 ((void)js_ReportValueErrorFlags(cx, JSREPORT_ERROR, errorNumber, \
1872 spindex, v, fallback, arg1, NULL))
1874 #define js_ReportValueError3(cx,errorNumber,spindex,v,fallback,arg1,arg2) \
1875 ((void)js_ReportValueErrorFlags(cx, JSREPORT_ERROR, errorNumber, \
1876 spindex, v, fallback, arg1, arg2))
1878 extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit];
1881 * See JS_SetThreadStackLimit in jsapi.c, where we check that the stack grows
1882 * in the expected direction. On Unix-y systems, JS_STACK_GROWTH_DIRECTION is
1883 * computed on the build host by jscpucfg.c and written into jsautocfg.h. The
1884 * macro is hardcoded in jscpucfg.h on Windows and Mac systems (for historical
1885 * reasons pre-dating autoconf usage).
1887 #if JS_STACK_GROWTH_DIRECTION > 0
1888 # define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) < (cx)->stackLimit)
1889 #else
1890 # define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) > (cx)->stackLimit)
1891 #endif
1894 * If the operation callback flag was set, call the operation callback.
1895 * This macro can run the full GC. Return true if it is OK to continue and
1896 * false otherwise.
1898 #define JS_CHECK_OPERATION_LIMIT(cx) \
1899 (!(cx)->operationCallbackFlag || js_InvokeOperationCallback(cx))
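/*
 * Illustrative sketch: polling the operation callback from a long-running
 * native loop so scripts stay interruptible.
 *
 *   for (jsuint i = 0; i < length; i++) {
 *       if (!JS_CHECK_OPERATION_LIMIT(cx))
 *           return JS_FALSE;
 *       ...
 *   }
 */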
1902 * Invoke the operation callback and return false if the current execution
1903 * is to be terminated.
1905 extern JSBool
1906 js_InvokeOperationCallback(JSContext *cx);
1908 #ifndef JS_THREADSAFE
1909 # define js_TriggerAllOperationCallbacks(rt, gcLocked) \
1910 js_TriggerAllOperationCallbacks (rt)
1911 #endif
1913 void
1914 js_TriggerAllOperationCallbacks(JSRuntime *rt, JSBool gcLocked);
1916 extern JSStackFrame *
1917 js_GetScriptedCaller(JSContext *cx, JSStackFrame *fp);
1919 extern jsbytecode*
1920 js_GetCurrentBytecodePC(JSContext* cx);
1922 extern bool
1923 js_CurrentPCIsInImacro(JSContext *cx);
1925 #ifdef JS_TRACER
1927 * Reconstruct the JS stack and clear cx->tracecx. We must be currently in a
1928 * _FAIL builtin from trace on cx or another context on the same thread. The
1929 * machine code for the trace remains on the C stack when js_DeepBail returns.
1931 * Implemented in jstracer.cpp.
1933 JS_FORCES_STACK JS_FRIEND_API(void)
1934 js_DeepBail(JSContext *cx);
1935 #endif
1937 static JS_FORCES_STACK JS_INLINE void
1938 js_LeaveTrace(JSContext *cx)
1940 #ifdef JS_TRACER
1941 if (JS_ON_TRACE(cx))
1942 js_DeepBail(cx);
1943 #endif
1946 static JS_INLINE void
1947 js_LeaveTraceIfGlobalObject(JSContext *cx, JSObject *obj)
1949 if (!obj->fslots[JSSLOT_PARENT])
1950 js_LeaveTrace(cx);
1953 static JS_INLINE JSBool
1954 js_CanLeaveTrace(JSContext *cx)
1956 JS_ASSERT(JS_ON_TRACE(cx));
1957 #ifdef JS_TRACER
1958 return cx->bailExit != NULL;
1959 #else
1960 return JS_FALSE;
1961 #endif
1965 * Get the current cx->fp, first lazily instantiating stack frames if needed.
1966 * (Do not access cx->fp directly except in JS_REQUIRES_STACK code.)
1968 * Defined in jstracer.cpp if JS_TRACER is defined.
1970 static JS_FORCES_STACK JS_INLINE JSStackFrame *
1971 js_GetTopStackFrame(JSContext *cx)
1973 js_LeaveTrace(cx);
1974 return cx->fp;
1977 static JS_INLINE JSBool
1978 js_IsPropertyCacheDisabled(JSContext *cx)
1980 return cx->runtime->shapeGen >= SHAPE_OVERFLOW_BIT;
1983 static JS_INLINE uint32
1984 js_RegenerateShapeForGC(JSContext *cx)
1986 JS_ASSERT(cx->runtime->gcRunning);
1987 JS_ASSERT(cx->runtime->gcRegenShapes);
1990      * Under the GC, compared with js_GenerateShape, we don't need to use
1991      * atomic increments, but we still must make sure that once the shape
1992      * overflows it stays overflowed.
1994 uint32 shape = cx->runtime->shapeGen;
1995 shape = (shape + 1) | (shape & SHAPE_OVERFLOW_BIT);
1996 cx->runtime->shapeGen = shape;
1997 return shape;
2000 namespace js {
2003 * Policy that calls JSContext:: memory functions and reports errors to the
2004 * context. Since the JSContext* given on construction is stored for the
2005  * lifetime of the container, this policy may only be used for containers whose
2006  * lifetime is shorter than that of the given JSContext.
2008 class ContextAllocPolicy
2010 JSContext *mCx;
2012 public:
2013 ContextAllocPolicy(JSContext *cx) : mCx(cx) {}
2014 JSContext *context() const { return mCx; }
2016 void *malloc(size_t bytes) { return mCx->malloc(bytes); }
2017 void free(void *p) { mCx->free(p); }
2018 void *realloc(void *p, size_t bytes) { return mCx->realloc(p, bytes); }
2019 void reportAllocOverflow() const { js_ReportAllocationOverflow(mCx); }
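/*
 * Illustrative sketch: a vector whose allocations are charged to the context
 * and whose out-of-memory errors are reported on it.
 *
 *   js::Vector<jsval, 8, js::ContextAllocPolicy> vec(cx);
 *   if (!vec.append(v))
 *       return JS_FALSE;    // error already reported on cx
 */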
2024 #endif /* jscntxt_h___ */