/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99 ft=cpp:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
 * May 28, 2008.
 *
 * The Initial Developer of the Original Code is
 *   Brendan Eich <brendan@mozilla.org>
 *
 * Contributor(s):
 *   Andreas Gal <gal@mozilla.com>
 *   Mike Shaver <shaver@mozilla.org>
 *   David Anderson <danderson@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifndef jstracer_h___
#define jstracer_h___

#ifdef JS_TRACER

#include "jstypes.h"
#include "jsbuiltins.h"
#include "jscntxt.h"
#include "jsdhash.h"
#include "jsinterp.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsvector.h"

#if defined(DEBUG) && !defined(JS_JIT_SPEW)
#define JS_JIT_SPEW
#endif
template <typename T>
class Queue {
    T* _data;
    unsigned _len;
    unsigned _max;
    nanojit::Allocator* alloc;

public:
    void ensure(unsigned size) {
        if (_max > size)
            return;
        if (!_max)
            _max = 8;
        _max = JS_MAX(_max * 2, size);
        if (alloc) {
            T* tmp = new (*alloc) T[_max];
            memcpy(tmp, _data, _len * sizeof(T));
            _data = tmp;
        } else {
            _data = (T*)realloc(_data, _max * sizeof(T));
        }
#if defined(DEBUG)
        /* Poison the newly reserved tail; memset counts bytes, not elements. */
        memset(&_data[_len], 0xcd, (_max - _len) * sizeof(T));
#endif
    }

    Queue(nanojit::Allocator* alloc)
        : alloc(alloc)
    {
        this->_max =
        this->_len = 0;
        this->_data = NULL;
    }

    ~Queue() {
        if (!alloc)
            free(_data);
    }

    bool contains(T a) {
        for (unsigned n = 0; n < _len; ++n) {
            if (_data[n] == a)
                return true;
        }
        return false;
    }

    void add(T a) {
        ensure(_len + 1);
        JS_ASSERT(_len <= _max);
        _data[_len++] = a;
    }

    void add(T* chunk, unsigned size) {
        ensure(_len + size);
        JS_ASSERT(_len <= _max);
        memcpy(&_data[_len], chunk, size * sizeof(T));
        _len += size;
    }

    void addUnique(T a) {
        if (!contains(a))
            add(a);
    }

    void setLength(unsigned len) {
        ensure(len + 1);
        _len = len;
    }

    void clear() {
        _len = 0;
    }

    T & get(unsigned i) {
        JS_ASSERT(i < length());
        return _data[i];
    }

    const T & get(unsigned i) const {
        JS_ASSERT(i < length());
        return _data[i];
    }

    T & operator [](unsigned i) {
        return get(i);
    }

    const T & operator [](unsigned i) const {
        return get(i);
    }

    unsigned length() const {
        return _len;
    }

    T* data() const {
        return _data;
    }

    int offsetOf(T slot) {
        T* p = _data;
        unsigned n = 0;
        for (n = 0; n < _len; ++n)
            if (*p++ == slot)
                return n;
        return -1;
    }
};
/*
 * Tracker is used to keep track of values being manipulated by the
 * interpreter during trace recording. It maps opaque, 4-byte aligned
 * addresses to LIns pointers. To do this efficiently, we observe that the
 * addresses of jsvals living in the interpreter tend to be aggregated close
 * to each other - usually on the same page (where a tracker page doesn't
 * have to be the same size as the OS page size, but it's typically similar).
 * The Tracker consists of a linked list of structures representing a memory
 * page, which are created on-demand as memory locations are used.
 *
 * For every address, first we split it into two parts: upper bits which
 * represent the "base", and lower bits which represent an offset against the
 * base. For the offset, we then right-shift it by two because the bottom two
 * bits of a 4-byte aligned address are always zero. The mapping then
 * becomes:
 *
 *   page = page in pagelist such that Base(address) == page->base,
 *   page->map[Offset(address)]
 */
class Tracker {
    #define TRACKER_PAGE_SZB        4096
    #define TRACKER_PAGE_ENTRIES    (TRACKER_PAGE_SZB >> 2)    // each slot is 4 bytes
    #define TRACKER_PAGE_MASK       jsuword(TRACKER_PAGE_SZB - 1)

    struct TrackerPage {
        struct TrackerPage* next;
        jsuword             base;
        nanojit::LIns*      map[TRACKER_PAGE_ENTRIES];
    };
    struct TrackerPage* pagelist;

    jsuword             getTrackerPageBase(const void* v) const;
    jsuword             getTrackerPageOffset(const void* v) const;
    struct TrackerPage* findTrackerPage(const void* v) const;
    struct TrackerPage* addTrackerPage(const void* v);
public:
    Tracker();
    ~Tracker();

    bool            has(const void* v) const;
    nanojit::LIns*  get(const void* v) const;
    void            set(const void* v, nanojit::LIns* ins);
    void            clear();
};
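
/*
 * Illustrative sketch of the base/offset split described above, assuming the
 * accessors simply mask and shift (the real definitions live in
 * jstracer.cpp):
 */
#if 0
jsuword
Tracker::getTrackerPageBase(const void* v) const
{
    return jsuword(v) & ~TRACKER_PAGE_MASK;         // upper bits select the page
}

jsuword
Tracker::getTrackerPageOffset(const void* v) const
{
    return (jsuword(v) & TRACKER_PAGE_MASK) >> 2;   // low bits, sans alignment zeros
}
#endif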
class VMFragment : public nanojit::Fragment {
public:
    VMFragment(const void* _ip verbose_only(, uint32_t profFragID))
      : Fragment(_ip verbose_only(, profFragID))
    {}

    /*
     * If this is anchored off a TreeFragment, this points to that tree fragment.
     * Otherwise, it is |this|.
     */
    TreeFragment* root;

    TreeFragment* toTreeFragment();
};
#if defined(JS_JIT_SPEW) || defined(NJ_NO_VARIADIC_MACROS)

enum LC_TMBits {
    /*
     * Output control bits for all non-Nanojit code. Only use bits 16 and
     * above, since Nanojit uses 0 .. 15 itself.
     */
    LC_TMMinimal  = 1<<16,
    LC_TMTracer   = 1<<17,
    LC_TMRecorder = 1<<18,
    LC_TMAbort    = 1<<19,
    LC_TMStats    = 1<<20,
    LC_TMRegexp   = 1<<21,
    LC_TMTreeVis  = 1<<22
};

#endif
#ifdef NJ_NO_VARIADIC_MACROS

#define debug_only_stmt(action)            /* */
static void debug_only_printf(int mask, const char *fmt, ...) {}
#define debug_only_print0(mask, str)       /* */

#elif defined(JS_JIT_SPEW)

// Top level logging controller object.
extern nanojit::LogControl js_LogController;

// Top level profiling hook, needed to harvest profile info from Fragments
// whose logical lifetime is about to finish
extern void js_FragProfiling_FragFinalizer(nanojit::Fragment* f, JSTraceMonitor*);

#define debug_only_stmt(stmt) \
    stmt

#define debug_only_printf(mask, fmt, ...)                                     \
    JS_BEGIN_MACRO                                                            \
        if ((js_LogController.lcbits & (mask)) > 0) {                         \
            js_LogController.printf(fmt, __VA_ARGS__);                        \
            fflush(stdout);                                                   \
        }                                                                     \
    JS_END_MACRO

#define debug_only_print0(mask, str)                                          \
    JS_BEGIN_MACRO                                                            \
        if ((js_LogController.lcbits & (mask)) > 0) {                         \
            js_LogController.printf("%s", str);                               \
            fflush(stdout);                                                   \
        }                                                                     \
    JS_END_MACRO

#else

#define debug_only_stmt(action)            /* */
#define debug_only_printf(mask, fmt, ...)  /* */
#define debug_only_print0(mask, str)       /* */

#endif
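
/*
 * Usage sketch (hypothetical call sites): in JS_JIT_SPEW builds, spew is
 * filtered through js_LogController by the LC_TM* bits above.
 */
#if 0
    debug_only_printf(LC_TMTracer, "capturing global slot %u\n", slot);
    debug_only_print0(LC_TMMinimal, "recording aborted\n");
#endif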
/*
 * The oracle keeps track of hit counts for program counter locations, as
 * well as slots that should not be demoted to int because we know them to
 * overflow or they result in type-unstable traces. We are using simple
 * hash tables. Collisions lead to loss of optimization (demotable slots
 * are not demoted, etc.) but have no correctness implications.
 */
#define ORACLE_SIZE 4096

class Oracle {
    avmplus::BitSet _stackDontDemote;
    avmplus::BitSet _globalDontDemote;
    avmplus::BitSet _pcDontDemote;
public:
    Oracle();

    JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc);
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc) const;
    void markInstructionUndemotable(jsbytecode* pc);
    bool isInstructionUndemotable(jsbytecode* pc) const;

    void clearDemotability();
    void clear() {
        clearDemotability();
    }
};
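
/*
 * Illustrative sketch of the hashing scheme described above (a hypothetical
 * hash; the real ones live in jstracer.cpp). Marking sets a bit in a
 * fixed-size table, so two keys may collide and share a "don't demote" bit,
 * which only costs optimization, never correctness:
 */
#if 0
static inline size_t
exampleOracleHash(const void* pc)
{
    return size_t(uintptr_t(pc) >> 2) % ORACLE_SIZE;
}
#endif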
#if defined(_MSC_VER) && _MSC_VER >= 1400 || (defined(__GNUC__) && __GNUC__ >= 4)
#define USE_TRACE_TYPE_ENUM
#endif

/*
 * The types of values calculated during tracing, used to specialize operations
 * to the types of those values. These loosely correspond to the values of the
 * JSVAL_* language types, but we add a few further divisions to enable further
 * optimization at execution time. Do not rely on this loose correspondence for
 * correctness without adding static assertions!
 *
 * The ifdefs enforce that this enum occupies only one byte of memory, where
 * possible. If it doesn't, type maps will occupy more space but should
 * otherwise work correctly. A static assertion in jstracer.cpp verifies that
 * this requirement is correctly enforced by these compilers.
 */
enum JSTraceType_
#if defined(_MSC_VER) && _MSC_VER >= 1400
: int8_t
#endif
{
    TT_OBJECT        = 0, /* pointer to JSObject whose class is not js_FunctionClass */
    TT_INT32         = 1, /* 32-bit signed integer */
    TT_DOUBLE        = 2, /* pointer to jsdouble */
    TT_JSVAL         = 3, /* arbitrary jsval */
    TT_STRING        = 4, /* pointer to JSString */
    TT_NULL          = 5, /* null */
    TT_PSEUDOBOOLEAN = 6, /* true, false, or undefined (0, 1, or 2) */
    TT_FUNCTION      = 7, /* pointer to JSObject whose class is js_FunctionClass */
    TT_IGNORE        = 8
}
#if defined(__GNUC__) && defined(USE_TRACE_TYPE_ENUM)
__attribute__((packed))
#endif
;

#ifdef USE_TRACE_TYPE_ENUM
typedef JSTraceType_ JSTraceType;
#else
typedef int8_t JSTraceType;
#endif
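
/*
 * Sketch of the one-byte requirement stated above; the real assertion lives
 * in jstracer.cpp:
 */
#if 0
JS_STATIC_ASSERT(sizeof(JSTraceType) == 1);
#endif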
/*
 * This indicates an invalid type or error. Note that it should not be used in typemaps,
 * because it is the wrong size. It can only be used as a uint32, for example as the
 * return value from a function that returns a type as a uint32.
 */
const uint32 TT_INVALID = uint32(-1);

typedef Queue<uint16> SlotList;

class TypeMap : public Queue<JSTraceType> {
public:
    TypeMap(nanojit::Allocator* alloc) : Queue<JSTraceType>(alloc) {}
    void set(unsigned stackSlots, unsigned ngslots,
             const JSTraceType* stackTypeMap, const JSTraceType* globalTypeMap);
    JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth);
    JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
                                                     unsigned stackSlots);
    bool matches(TypeMap& other) const;
    void fromRaw(JSTraceType* other, unsigned numSlots);
};
#define JS_TM_EXITCODES(_)                                                    \
    /*                                                                        \
     * An exit at a possible branch-point in the trace at which to attach a   \
     * future secondary trace. Therefore the recorder must generate different \
     * code to handle the other outcome of the branch condition from the      \
     * primary trace's outcome.                                               \
     */                                                                       \
    _(BRANCH)                                                                 \
    /*                                                                        \
     * Exit at a tableswitch via a numbered case.                             \
     */                                                                       \
    _(CASE)                                                                   \
    /*                                                                        \
     * Exit at a tableswitch via the default case.                            \
     */                                                                       \
    _(DEFAULT)                                                                \
    _(LOOP)                                                                   \
    _(NESTED)                                                                 \
    /*                                                                        \
     * An exit from a trace because a condition relied upon at recording time \
     * no longer holds, where the alternate path of execution is so rare or   \
     * difficult to address in native code that it is not traced at all, e.g. \
     * negative array index accesses, which differ from positive indexes in   \
     * that they require a string-based property lookup rather than a simple  \
     * memory access.                                                         \
     */                                                                       \
    _(MISMATCH)                                                               \
    /*                                                                        \
     * A specialization of MISMATCH_EXIT to handle allocation failures.       \
     */                                                                       \
    _(OOM)                                                                    \
    _(OVERFLOW)                                                               \
    _(UNSTABLE_LOOP)                                                          \
    _(TIMEOUT)                                                                \
    _(DEEP_BAIL)                                                              \
    _(STATUS)                                                                 \
    /* Exit is almost recursive and wants a peer at recursive_pc */           \
    _(RECURSIVE_UNLINKED)                                                     \
    /* Exit is recursive, and there are no more frames */                     \
    _(RECURSIVE_LOOP)                                                         \
    /* Exit is recursive, but type-mismatched guarding on a down frame */     \
    _(RECURSIVE_MISMATCH)                                                     \
    /* Exit is recursive, and the JIT wants to try slurping interp frames */  \
    _(RECURSIVE_EMPTY_RP)                                                     \
    /* Slurping interp frames in up-recursion failed */                       \
    _(RECURSIVE_SLURP_FAIL)                                                   \
    /* Tried to slurp an interp frame, but the pc or argc mismatched */       \
    _(RECURSIVE_SLURP_MISMATCH)

enum ExitType {
    #define MAKE_EXIT_CODE(x) x##_EXIT,
    JS_TM_EXITCODES(MAKE_EXIT_CODE)
    #undef MAKE_EXIT_CODE
    TOTAL_EXIT_TYPES
};
struct FrameInfo;

struct VMSideExit : public nanojit::SideExit
{
    JSObject* block;
    jsbytecode* pc;
    jsbytecode* imacpc;
    intptr_t sp_adj;
    intptr_t rp_adj;
    int32_t calldepth;
    uint32 numGlobalSlots;
    uint32 numStackSlots;
    uint32 numStackSlotsBelowCurrentFrame;
    ExitType exitType;
    uintN lookupFlags;
    void* recursive_pc;
    FrameInfo* recursive_down;
    unsigned hitcount;
    unsigned slurpFailSlot;
    JSTraceType slurpType;

    /*
     * Ordinarily 0. If a slow native function is atop the stack, the 1 bit is
     * set if constructing and the other bits are a pointer to the funobj.
     */
    uintptr_t nativeCalleeWord;

    JSObject * nativeCallee() {
        return (JSObject *) (nativeCalleeWord & ~1);
    }

    bool constructing() {
        return bool(nativeCalleeWord & 1);
    }

    void setNativeCallee(JSObject *callee, bool constructing) {
        nativeCalleeWord = uintptr_t(callee) | (constructing ? 1 : 0);
    }

    inline JSTraceType* stackTypeMap() {
        return (JSTraceType*)(this + 1);
    }

    inline JSTraceType& stackType(unsigned i) {
        JS_ASSERT(i < numStackSlots);
        return stackTypeMap()[i];
    }

    inline JSTraceType* globalTypeMap() {
        return (JSTraceType*)(this + 1) + this->numStackSlots;
    }

    inline JSTraceType* fullTypeMap() {
        return stackTypeMap();
    }

    inline VMFragment* fromFrag() {
        return (VMFragment*)from;
    }

    inline TreeFragment* root() {
        return fromFrag()->root;
    }
};
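
/*
 * Layout note, restating the accessors above: a VMSideExit is allocated with
 * its typemap appended immediately after the struct, stack entries first:
 *
 *   [ VMSideExit | numStackSlots stack types | numGlobalSlots global types ]
 */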
class VMAllocator : public nanojit::Allocator
{
public:
    VMAllocator() : mOutOfMemory(false), mSize(0)
    {}

    size_t size() {
        return mSize;
    }

    bool outOfMemory() {
        return mOutOfMemory;
    }

    struct Mark
    {
        VMAllocator& vma;
        bool committed;
        nanojit::Allocator::Chunk* saved_chunk;
        char* saved_top;
        char* saved_limit;
        size_t saved_size;

        Mark(VMAllocator& vma) :
            vma(vma),
            committed(false),
            saved_chunk(vma.current_chunk),
            saved_top(vma.current_top),
            saved_limit(vma.current_limit),
            saved_size(vma.mSize)
        {}

        ~Mark()
        {
            if (!committed)
                vma.rewind(*this);
        }

        void commit() { committed = true; }
    };

    void rewind(const Mark& m) {
        while (current_chunk != m.saved_chunk) {
            Chunk *prev = current_chunk->prev;
            freeChunk(current_chunk);
            current_chunk = prev;
        }
        current_top = m.saved_top;
        current_limit = m.saved_limit;
        mSize = m.saved_size;
        memset(current_top, 0, current_limit - current_top);
    }

    bool mOutOfMemory;
    size_t mSize;

    /*
     * FIXME: Area the LIR spills into if we encounter an OOM mid-way
     * through compilation; we must check mOutOfMemory before we run out
     * of mReserve, otherwise we're in undefined territory. This area
     * used to be one page, now 16 to be "safer". This is a temporary
     * and quite unsatisfactory approach to handling OOM in Nanojit.
     */
    uintptr_t mReserve[0x10000];
};
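
/*
 * Usage sketch for Mark (hypothetical caller): allocations made after the
 * mark are rewound automatically unless commit() is called.
 */
#if 0
static void
exampleMarkUsage(VMAllocator& alloc, bool compiled)
{
    VMAllocator::Mark mark(alloc);
    void* scratch = new (alloc) char[128];  // rolled back on failure paths
    if (compiled)
        mark.commit();                      // keep the allocations
}   // ~Mark() rewinds if commit() was never called
#endif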
struct REHashKey {
    size_t re_length;
    uint16 re_flags;
    const jschar* re_chars;

    REHashKey(size_t re_length, uint16 re_flags, const jschar *re_chars)
        : re_length(re_length)
        , re_flags(re_flags)
        , re_chars(re_chars)
    {}

    bool operator==(const REHashKey& other) const
    {
        return ((this->re_length == other.re_length) &&
                (this->re_flags == other.re_flags) &&
                !memcmp(this->re_chars, other.re_chars,
                        this->re_length * sizeof(jschar)));
    }
};

struct REHashFn {
    static size_t hash(const REHashKey& k) {
        return
            k.re_length +
            k.re_flags +
            nanojit::murmurhash(k.re_chars, k.re_length * sizeof(jschar));
    }
};
struct FrameInfo {
    JSObject*       block;      // caller block chain head
    jsbytecode*     pc;         // caller fp->regs->pc
    jsbytecode*     imacpc;     // caller fp->imacpc
    uint32          spdist;     // distance from fp->slots to fp->regs->sp at JSOP_CALL

    /*
     * Bit 16 (0x10000, CONSTRUCTING_FLAG below) is set if constructing
     * (called through new). Bits 0-15 are the actual argument count. This
     * may be less than fun->nargs.
     * NB: This is argc for the callee, not the caller.
     */
    uint32          argc;

    /*
     * Number of stack slots in the caller, not counting slots pushed when
     * invoking the callee. That is, slots after JSOP_CALL completes but
     * without the return value. This is also equal to the number of slots
     * between fp->down->argv[-2] (calleR fp->callee) and fp->argv[-2]
     * (calleE fp->callee).
     */
    uint32          callerHeight;

    /* argc of the caller */
    uint32          callerArgc;

    // Safer accessors for argc.
    enum { CONSTRUCTING_FLAG = 0x10000 };
    void set_argc(uint16 argc, bool constructing) {
        this->argc = uint32(argc) | (constructing ? CONSTRUCTING_FLAG: 0);
    }
    uint16 get_argc() const { return uint16(argc & ~CONSTRUCTING_FLAG); }
    bool is_constructing() const { return (argc & CONSTRUCTING_FLAG) != 0; }

    // The typemap just before the callee is called.
    JSTraceType* get_typemap() { return (JSTraceType*) (this+1); }
    const JSTraceType* get_typemap() const { return (JSTraceType*) (this+1); }
};
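
/*
 * Bit-packing sketch (hypothetical values), mirroring the accessors above:
 * constructing calls set the flag just above the 16-bit argc payload.
 */
#if 0
static void
exampleArgcPacking()
{
    FrameInfo fi;
    fi.set_argc(2, true);               // stores 0x10002: flag | argc
    JS_ASSERT(fi.get_argc() == 2);      // flag masked back out
    JS_ASSERT(fi.is_constructing());
}
#endif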
struct UnstableExit
{
    VMFragment* fragment;
    VMSideExit* exit;
    UnstableExit* next;
};

enum RecordReason
{
    Record_Branch,
    Record_EnterFrame,
    Record_LeaveFrame
};

enum RecursionStatus
{
    Recursion_None,       /* No recursion has been compiled yet. */
    Recursion_Disallowed, /* This tree cannot be recursive. */
    Recursion_Unwinds,    /* Tree is up-recursive only. */
    Recursion_Detected    /* Tree has down recursion and maybe up recursion. */
};

struct LinkableFragment : public VMFragment
{
    LinkableFragment(const void* _ip, nanojit::Allocator* alloc
                     verbose_only(, uint32_t profFragID))
      : VMFragment(_ip verbose_only(, profFragID)), typeMap(alloc), nStackTypes(0)
    { }

    uint32                  branchCount;
    TypeMap                 typeMap;
    unsigned                nStackTypes;
    SlotList*               globalSlots;
};
/*
 * argc is cx->fp->argc at the trace loop header, i.e., the number of arguments
 * pushed for the innermost JS frame. This is required as part of the fragment
 * key because the fragment will write those arguments back to the interpreter
 * stack when it exits, using its typemap, which implicitly incorporates a
 * given value of argc. Without this feature, a fragment could be called as an
 * inner tree with two different values of argc, and entry type checking or
 * exit frame synthesis could crash.
 */
struct TreeFragment : public LinkableFragment
{
    TreeFragment(const void* _ip, nanojit::Allocator* alloc, JSObject* _globalObj,
                 uint32 _globalShape, uint32 _argc verbose_only(, uint32_t profFragID)):
        LinkableFragment(_ip, alloc verbose_only(, profFragID)),
        first(NULL),
        next(NULL),
        peer(NULL),
        globalObj(_globalObj),
        globalShape(_globalShape),
        argc(_argc),
        dependentTrees(alloc),
        linkedTrees(alloc),
        sideExits(alloc),
        gcthings(alloc),
        sprops(alloc)
    { }

    TreeFragment* first;
    TreeFragment* next;
    TreeFragment* peer;
    JSObject* globalObj;
    uint32 globalShape;
    uint32 argc;
    /* Dependent trees must be trashed if this tree dies, and updated on missing global types */
    Queue<TreeFragment*> dependentTrees;
    /* Linked trees must be updated on missing global types, but are not dependent */
    Queue<TreeFragment*> linkedTrees;
#ifdef DEBUG
    const char* treeFileName;
    uintN treeLineNumber;
    uintN treePCOffset;
#endif
    JSScript* script;
    RecursionStatus recursion;
    UnstableExit* unstableExits;
    Queue<VMSideExit*> sideExits;
    ptrdiff_t nativeStackBase;
    unsigned maxCallDepth;
    /* All embedded GC things are registered here so the GC can scan them. */
    Queue<jsval> gcthings;
    Queue<JSScopeProperty*> sprops;
    unsigned maxNativeStackSlots;

    inline unsigned nGlobalTypes() {
        return typeMap.length() - nStackTypes;
    }
    inline JSTraceType* globalTypeMap() {
        return typeMap.data() + nStackTypes;
    }
    inline JSTraceType* stackTypeMap() {
        return typeMap.data();
    }

    JS_REQUIRES_STACK void initialize(JSContext* cx, SlotList *globalSlots);
    UnstableExit* removeUnstableExit(VMSideExit* exit);
};
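
/*
 * Layout note, restating the accessors above: TreeFragment::typeMap holds the
 * stack type entries first, followed by the global type entries, so
 * stackTypeMap() is the buffer start and globalTypeMap() begins nStackTypes
 * entries in.
 */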
inline TreeFragment*
VMFragment::toTreeFragment()
{
    JS_ASSERT(root == this);
    return static_cast<TreeFragment*>(this);
}

typedef enum JSBuiltinStatus {
    JSBUILTIN_BAILED = 1,
    JSBUILTIN_ERROR = 2
} JSBuiltinStatus;

// Arguments objects created on trace have a private value that points to an
// instance of this struct. The struct includes a typemap that is allocated
// as part of the object.
struct js_ArgsPrivateNative {
    double      *argv;

    static js_ArgsPrivateNative *create(VMAllocator &alloc, unsigned argc)
    {
        return (js_ArgsPrivateNative*) new (alloc) char[sizeof(js_ArgsPrivateNative) + argc];
    }

    JSTraceType *typemap()
    {
        return (JSTraceType*) (this+1);
    }
};

static JS_INLINE void
js_SetBuiltinError(JSContext *cx)
{
    cx->interpState->builtinStatus |= JSBUILTIN_ERROR;
}
#ifdef DEBUG_RECORDING_STATUS_NOT_BOOL
/* #define DEBUG_RECORDING_STATUS_NOT_BOOL to detect misuses of RecordingStatus */
struct RecordingStatus {
    int code;
    bool operator==(RecordingStatus &s) { return this->code == s.code; };
    bool operator!=(RecordingStatus &s) { return this->code != s.code; };
};
enum RecordingStatusCodes {
    RECORD_ERROR_code     = 0,
    RECORD_STOP_code      = 1,

    RECORD_CONTINUE_code  = 3,
    RECORD_IMACRO_code    = 4
};
RecordingStatus RECORD_CONTINUE = { RECORD_CONTINUE_code };
RecordingStatus RECORD_STOP     = { RECORD_STOP_code };
RecordingStatus RECORD_IMACRO   = { RECORD_IMACRO_code };
RecordingStatus RECORD_ERROR    = { RECORD_ERROR_code };

struct AbortableRecordingStatus {
    int code;
    bool operator==(AbortableRecordingStatus &s) { return this->code == s.code; };
    bool operator!=(AbortableRecordingStatus &s) { return this->code != s.code; };
};
enum AbortableRecordingStatusCodes {
    ARECORD_ERROR_code     = 0,
    ARECORD_STOP_code      = 1,
    ARECORD_ABORTED_code   = 2,
    ARECORD_CONTINUE_code  = 3,
    ARECORD_IMACRO_code    = 4,
    ARECORD_COMPLETED_code = 5
};
AbortableRecordingStatus ARECORD_ERROR     = { ARECORD_ERROR_code };
AbortableRecordingStatus ARECORD_STOP      = { ARECORD_STOP_code };
AbortableRecordingStatus ARECORD_CONTINUE  = { ARECORD_CONTINUE_code };
AbortableRecordingStatus ARECORD_IMACRO    = { ARECORD_IMACRO_code };
AbortableRecordingStatus ARECORD_ABORTED   = { ARECORD_ABORTED_code };
AbortableRecordingStatus ARECORD_COMPLETED = { ARECORD_COMPLETED_code };

static inline AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    AbortableRecordingStatus ars = { rs.code };
    return ars;
}
static inline AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)
{
    return ars;
}

static inline bool
StatusAbortsRecording(AbortableRecordingStatus ars)
{
    return ars == ARECORD_ERROR || ars == ARECORD_STOP;
}
#else
/*
 * Normally, during recording, when the recorder cannot continue, it returns
 * ARECORD_STOP to indicate that recording should be aborted by the top-level
 * recording function. However, if the recorder reenters the interpreter (e.g.,
 * when executing an inner loop), there will be an immediate abort. This
 * condition must be carefully detected and propagated out of all nested
 * recorder calls lest the now-invalid TraceRecorder object be accessed
 * accidentally. This condition is indicated by the ARECORD_ABORTED value.
 *
 * The AbortableRecordingStatus enumeration represents the general set of
 * possible results of calling a recorder function. Functions that cannot
 * possibly return ARECORD_ABORTED may statically guarantee this to the caller
 * using the RecordingStatus enumeration. Ideally, C++ would allow subtyping
 * of enumerations, but it doesn't. To simulate subtype conversion manually,
 * code should call InjectStatus to inject a value of the restricted set into a
 * value of the general set.
 */

enum RecordingStatus {
    RECORD_ERROR       = 0,  // Error; propagate to interpreter.
    RECORD_STOP        = 1,  // Recording should be aborted at the top-level
                             // call to the recorder.
                             // (value 2 reserved for ARECORD_ABORTED)
    RECORD_CONTINUE    = 3,  // Continue recording.
    RECORD_IMACRO      = 4   // Entered imacro; continue recording.
                             // Only JSOP_IS_IMACOP opcodes may return this.
};

enum AbortableRecordingStatus {
    ARECORD_ERROR      = 0,
    ARECORD_STOP       = 1,
    ARECORD_ABORTED    = 2,  // Recording has already been aborted; the
                             // recorder has been deleted.
    ARECORD_CONTINUE   = 3,
    ARECORD_IMACRO     = 4,
    ARECORD_COMPLETED  = 5   // Recording of the current trace recorder
                             // completed.
};

static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    return static_cast<AbortableRecordingStatus>(rs);
}

static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)
{
    return ars;
}

/*
 * Return whether the recording status requires the current recording session
 * to be deleted. ABORTED and COMPLETED indicate the recording session is
 * already deleted, so they return 'false'.
 */
static JS_ALWAYS_INLINE bool
StatusAbortsRecording(AbortableRecordingStatus ars)
{
    return ars <= ARECORD_STOP;
}
#endif
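
/*
 * Usage sketch (hypothetical helper): a RecordingStatus result is widened to
 * the general set with InjectStatus before being returned upward.
 */
#if 0
static AbortableRecordingStatus
exampleWiden(RecordingStatus rs)
{
    return InjectStatus(rs);    // RecordingStatus -> AbortableRecordingStatus
}
#endif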
class SlotMap;
class SlurpInfo;

/* Results of trying to compare two typemaps together */
enum TypeConsensus
{
    TypeConsensus_Okay,      /* Two typemaps are compatible */
    TypeConsensus_Undemotes, /* Not compatible now, but would be with pending undemotes. */
    TypeConsensus_Bad        /* Typemaps are not compatible */
};

#ifdef DEBUG
# define js_AbortRecording(cx, reason) js_AbortRecordingImpl(cx, reason)
#else
# define js_AbortRecording(cx, reason) js_AbortRecordingImpl(cx)
#endif
class TraceRecorder
{
    /*************************************************************** Recording session constants */

    /* The context in which recording started. */
    JSContext* const                cx;

    /* Cached value of JS_TRACE_MONITOR(cx). */
    JSTraceMonitor* const           traceMonitor;

    /* The Fragment being recorded by this recording session. */
    VMFragment* const               fragment;

    /* The root fragment representing the tree. */
    TreeFragment* const             tree;

    /* The reason we started recording. */
    RecordReason const              recordReason;

    /* The global object from the start of recording until now. */
    JSObject* const                 globalObj;

    /* If non-null, the (pc of the) outer loop aborted to start recording this loop. */
    jsbytecode* const               outer;

    /* If |outer|, the argc to use when looking up |outer| in the fragments table. */
    uint32 const                    outerArgc;

    /* The current frame's lexical block when recording started. */
    JSObject* const                 lexicalBlock;

    /* If non-null, the side exit from which we are growing. */
    VMSideExit* const               anchor;

    /* The LIR-generation pipeline used to build |fragment|. */
    nanojit::LirWriter* const       lir;

    /* Instructions yielding the corresponding trace-const members of InterpState. */
    nanojit::LIns* const            cx_ins;
    nanojit::LIns* const            eos_ins;
    nanojit::LIns* const            eor_ins;
    nanojit::LIns* const            loopLabel;

    /* Lazy slot import state. */
    unsigned                        importStackSlots;
    unsigned                        importGlobalSlots;
    TypeMap                         importTypeMap;

    /*
     * The LirBuffer used to supply memory to our LirWriter pipeline. Also contains the most recent
     * instruction for {sp, rp, state}. Also contains names for debug JIT spew. Should be split.
     */
    nanojit::LirBuffer* const       lirbuf;

    /*
     * Remembers traceAlloc state before recording started; automatically rewinds when mark is
     * destroyed on a failed compilation.
     */
    VMAllocator::Mark               mark;

    /* Remembers the number of sideExits in treeInfo before recording started. */
    const unsigned                  numSideExitsBefore;

    /*********************************************************** Recording session mutable state */

    /* Maps interpreter stack values to the instruction generating that value. */
    Tracker                         tracker;

    /* Maps interpreter stack values to the instruction writing back to the native stack. */
    Tracker                         nativeFrameTracker;

    /* The start of the global object's dslots we assume for the trackers. */
    jsval*                          global_dslots;

    /* The number of interpreted calls entered (and not yet left) since recording began. */
    unsigned                        callDepth;

    /* The current atom table, mirroring the interpreter loop's variable of the same name. */
    JSAtom**                        atoms;

    /* FIXME: Dead, but soon to be used for something or other. */
    Queue<jsbytecode*>              cfgMerges;

    /* Indicates whether the current tree should be trashed when the recording session ends. */
    bool                            trashSelf;

    /* A list of trees to trash at the end of the recording session. */
    Queue<TreeFragment*>            whichTreesToTrash;

    /***************************************** Temporal state hoisted into the recording session */

    /* Carry the return value from a STOP/RETURN to the subsequent record_LeaveFrame. */
    nanojit::LIns*                  rval_ins;

    /* Carry the return value from a native call to the record_NativeCallComplete. */
    nanojit::LIns*                  native_rval_ins;

    /* Carry the return value of js_NewInstance to record_NativeCallComplete. */
    nanojit::LIns*                  newobj_ins;

    /* Carry the JSSpecializedNative used to generate a call to record_NativeCallComplete. */
    JSSpecializedNative*            pendingSpecializedNative;

    /* Carry whether this is a jsval on the native stack from finishGetProp to monitorRecording. */
    jsval*                          pendingUnboxSlot;

    /* Carry a guard condition to the beginning of the next monitorRecording. */
    nanojit::LIns*                  pendingGuardCondition;

    /* Carry whether we have an always-exit from emitIf to checkTraceEnd. */
    bool                            pendingLoop;

    /* Temporary JSSpecializedNative used to describe non-specialized fast natives. */
    JSSpecializedNative             generatedSpecializedNative;

    /* Temporary JSTraceType array used to construct temporary typemaps. */
    js::Vector<JSTraceType, 256>    tempTypeMap;
    /************************************************************* 10 bajillion member functions */

    nanojit::LIns* insImmVal(jsval val);
    nanojit::LIns* insImmObj(JSObject* obj);
    nanojit::LIns* insImmFun(JSFunction* fun);
    nanojit::LIns* insImmStr(JSString* str);
    nanojit::LIns* insImmSprop(JSScopeProperty* sprop);
    nanojit::LIns* p2i(nanojit::LIns* ins);

    /*
     * Examines current interpreter state to record information suitable for returning to the
     * interpreter through a side exit of the given type.
     */
    JS_REQUIRES_STACK VMSideExit* snapshot(ExitType exitType);

    /*
     * Creates a separate but identical copy of the given side exit, allowing the guards associated
     * with each to be entirely separate even after subsequent patching.
     */
    JS_REQUIRES_STACK VMSideExit* copy(VMSideExit* exit);

    /*
     * Creates an instruction whose payload is a GuardRecord for the given exit. The instruction
     * is suitable for use as the final argument of a single call to LirBuffer::insGuard; do not
     * reuse the returned value.
     */
    JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);

    bool isGlobal(jsval* p) const;
    ptrdiff_t nativeGlobalSlot(jsval *p) const;
    ptrdiff_t nativeGlobalOffset(jsval* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(jsval* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackSlot(jsval* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativespOffset(jsval* p) const;
    JS_REQUIRES_STACK void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, JSTraceType t,
                                  const char *prefix, uintN index, JSStackFrame *fp);
    JS_REQUIRES_STACK void import(TreeFragment* tree, nanojit::LIns* sp, unsigned stackSlots,
                                  unsigned callDepth, unsigned ngslots, JSTraceType* typeMap);
    void trackNativeStackUse(unsigned slots);

    JS_REQUIRES_STACK bool isValidSlot(JSScope* scope, JSScopeProperty* sprop);
    JS_REQUIRES_STACK bool lazilyImportGlobalSlot(unsigned slot);
    JS_REQUIRES_STACK void importGlobalSlot(unsigned slot);

    JS_REQUIRES_STACK void guard(bool expected, nanojit::LIns* cond, ExitType exitType);
    JS_REQUIRES_STACK void guard(bool expected, nanojit::LIns* cond, VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpInt32Slot(nanojit::LIns* val_ins, jsval* vp,
                                                    VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpDoubleSlot(nanojit::LIns* val_ins, jsval* vp,
                                                     VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpStringSlot(nanojit::LIns* val_ins, jsval* vp,
                                                     VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpObjectSlot(nanojit::LIns* val_ins, jsval* vp,
                                                     VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpFunctionSlot(nanojit::LIns* val_ins, jsval* vp,
                                                       VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpNullSlot(nanojit::LIns* val_ins, jsval* vp,
                                                   VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpBoolSlot(nanojit::LIns* val_ins, jsval* vp,
                                                   VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* slurpSlot(nanojit::LIns* val_ins, jsval* vp,
                                               VMSideExit* exit);
    JS_REQUIRES_STACK void slurpSlot(nanojit::LIns* val_ins, jsval* vp, SlurpInfo* info);
    JS_REQUIRES_STACK AbortableRecordingStatus slurpDownFrames(jsbytecode* return_pc);
    JS_REQUIRES_STACK AbortableRecordingStatus upRecursion();
    JS_REQUIRES_STACK AbortableRecordingStatus downRecursion();

    nanojit::LIns* addName(nanojit::LIns* ins, const char* name);

    nanojit::LIns* writeBack(nanojit::LIns* i, nanojit::LIns* base, ptrdiff_t offset,
                             bool demote);
    JS_REQUIRES_STACK void set(jsval* p, nanojit::LIns* l, bool initializing = false,
                               bool demote = true);
    JS_REQUIRES_STACK nanojit::LIns* get(jsval* p);
    JS_REQUIRES_STACK nanojit::LIns* attemptImport(jsval* p);
    JS_REQUIRES_STACK nanojit::LIns* addr(jsval* p);

    JS_REQUIRES_STACK bool known(jsval* p);
    JS_REQUIRES_STACK void checkForGlobalObjectReallocation();

    JS_REQUIRES_STACK TypeConsensus selfTypeStability(SlotMap& smap);
    JS_REQUIRES_STACK TypeConsensus peerTypeStability(SlotMap& smap, const void* ip,
                                                      TreeFragment** peer);

    JS_REQUIRES_STACK jsval& argval(unsigned n) const;
    JS_REQUIRES_STACK jsval& varval(unsigned n) const;
    JS_REQUIRES_STACK jsval& stackval(int n) const;

    struct NameResult {
        // |tracked| is true iff the result of the name lookup is a variable that
        // is already in the tracker. The rest of the fields are set only if
        // |tracked| is false.
        bool             tracked;
        jsval            v;        // current property value
        JSObject         *obj;     // Call object where name was found
        nanojit::LIns    *obj_ins; // LIR value for obj
        JSScopeProperty  *sprop;   // sprop name was resolved to
    };
    JS_REQUIRES_STACK nanojit::LIns* scopeChain() const;
    JS_REQUIRES_STACK JSStackFrame* frameIfInRange(JSObject* obj, unsigned* depthp = NULL) const;
    JS_REQUIRES_STACK RecordingStatus traverseScopeChain(JSObject *obj, nanojit::LIns *obj_ins, JSObject *obj2, nanojit::LIns *&obj2_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus scopeChainProp(JSObject* obj, jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
    JS_REQUIRES_STACK RecordingStatus callProp(JSObject* obj, JSProperty* sprop, jsid id, jsval*& vp, nanojit::LIns*& ins, NameResult& nr);

    JS_REQUIRES_STACK nanojit::LIns* arg(unsigned n);
    JS_REQUIRES_STACK void arg(unsigned n, nanojit::LIns* i);
    JS_REQUIRES_STACK nanojit::LIns* var(unsigned n);
    JS_REQUIRES_STACK void var(unsigned n, nanojit::LIns* i);
    JS_REQUIRES_STACK nanojit::LIns* upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v);
    nanojit::LIns* stackLoad(nanojit::LIns* addr, uint8 type);
    JS_REQUIRES_STACK nanojit::LIns* stack(int n);
    JS_REQUIRES_STACK void stack(int n, nanojit::LIns* i);

    JS_REQUIRES_STACK nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1,
                                         nanojit::LIns* s0, nanojit::LIns* s1);
    nanojit::LIns* f2i(nanojit::LIns* f);
    nanojit::LIns* f2u(nanojit::LIns* f);
    JS_REQUIRES_STACK nanojit::LIns* makeNumberInt32(nanojit::LIns* f);
    JS_REQUIRES_STACK nanojit::LIns* stringify(jsval& v);

    JS_REQUIRES_STACK nanojit::LIns* newArguments(nanojit::LIns* callee_ins);

    JS_REQUIRES_STACK RecordingStatus call_imacro(jsbytecode* imacro);

    JS_REQUIRES_STACK AbortableRecordingStatus ifop();
    JS_REQUIRES_STACK RecordingStatus switchop();
#ifdef NANOJIT_IA32
    JS_REQUIRES_STACK AbortableRecordingStatus tableswitch();
#endif
    JS_REQUIRES_STACK RecordingStatus inc(jsval& v, jsint incr, bool pre = true);
    JS_REQUIRES_STACK RecordingStatus inc(jsval v, nanojit::LIns*& v_ins, jsint incr,
                                          bool pre = true);
    JS_REQUIRES_STACK RecordingStatus incHelper(jsval v, nanojit::LIns* v_ins,
                                                nanojit::LIns*& v_after, jsint incr);
    JS_REQUIRES_STACK AbortableRecordingStatus incProp(jsint incr, bool pre = true);
    JS_REQUIRES_STACK RecordingStatus incElem(jsint incr, bool pre = true);
    JS_REQUIRES_STACK AbortableRecordingStatus incName(jsint incr, bool pre = true);

    JS_REQUIRES_STACK void strictEquality(bool equal, bool cmpCase);
    JS_REQUIRES_STACK AbortableRecordingStatus equality(bool negate, bool tryBranchAfterCond);
    JS_REQUIRES_STACK AbortableRecordingStatus equalityHelper(jsval l, jsval r,
                                                              nanojit::LIns* l_ins, nanojit::LIns* r_ins,
                                                              bool negate, bool tryBranchAfterCond,
                                                              jsval& rval);
    JS_REQUIRES_STACK AbortableRecordingStatus relational(nanojit::LOpcode op, bool tryBranchAfterCond);

    JS_REQUIRES_STACK RecordingStatus unary(nanojit::LOpcode op);
    JS_REQUIRES_STACK RecordingStatus binary(nanojit::LOpcode op);

    JS_REQUIRES_STACK RecordingStatus guardShape(nanojit::LIns* obj_ins, JSObject* obj,
                                                 uint32 shape, const char* name,
                                                 nanojit::LIns* map_ins, VMSideExit* exit);

    JSDHashTable guardedShapeTable;

#if defined DEBUG_notme && defined XP_UNIX
    void dumpGuardedShapes(const char* prefix);
#endif

    void forgetGuardedShapes();

    inline nanojit::LIns* map(nanojit::LIns *obj_ins);
    JS_REQUIRES_STACK bool map_is_native(JSObjectMap* map, nanojit::LIns* map_ins,
                                         nanojit::LIns*& ops_ins, size_t op_offset = 0);
    JS_REQUIRES_STACK AbortableRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins,
                                                                   JSObject*& obj2, jsuword& pcval);
    JS_REQUIRES_STACK RecordingStatus guardNativePropertyOp(JSObject* aobj,
                                                            nanojit::LIns* map_ins);
    JS_REQUIRES_STACK RecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins,
                                                            nanojit::LIns* map_ins,
                                                            JSObject* aobj,
                                                            JSObject* obj2,
                                                            JSPropCacheEntry* entry,
                                                            jsuword& pcval);

    void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot,
                         nanojit::LIns* v_ins);
    void stobj_set_dslot(nanojit::LIns *obj_ins, unsigned slot, nanojit::LIns*& dslots_ins,
                         nanojit::LIns* v_ins);
    void stobj_set_slot(nanojit::LIns* obj_ins, unsigned slot, nanojit::LIns*& dslots_ins,
                        nanojit::LIns* v_ins);

    nanojit::LIns* stobj_get_fslot(nanojit::LIns* obj_ins, unsigned slot);
    nanojit::LIns* stobj_get_dslot(nanojit::LIns* obj_ins, unsigned index,
                                   nanojit::LIns*& dslots_ins);
    nanojit::LIns* stobj_get_slot(nanojit::LIns* obj_ins, unsigned slot,
                                  nanojit::LIns*& dslots_ins);

    nanojit::LIns* stobj_get_private(nanojit::LIns* obj_ins) {
        return stobj_get_fslot(obj_ins, JSSLOT_PRIVATE);
    }

    nanojit::LIns* stobj_get_proto(nanojit::LIns* obj_ins) {
        return stobj_get_fslot(obj_ins, JSSLOT_PROTO);
    }

    nanojit::LIns* stobj_get_parent(nanojit::LIns* obj_ins) {
        return stobj_get_fslot(obj_ins, JSSLOT_PARENT);
    }

    JS_REQUIRES_STACK AbortableRecordingStatus name(jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
    JS_REQUIRES_STACK AbortableRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins,
                                                    uint32 *slotp, nanojit::LIns** v_insp,
                                                    jsval* outp);
    JS_REQUIRES_STACK AbortableRecordingStatus propTail(JSObject* obj, nanojit::LIns* obj_ins,
                                                        JSObject* obj2, jsuword pcval,
                                                        uint32 *slotp, nanojit::LIns** v_insp,
                                                        jsval* outp);
    JS_REQUIRES_STACK RecordingStatus denseArrayElement(jsval& oval, jsval& idx, jsval*& vp,
                                                        nanojit::LIns*& v_ins,
                                                        nanojit::LIns*& addr_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus getProp(jsval& v);
    JS_REQUIRES_STACK RecordingStatus getThis(nanojit::LIns*& this_ins);

    JS_REQUIRES_STACK VMSideExit* enterDeepBailCall();
    JS_REQUIRES_STACK void leaveDeepBailCall();

    JS_REQUIRES_STACK RecordingStatus primitiveToStringInPlace(jsval* vp);
    JS_REQUIRES_STACK void finishGetProp(nanojit::LIns* obj_ins, nanojit::LIns* vp_ins,
                                         nanojit::LIns* ok_ins, jsval* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyByName(nanojit::LIns* obj_ins, jsval* idvalp,
                                                        jsval* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyByIndex(nanojit::LIns* obj_ins,
                                                         nanojit::LIns* index_ins, jsval* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyById(nanojit::LIns* obj_ins, jsval* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyWithNativeGetter(nanojit::LIns* obj_ins,
                                                                  JSScopeProperty* sprop,
                                                                  jsval* outp);

    JS_REQUIRES_STACK RecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins,
                                                JSScopeProperty* sprop,
                                                jsval v, nanojit::LIns* v_ins);
    JS_REQUIRES_STACK RecordingStatus setProp(jsval &l, JSPropCacheEntry* entry,
                                              JSScopeProperty* sprop,
                                              jsval &v, nanojit::LIns*& v_ins);
    JS_REQUIRES_STACK RecordingStatus setCallProp(JSObject *callobj, nanojit::LIns *callobj_ins,
                                                  JSScopeProperty *sprop, nanojit::LIns *v_ins,
                                                  jsval v);
    JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByName(nanojit::LIns* obj_ins,
                                                              jsval* idvalp, jsval* rvalp,
                                                              bool init);
    JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByIndex(nanojit::LIns* obj_ins,
                                                               nanojit::LIns* index_ins,
                                                               jsval* rvalp, bool init);
    JS_REQUIRES_STACK AbortableRecordingStatus setElem(int lval_spindex, int idx_spindex,
                                                       int v_spindex);

    JS_REQUIRES_STACK nanojit::LIns* box_jsval(jsval v, nanojit::LIns* v_ins);
    JS_REQUIRES_STACK nanojit::LIns* unbox_jsval(jsval v, nanojit::LIns* v_ins, VMSideExit* exit);
    JS_REQUIRES_STACK bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp,
                                      VMSideExit* exit);
    JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,
                                           ExitType exitType = MISMATCH_EXIT);
    JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,
                                           VMSideExit* exit);
    JS_REQUIRES_STACK bool guardHasPrototype(JSObject* obj, nanojit::LIns* obj_ins,
                                             JSObject** pobj, nanojit::LIns** pobj_ins,
                                             VMSideExit* exit);
    JS_REQUIRES_STACK RecordingStatus guardPrototypeHasNoIndexedProperties(JSObject* obj,
                                                                           nanojit::LIns* obj_ins,
                                                                           ExitType exitType);
    JS_REQUIRES_STACK RecordingStatus guardNotGlobalObject(JSObject* obj,
                                                           nanojit::LIns* obj_ins);
    JS_REQUIRES_STACK JSStackFrame* entryFrame() const;
    JS_REQUIRES_STACK void clearEntryFrameSlotsFromTracker(Tracker& which);
    JS_REQUIRES_STACK void clearCurrentFrameSlotsFromTracker(Tracker& which);
    JS_REQUIRES_STACK void clearFrameSlotsFromTracker(Tracker& which, JSStackFrame* fp, unsigned nslots);
    JS_REQUIRES_STACK void putArguments();
    JS_REQUIRES_STACK RecordingStatus guardCallee(jsval& callee);
    JS_REQUIRES_STACK JSStackFrame *guardArguments(JSObject *obj, nanojit::LIns* obj_ins,
                                                   unsigned *depthp);
    JS_REQUIRES_STACK nanojit::LIns* guardArgsLengthNotAssigned(nanojit::LIns* argsobj_ins);
    JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSObject* ctor,
                                                        nanojit::LIns*& proto_ins);
    JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSProtoKey key,
                                                        nanojit::LIns*& proto_ins);
    JS_REQUIRES_STACK RecordingStatus newArray(JSObject* ctor, uint32 argc, jsval* argv,
                                               jsval* rval);
    JS_REQUIRES_STACK RecordingStatus newString(JSObject* ctor, uint32 argc, jsval* argv,
                                                jsval* rval);
    JS_REQUIRES_STACK RecordingStatus interpretedFunctionCall(jsval& fval, JSFunction* fun,
                                                              uintN argc, bool constructing);
    JS_REQUIRES_STACK void propagateFailureToBuiltinStatus(nanojit::LIns *ok_ins,
                                                           nanojit::LIns *&status_ins);
    JS_REQUIRES_STACK RecordingStatus emitNativeCall(JSSpecializedNative* sn, uintN argc,
                                                     nanojit::LIns* args[], bool rooted);
    JS_REQUIRES_STACK void emitNativePropertyOp(JSScope* scope,
                                                JSScopeProperty* sprop,
                                                nanojit::LIns* obj_ins,
                                                bool setflag,
                                                nanojit::LIns* boxed_ins);
    JS_REQUIRES_STACK RecordingStatus callSpecializedNative(JSNativeTraceInfo* trcinfo, uintN argc,
                                                            bool constructing);
    JS_REQUIRES_STACK RecordingStatus callNative(uintN argc, JSOp mode);
    JS_REQUIRES_STACK RecordingStatus functionCall(uintN argc, JSOp mode);

    JS_REQUIRES_STACK void trackCfgMerges(jsbytecode* pc);
    JS_REQUIRES_STACK void emitIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK AbortableRecordingStatus checkTraceEnd(jsbytecode* pc);

    bool hasMethod(JSObject* obj, jsid id);
    JS_REQUIRES_STACK bool hasIteratorMethod(JSObject* obj);

    JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);

    JS_REQUIRES_STACK JSTraceType determineSlotType(jsval* vp);

    JS_REQUIRES_STACK AbortableRecordingStatus compile();
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop(VMSideExit* exit);
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop(SlotMap& slotMap, VMSideExit* exit);
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop(VMSideExit* exit);
    JS_REQUIRES_STACK void joinEdgesToEntry(TreeFragment* peer_root);
    JS_REQUIRES_STACK void adjustCallerTypes(TreeFragment* f);
    JS_REQUIRES_STACK void prepareTreeCall(TreeFragment* inner, nanojit::LIns*& inner_sp_ins);
    JS_REQUIRES_STACK void emitTreeCall(TreeFragment* inner, VMSideExit* exit, nanojit::LIns* inner_sp_ins);
    JS_REQUIRES_STACK void determineGlobalTypes(JSTraceType* typeMap);
    JS_REQUIRES_STACK VMSideExit* downSnapshot(FrameInfo* downFrame);
    JS_REQUIRES_STACK TreeFragment* findNestedCompatiblePeer(TreeFragment* f);
    JS_REQUIRES_STACK AbortableRecordingStatus attemptTreeCall(TreeFragment* inner,
                                                               uintN& inlineCallCount);

    static JS_REQUIRES_STACK bool recordLoopEdge(JSContext* cx, TraceRecorder* r,
                                                 uintN& inlineCallCount);

    /* Allocators associated with this recording session. */
    VMAllocator& tempAlloc() const { return *traceMonitor->tempAlloc; }
    VMAllocator& traceAlloc() const { return *traceMonitor->traceAlloc; }
    VMAllocator& dataAlloc() const { return *traceMonitor->dataAlloc; }
    /* Member declarations for each opcode, to be called before interpreting the opcode. */
#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format)               \
    JS_REQUIRES_STACK AbortableRecordingStatus record_##op();
# include "jsopcode.tbl"
#undef OPDEF

    inline void* operator new(size_t size) { return calloc(1, size); }
    inline void operator delete(void *p) { free(p); }

    JS_REQUIRES_STACK
    TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
                  unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
                  VMSideExit* expectedInnerExit, jsbytecode* outerTree,
                  uint32 outerArgc, RecordReason reason);

    /* The destructor should only be called through finish*, not directly. */
    ~TraceRecorder();
    JS_REQUIRES_STACK AbortableRecordingStatus finishSuccessfully();
    JS_REQUIRES_STACK AbortableRecordingStatus finishAbort(const char* reason);

    friend class ImportBoxedStackSlotVisitor;
    friend class ImportUnboxedStackSlotVisitor;
    friend class ImportGlobalSlotVisitor;
    friend class AdjustCallerGlobalTypesVisitor;
    friend class AdjustCallerStackTypesVisitor;
    friend class TypeCompatibilityVisitor;
    friend class ImportFrameSlotsVisitor;
    friend class SlotMap;
    friend class DefaultSlotMap;
    friend class DetermineTypesVisitor;
    friend class RecursiveSlotMap;
    friend class UpRecursiveSlotMap;
    friend jsval* js_ConcatPostImacroStackCleanup(uint32, JSFrameRegs &, TraceRecorder *);
    friend bool js_MonitorLoopEdge(JSContext*, uintN&, RecordReason);
    friend void js_AbortRecording(JSContext*, const char*);

public:
    static bool JS_REQUIRES_STACK
    startRecorder(JSContext*, VMSideExit*, VMFragment*,
                  unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
                  VMSideExit* expectedInnerExit, jsbytecode* outerTree,
                  uint32 outerArgc, RecordReason reason);

    /* Accessors. */
    VMFragment* getFragment() const { return fragment; }
    TreeFragment* getTree() const { return tree; }
    bool outOfMemory() const { return traceMonitor->outOfMemory(); }

    /* Entry points / callbacks from the interpreter. */
    JS_REQUIRES_STACK AbortableRecordingStatus monitorRecording(JSOp op);
    JS_REQUIRES_STACK AbortableRecordingStatus record_EnterFrame(uintN& inlineCallCount);
    JS_REQUIRES_STACK AbortableRecordingStatus record_LeaveFrame();
    JS_REQUIRES_STACK AbortableRecordingStatus record_SetPropHit(JSPropCacheEntry* entry,
                                                                 JSScopeProperty* sprop);
    JS_REQUIRES_STACK AbortableRecordingStatus record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
    JS_REQUIRES_STACK AbortableRecordingStatus record_NativeCallComplete();
    void forgetGuardedShapesForObject(JSObject* obj);

#ifdef DEBUG
    /* Debug printing functionality to emit printf() on trace. */
    JS_REQUIRES_STACK void tprint(const char *format, int count, nanojit::LIns *insa[]);
    JS_REQUIRES_STACK void tprint(const char *format);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4, nanojit::LIns *ins5);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4, nanojit::LIns *ins5,
                                  nanojit::LIns *ins6);
#endif
};
#define TRACING_ENABLED(cx)       ((cx)->jitEnabled)
#define TRACE_RECORDER(cx)        (JS_TRACE_MONITOR(cx).recorder)
#define SET_TRACE_RECORDER(cx,tr) (JS_TRACE_MONITOR(cx).recorder = (tr))

#define JSOP_IN_RANGE(op,lo,hi)   (uintN((op) - (lo)) <= uintN((hi) - (lo)))
#define JSOP_IS_BINARY(op)        JSOP_IN_RANGE(op, JSOP_BITOR, JSOP_MOD)
#define JSOP_IS_UNARY(op)         JSOP_IN_RANGE(op, JSOP_NEG, JSOP_POS)
#define JSOP_IS_EQUALITY(op)      JSOP_IN_RANGE(op, JSOP_EQ, JSOP_NE)

#define TRACE_ARGS_(x,args)                                                   \
    JS_BEGIN_MACRO                                                            \
        if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) {                        \
            AbortableRecordingStatus status = tr_->record_##x args;           \
            if (StatusAbortsRecording(status)) {                              \
                js_AbortRecording(cx, #x);                                    \
                if (status == ARECORD_ERROR)                                  \
                    goto error;                                               \
            }                                                                 \
            JS_ASSERT(status != ARECORD_IMACRO);                              \
        }                                                                     \
    JS_END_MACRO

#define TRACE_ARGS(x,args)      TRACE_ARGS_(x, args)
#define TRACE_0(x)              TRACE_ARGS(x, ())
#define TRACE_1(x,a)            TRACE_ARGS(x, (a))
#define TRACE_2(x,a,b)          TRACE_ARGS(x, (a, b))
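
/*
 * Usage sketch: the TRACE_n macros forward an event to the active recorder,
 * aborting recording (and jumping to the caller's error label) if the
 * recorder says so.
 */
#if 0
    /* e.g. in the interpreter, after a property-cache hit: */
    TRACE_2(SetPropHit, entry, sprop);  // calls tr_->record_SetPropHit(entry, sprop)
#endif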
extern JS_REQUIRES_STACK bool
js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason);

extern JS_REQUIRES_STACK void
js_AbortRecording(JSContext* cx, const char* reason);

extern void
js_InitJIT(JSTraceMonitor *tm);

extern void
js_FinishJIT(JSTraceMonitor *tm);

extern void
js_PurgeScriptFragments(JSContext* cx, JSScript* script);

extern bool
js_OverfullJITCache(JSTraceMonitor* tm);

extern void
js_FlushJITCache(JSContext* cx);

extern void
js_PurgeJITOracle();

extern JSObject *
js_GetBuiltinFunction(JSContext *cx, uintN index);

extern void
js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);

extern bool
js_NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot);
#ifdef MOZ_TRACEVIS

extern JS_FRIEND_API(bool)
JS_StartTraceVis(const char* filename);

extern JS_FRIEND_API(JSBool)
js_StartTraceVis(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
                 jsval *rval);

extern JS_FRIEND_API(bool)
JS_StopTraceVis();

extern JS_FRIEND_API(JSBool)
js_StopTraceVis(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
                jsval *rval);

/* Must contain no more than 16 items. */
enum TraceVisState {
    // Special: means we returned from current activity to last
    S_EXITLAST,
    // Activities
    S_INTERP,
    S_MONITOR,
    S_RECORD,
    S_COMPILE,
    S_EXECUTE,
    S_NATIVE,
    // Events: these all have (bit 3) == 1.
    S_RESET = 8
};
/* Reason for an exit to the interpreter. */
enum TraceVisExitReason {
    R_NONE,
    R_ABORT,
    /* Reasons in js_MonitorLoopEdge */
    R_INNER_SIDE_EXIT,
    R_DOUBLES,
    R_CALLBACK_PENDING,
    R_OOM_GETANCHOR,
    R_BACKED_OFF,
    R_COLD,
    R_FAIL_RECORD_TREE,
    R_MAX_PEERS,
    R_FAIL_EXECUTE_TREE,
    R_FAIL_STABILIZE,
    R_FAIL_EXTEND_FLUSH,
    R_FAIL_EXTEND_MAX_BRANCHES,
    R_FAIL_EXTEND_START,
    R_FAIL_EXTEND_COLD,
    R_NO_EXTEND_OUTER,
    R_MISMATCH_EXIT,
    R_OOM_EXIT,
    R_TIMEOUT_EXIT,
    R_DEEP_BAIL_EXIT,
    R_STATUS_EXIT,
    R_OTHER_EXIT
};

enum TraceVisFlushReason {
    FR_DEEP_BAIL,
    FR_OOM,
    FR_GLOBAL_SHAPE_MISMATCH,
    FR_GLOBALS_FULL
};

const unsigned long long MS64_MASK = 0xfull << 60;
const unsigned long long MR64_MASK = 0x1full << 55;
const unsigned long long MT64_MASK = ~(MS64_MASK | MR64_MASK);
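
/*
 * Each log record is one 64-bit word: the state in bits 60-63 (MS64_MASK),
 * the exit reason in bits 55-59 (MR64_MASK), and the rdtsc() timestamp in the
 * remaining low bits (MT64_MASK). A decoding sketch (hypothetical helper,
 * mirroring the packing in js_LogTraceVisState below):
 */
#if 0
static inline void
exampleDecodeTraceVisRecord(unsigned long long d, TraceVisState &s, TraceVisExitReason &r)
{
    s = TraceVisState((d & MS64_MASK) >> 60);
    r = TraceVisExitReason((d & MR64_MASK) >> 55);
    // timestamp = d & MT64_MASK
}
#endif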
extern FILE* traceVisLogFile;
extern JSHashTable *traceVisScriptTable;

extern JS_FRIEND_API(void)
js_StoreTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r);

static inline void
js_LogTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
    if (traceVisLogFile) {
        unsigned long long sllu = s;
        unsigned long long rllu = r;
        unsigned long long d = (sllu << 60) | (rllu << 55) | (rdtsc() & MT64_MASK);
        fwrite(&d, sizeof(d), 1, traceVisLogFile);
    }
    if (traceVisScriptTable) {
        js_StoreTraceVisState(cx, s, r);
    }
}

/*
 * Although this runs the same code as js_LogTraceVisState, it is a separate
 * function because the meaning of the log entry is different. Also, the entry
 * formats may diverge someday.
 */
static inline void
js_LogTraceVisEvent(JSContext *cx, TraceVisState s, TraceVisFlushReason r)
{
    js_LogTraceVisState(cx, s, (TraceVisExitReason) r);
}

static inline void
js_EnterTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
    js_LogTraceVisState(cx, s, r);
}

static inline void
js_ExitTraceVisState(JSContext *cx, TraceVisExitReason r)
{
    js_LogTraceVisState(cx, S_EXITLAST, r);
}

struct TraceVisStateObj {
    TraceVisExitReason r;
    JSContext *mCx;

    inline TraceVisStateObj(JSContext *cx, TraceVisState s) : r(R_NONE)
    {
        js_EnterTraceVisState(cx, s, R_NONE);
        mCx = cx;
    }
    inline ~TraceVisStateObj()
    {
        js_ExitTraceVisState(mCx, r);
    }
};

#endif /* MOZ_TRACEVIS */
extern jsval *
js_ConcatPostImacroStackCleanup(uint32 argc, JSFrameRegs &regs,
                                TraceRecorder *recorder);

#else  /* !JS_TRACER */

#define TRACE_0(x)              ((void)0)
#define TRACE_1(x,a)            ((void)0)
#define TRACE_2(x,a,b)          ((void)0)

#endif /* !JS_TRACER */

#endif /* jstracer_h___ */