/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99 ft=cpp:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
 * May 28, 2008.
 *
 * The Initial Developer of the Original Code is
 *   Brendan Eich <brendan@mozilla.org>
 *
 * Contributor(s):
 *   Andreas Gal <gal@mozilla.com>
 *   Mike Shaver <shaver@mozilla.org>
 *   David Anderson <danderson@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "jsbuiltins.h"
#include "jscompartment.h"
    nanojit::Allocator* alloc;

    void ensure(unsigned size) {
        _max = JS_MAX(_max * 2, size);
        T* tmp = new (*alloc) T[_max];
        memcpy(tmp, _data, _len * sizeof(T));
        _data = (T*)js_realloc(_data, _max * sizeof(T));
        memset(&_data[_len], 0xcd, _max - _len);

    Queue(nanojit::Allocator* alloc)

        for (unsigned n = 0; n < _len; ++n) {

        JS_ASSERT(_len <= _max);

    void add(T* chunk, unsigned size) {
        JS_ASSERT(_len <= _max);
        memcpy(&_data[_len], chunk, size * sizeof(T));

    void addUnique(T a) {

    void setLength(unsigned len) {

    T& get(unsigned i) {
        JS_ASSERT(i < length());

    const T& get(unsigned i) const {
        JS_ASSERT(i < length());

    T& operator [](unsigned i) {

    const T& operator [](unsigned i) const {

    unsigned length() const {

    int offsetOf(T slot) {
        for (n = 0; n < _len; ++n)
/*
 * Tracker is used to keep track of values being manipulated by the interpreter
 * during trace recording. It maps opaque, 4-byte aligned addresses to LIns
 * pointers. To do this efficiently, we observe that the addresses of jsvals
 * living in the interpreter tend to be aggregated close to each other -
 * usually on the same page (where a tracker page doesn't have to be the same
 * size as the OS page size, but it's typically similar). The Tracker
 * consists of a linked list of structures representing a memory page, which
 * are created on demand as memory locations are used.
 *
 * For every address, we first split it into two parts: upper bits which
 * represent the "base", and lower bits which represent an offset against the
 * base. We then right-shift the offset by two because the bottom two
 * bits of a 4-byte aligned address are always zero. The mapping then
 * becomes:
 *
 *   page = page in pagelist such that Base(address) == page->base,
 *   page->map[Offset(address)]
 */
#define TRACKER_PAGE_SZB        4096
#define TRACKER_PAGE_ENTRIES    (TRACKER_PAGE_SZB >> 2)    // each slot is 4 bytes
#define TRACKER_PAGE_MASK       jsuword(TRACKER_PAGE_SZB - 1)
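
/*
 * Illustrative sketch (not part of the original header; the helper names are
 * hypothetical): how the base/offset split described above can be computed
 * from the TRACKER_PAGE_* constants.
 */
#if 0
static jsuword
ExampleTrackerPageBase(const void* v)
{
    // Clear the low bits to find the start of the tracker page.
    return jsuword(v) & ~TRACKER_PAGE_MASK;
}

static jsuword
ExampleTrackerPageOffset(const void* v)
{
    // Keep the low bits, then drop the two always-zero alignment bits so the
    // result indexes the page's map[] array of LIns pointers.
    return (jsuword(v) & TRACKER_PAGE_MASK) >> 2;
}
#endif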
    struct TrackerPage* next;
    nanojit::LIns*      map[TRACKER_PAGE_ENTRIES];

    struct TrackerPage* pagelist;

    jsuword             getTrackerPageBase(const void* v) const;
    jsuword             getTrackerPageOffset(const void* v) const;
    struct TrackerPage* findTrackerPage(const void* v) const;
    struct TrackerPage* addTrackerPage(const void* v);

    bool                has(const void* v) const;
    nanojit::LIns*      get(const void* v) const;
    void                set(const void* v, nanojit::LIns* ins);
class VMFragment : public nanojit::Fragment
{
    VMFragment(const void* _ip verbose_only(, uint32_t profFragID))
      : Fragment(_ip verbose_only(, profFragID))

    /*
     * If this is anchored off a TreeFragment, this points to that tree fragment.
     * Otherwise, it is |this|.
     */
    TreeFragment* toTreeFragment();
#ifdef NJ_NO_VARIADIC_MACROS

#define debug_only_stmt(action)            /* */
static void debug_only_printf(int mask, const char *fmt, ...) JS_BEGIN_MACRO JS_END_MACRO
#define debug_only_print0(mask, str)       JS_BEGIN_MACRO JS_END_MACRO

#elif defined(JS_JIT_SPEW)

// Top level logging controller object.
extern nanojit::LogControl LogController;

// Top level profiling hook, needed to harvest profile info from Fragments
// whose logical lifetime is about to finish
extern void FragProfiling_FragFinalizer(nanojit::Fragment* f, TraceMonitor*);

#define debug_only_stmt(stmt) \

#define debug_only_printf(mask, fmt, ...)                                      \
        if ((LogController.lcbits & (mask)) > 0) {                             \
            LogController.printf(fmt, __VA_ARGS__);                            \

#define debug_only_print0(mask, str)                                           \
        if ((LogController.lcbits & (mask)) > 0) {                             \
            LogController.printf("%s", str);                                   \

#define debug_only_stmt(action)            /* */
#define debug_only_printf(mask, fmt, ...)  JS_BEGIN_MACRO JS_END_MACRO
#define debug_only_print0(mask, str)       JS_BEGIN_MACRO JS_END_MACRO
/*
 * The oracle keeps track of hit counts for program counter locations, as
 * well as slots that should not be demoted to int because we know they
 * overflow or because they result in type-unstable traces. We use simple
 * hash tables. Collisions lead to loss of optimization (demotable slots
 * are not demoted, etc.) but have no correctness implications.
 */
#define ORACLE_SIZE 4096
    avmplus::BitSet _stackDontDemote;
    avmplus::BitSet _globalDontDemote;
    avmplus::BitSet _pcDontDemote;
    avmplus::BitSet _pcSlowZeroTest;

    JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc);
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc) const;
    void markInstructionUndemotable(jsbytecode* pc);
    bool isInstructionUndemotable(jsbytecode* pc) const;
    void markInstructionSlowZeroTest(jsbytecode* pc);
    bool isInstructionSlowZeroTest(jsbytecode* pc) const;

    void clearDemotability();
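
    /*
     * Illustrative sketch (an assumption, not the original implementation):
     * the "simple hash tables" above can be fixed-size bit sets indexed by a
     * hash of the key, so a collision can only make a slot look undemotable
     * when it is not - losing an optimization, never correctness.
     */
#if 0
    static inline size_t
    ExampleOracleHash(const void* pc, unsigned slot)
    {
        // Hypothetical mixing function; any hash into [0, ORACLE_SIZE) works.
        uintptr_t h = uintptr_t(pc) ^ (uintptr_t(slot) * 0x9E3779B9u);
        return size_t(h % ORACLE_SIZE);
    }
    // Marking sets the bit, querying tests it, e.g.:
    //   _pcDontDemote.set(ExampleOracleHash(pc, slot));
    //   bool undemotable = _pcDontDemote.get(ExampleOracleHash(pc, slot));
#endif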
typedef Queue<uint16> SlotList;

class TypeMap : public Queue<JSValueType> {
    TypeMap(nanojit::Allocator* alloc) : Queue<JSValueType>(alloc) {}
    void set(unsigned stackSlots, unsigned ngslots,
             const JSValueType* stackTypeMap, const JSValueType* globalTypeMap);
    JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth,
    JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
                                                     unsigned stackSlots, bool speculate);
    bool matches(TypeMap& other) const;
    void fromRaw(JSValueType* other, unsigned numSlots);
#define JS_TM_EXITCODES(_)                                                      \
    /*                                                                          \
     * An exit at a possible branch-point in the trace at which to attach a     \
     * future secondary trace. Therefore the recorder must generate different   \
     * code to handle the other outcome of the branch condition from the        \
     * primary trace's outcome.                                                  \
     */                                                                         \
    /*                                                                          \
     * Exit at a tableswitch via a numbered case.                               \
     */                                                                         \
    /*                                                                          \
     * Exit at a tableswitch via the default case.                              \
     */                                                                         \
    /*                                                                          \
     * An exit from a trace because a condition relied upon at recording time   \
     * no longer holds, where the alternate path of execution is so rare or     \
     * difficult to address in native code that it is not traced at all, e.g.   \
     * negative array index accesses, which differ from positive indexes in     \
     * that they require a string-based property lookup rather than a simple    \
     */                                                                         \
    /*                                                                          \
     * A specialization of MISMATCH_EXIT to handle allocation failures.         \
     */

#define MAKE_EXIT_CODE(x) x##_EXIT,
JS_TM_EXITCODES(MAKE_EXIT_CODE)
#undef MAKE_EXIT_CODE
struct VMSideExit : public nanojit::SideExit
{
    uint32 numGlobalSlots;
    uint32 numStackSlots;
    uint32 numStackSlotsBelowCurrentFrame;

    inline JSValueType* stackTypeMap() {
        return (JSValueType*)(this + 1);
    }

    inline JSValueType& stackType(unsigned i) {
        JS_ASSERT(i < numStackSlots);
        return stackTypeMap()[i];
    }

    inline JSValueType* globalTypeMap() {
        return (JSValueType*)(this + 1) + this->numStackSlots;
    }

    inline JSValueType* fullTypeMap() {
        return stackTypeMap();
    }

    inline VMFragment* fromFrag() {
        return (VMFragment*)from;
    }

    inline TreeFragment* root() {
        return fromFrag()->root;
    }
class VMAllocator : public nanojit::Allocator
{
    VMAllocator(char* reserve, size_t reserveSize)
      : mOutOfMemory(false), mSize(0), mReserve(reserve),
        mReserveCurr(uintptr_t(reserve)), mReserveLimit(uintptr_t(reserve + reserveSize))

        nanojit::Allocator::Chunk* saved_chunk;

        Mark(VMAllocator& vma) :
            saved_chunk(vma.current_chunk),
            saved_top(vma.current_top),
            saved_limit(vma.current_limit),
            saved_size(vma.mSize)

        void commit() { committed = true; }

    void rewind(const Mark& m) {
        while (current_chunk != m.saved_chunk) {
            Chunk *prev = current_chunk->prev;
            freeChunk(current_chunk);
            current_chunk = prev;
        }
        current_top = m.saved_top;
        current_limit = m.saved_limit;
        mSize = m.saved_size;
        memset(current_top, 0, current_limit - current_top);
    }

    /* See nanojit::Allocator::allocChunk() for details on these. */
    uintptr_t mReserveCurr;
    uintptr_t mReserveLimit;
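
    /*
     * Illustrative sketch (hypothetical usage, not part of the original
     * header): a Mark snapshots the allocator's bump-pointer state; if the
     * work that follows fails, rewind() releases everything allocated since
     * the snapshot, while commit() keeps it. In the recorder, the Mark
     * member's destructor performs this rewind automatically when the mark
     * was not committed.
     */
#if 0
    static void ExampleCompile(VMAllocator& traceAlloc)
    {
        VMAllocator::Mark mark(traceAlloc);   // remember allocator state
        bool ok = /* ... allocate from traceAlloc and compile ... */ false;
        if (!ok) {
            traceAlloc.rewind(mark);          // failed: drop allocations since the mark
            return;
        }
        mark.commit();                        // succeeded: keep the allocations
    }
#endif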
    JSObject*   block;      // caller block chain head
    jsbytecode* pc;         // caller fp->regs->pc
    jsbytecode* imacpc;     // caller fp->imacpc
    uint32      spdist;     // distance from fp->slots to fp->regs->sp at JSOP_CALL

    /*
     * Bit 16 (0x10000) is a flag that is set if constructing (called through new).
     * Bits 0-15 are the actual argument count. This may be less than fun->nargs.
     * NB: This is argc for the callee, not the caller.
     */
    uint32      argc;

    /*
     * Number of stack slots in the caller, not counting slots pushed when
     * invoking the callee. That is, slots after JSOP_CALL completes but
     * without the return value. This is also equal to the number of slots
     * between fp->prev->argv[-2] (calleR fp->callee) and fp->argv[-2]
     * (calleE fp->callee).
     */

    /* argc of the caller */

    // Safer accessors for argc.
    enum { CONSTRUCTING_FLAG = 0x10000 };
    void set_argc(uint16 argc, bool constructing) {
        this->argc = uint32(argc) | (constructing ? CONSTRUCTING_FLAG : 0);
    }
    uint16 get_argc() const { return uint16(argc & ~CONSTRUCTING_FLAG); }
    bool is_constructing() const { return (argc & CONSTRUCTING_FLAG) != 0; }

    // The typemap just before the callee is called.
    JSValueType* get_typemap() { return (JSValueType*) (this+1); }
    const JSValueType* get_typemap() const { return (JSValueType*) (this+1); }
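
    /*
     * Illustrative sketch (hypothetical values, not part of the original
     * header): how the accessors above pack and unpack the argc word.
     */
#if 0
    static void ExampleArgcPacking(FrameInfo& fi)
    {
        fi.set_argc(3, /* constructing = */ true);   // stores CONSTRUCTING_FLAG | 3
        uint16 n = fi.get_argc();                    // 3: the flag bit is masked off
        bool ctor = fi.is_constructing();            // true: the flag bit was set
        (void) n; (void) ctor;
    }
#endif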
    VMFragment* fragment;
struct LinkableFragment : public VMFragment
{
    LinkableFragment(const void* _ip, nanojit::Allocator* alloc
                     verbose_only(, uint32_t profFragID))
      : VMFragment(_ip verbose_only(, profFragID)), typeMap(alloc), nStackTypes(0)

    unsigned nStackTypes;
    unsigned spOffsetAtEntry;
    SlotList* globalSlots;
/*
 * argc is cx->fp->argc at the trace loop header, i.e., the number of arguments
 * pushed for the innermost JS frame. This is required as part of the fragment
 * key because the fragment will write those arguments back to the interpreter
 * stack when it exits, using its typemap, which implicitly incorporates a
 * given value of argc. Without this feature, a fragment could be called as an
 * inner tree with two different values of argc, and entry type checking or
 * exit frame synthesis could crash.
 */
struct TreeFragment : public LinkableFragment
{
    TreeFragment(const void* _ip, nanojit::Allocator* alloc, JSObject* _globalObj,
                 uint32 _globalShape, uint32 _argc verbose_only(, uint32_t profFragID)):
        LinkableFragment(_ip, alloc verbose_only(, profFragID)),
        globalObj(_globalObj),
        globalShape(_globalShape),
        dependentTrees(alloc),

    /* Dependent trees must be trashed if this tree dies, and updated on missing global types */
    Queue<TreeFragment*> dependentTrees;
    /* Linked trees must be updated on missing global types, but are not dependent */
    Queue<TreeFragment*> linkedTrees;

    const char* treeFileName;
    uintN treeLineNumber;

    UnstableExit* unstableExits;
    Queue<VMSideExit*> sideExits;
    ptrdiff_t nativeStackBase;
    unsigned maxCallDepth;
    /* All embedded GC things are registered here so the GC can scan them. */
    Queue<Value> gcthings;
    Queue<const js::Shape*> shapes;
    unsigned maxNativeStackSlots;
    /* Gives the number of times we have entered this trace. */
    /* Gives the total number of iterations executed by the trace (up to a limit). */

    inline unsigned nGlobalTypes() {
        return typeMap.length() - nStackTypes;
    }

    inline JSValueType* globalTypeMap() {
        return typeMap.data() + nStackTypes;
    }

    inline JSValueType* stackTypeMap() {
        return typeMap.data();
    }

    JS_REQUIRES_STACK void initialize(JSContext* cx, SlotList *globalSlots, bool speculate);
    UnstableExit* removeUnstableExit(VMSideExit* exit);
inline TreeFragment*
VMFragment::toTreeFragment()
{
    JS_ASSERT(root == this);
    return static_cast<TreeFragment*>(this);
}

    MONITOR_NOT_RECORDING,
const uintN PROFILE_MAX_INNER_LOOPS = 8;
const uintN PROFILE_MAX_STACK = 6;
/*
 * A loop profile keeps track of the instruction mix of a hot loop. We use this
 * information to predict whether tracing would be beneficial for the loop.
 */

    /* Instructions are divided into a few categories. */
        OP_FLOAT,        // Floating point arithmetic
        OP_INT,          // Integer arithmetic
        OP_BIT,          // Bit operations
        OP_EVAL,         // Calls to eval()
        OP_CALL,         // JSOP_CALL instructions
        OP_FWDJUMP,      // Jumps with positive delta
        OP_NEW,          // JSOP_NEW instructions
        OP_RECURSIVE,    // Recursive calls
        OP_ARRAY_READ,   // Reads from dense arrays
        OP_TYPED_ARRAY,  // Accesses to typed arrays

    /* The script in which the loop header lives. */
    JSScript *entryScript;

    /* The stack frame where we started profiling. Only valid while profiling! */
    JSStackFrame *entryfp;

    /* The bytecode locations of the loop header and the back edge. */
    jsbytecode *top, *bottom;

    /* Number of times we have seen this loop executed; used to decide when to profile. */

    /* Whether we have run a complete profile of the loop. */

    /* Sometimes we can't decide in one profile run whether to trace, so we set undecided. */

    /* If we have profiled the loop, this saves the decision of whether to trace it. */

    /* Memoized value of isCompilationUnprofitable. */

    /*
     * Sometimes loops are not good tracing opportunities, but they are nested inside
     * loops that we want to trace. In that case, we set their traceOK flag to true,
     * but we set execOK to false. That way, the loop is traced so that it can be
     * integrated into the outer trace, but we never execute the trace on its own.
     */

    /* Instruction mix for the loop and total number of instructions. */
    uintN allOps[OP_LIMIT];

    /* Instruction mix and total for the loop, excluding nested inner loops. */
    uintN selfOps[OP_LIMIT];

    /*
     * A prediction of the number of instructions we would have to compile
     * for the loop. This takes into account the fact that a branch may cause us to
     * compile every instruction after it twice. Polymorphic calls are
     * treated as n-way branches.
     */
    double numSelfOpsMult;

    /*
     * This keeps track of the number of times that every succeeding instruction
     * in the trace will have to be compiled. Every time we hit a branch, we
     * double this number. Polymorphic calls multiply it by n (for n-way
     * calls).
     */
    double branchMultiplier;
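
    /*
     * Illustrative sketch (hypothetical numbers, not part of the original
     * header): how branchMultiplier feeds a prediction like numSelfOpsMult.
     * Each instruction contributes its cost times the current multiplier;
     * a branch doubles the multiplier and an n-way polymorphic call
     * multiplies it by n.
     */
#if 0
    static void ExampleEstimate()
    {
        double branchMultiplier = 1.0;
        double numSelfOpsMult = 0.0;

        numSelfOpsMult += branchMultiplier;   // plain op: counted once
        branchMultiplier *= 2;                // branch: later ops may compile twice
        numSelfOpsMult += branchMultiplier;   // op after the branch: counted twice
        branchMultiplier *= 3;                // 3-way polymorphic call
        numSelfOpsMult += branchMultiplier;   // op after the call: counted six times
        (void) numSelfOpsMult;
    }
#endif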
    /* Set to true if the loop is short (i.e., has fewer than 8 iterations). */

    /* Set to true if the loop may be short (has few iterations at profiling time). */

    /*
     * When we hit a nested loop while profiling, we record where it occurs
     * and how many iterations of it we execute.
     */
        JSStackFrame *entryfp;
        jsbytecode *top, *bottom;

        InnerLoop(JSStackFrame *entryfp, jsbytecode *top, jsbytecode *bottom)
          : entryfp(entryfp), top(top), bottom(bottom), iters(0) {}

    /* These two variables track all the inner loops seen while profiling (up to a limit). */
    InnerLoop innerLoops[PROFILE_MAX_INNER_LOOPS];

    /*
     * These two variables track the loops that we are currently nested
     * inside while profiling. Loops get popped off here when they exit.
     */
    InnerLoop loopStack[PROFILE_MAX_INNER_LOOPS];
    uintN loopStackDepth;

    /*
     * These fields keep track of values on the JS stack. If the stack grows larger
     * than PROFILE_MAX_STACK, we continue to track sp, but we return conservative results.
     */
        StackValue() : isConst(false), hasValue(false) {}
        StackValue(bool isConst) : isConst(isConst), hasValue(false) {}
        StackValue(bool isConst, int value) : isConst(isConst), hasValue(true), value(value) {}

    StackValue stack[PROFILE_MAX_STACK];

    inline void stackClear() { sp = 0; }

    inline void stackPush(const StackValue &v) {
        if (sp < PROFILE_MAX_STACK)

    inline void stackPop() { if (sp > 0) sp--; }

    inline StackValue stackAt(int pos) {
        if (pos >= 0 && uintN(pos) < PROFILE_MAX_STACK)
        return StackValue(false);

    LoopProfile(JSStackFrame *entryfp, jsbytecode *top, jsbytecode *bottom);
    /* These two functions track the instruction mix. */
    inline void increment(OpKind kind)
    {
        if (loopStackDepth == 0)

    inline uintN count(OpKind kind) { return allOps[kind]; }

    /* Called for every back edge being profiled. */
    MonitorResult profileLoopEdge(JSContext* cx, uintN& inlineCallCount);

    /* Called for every instruction being profiled. */
    ProfileAction profileOperation(JSContext *cx, JSOp op);

    /* Once a loop's profile is done, these decide whether it should be traced. */
    bool isCompilationExpensive(JSContext *cx, uintN depth);
    bool isCompilationUnprofitable(JSContext *cx, uintN goodOps);
    void decide(JSContext *cx);
/*
 * BUILTIN_NO_FIXUP_NEEDED indicates that after the initial LeaveTree of a deep
 * bail, the builtin call needs no further fixup when the trace exits and calls
 * LeaveTree the second time.
 */
typedef enum BuiltinStatus {

static JS_INLINE void
SetBuiltinError(JSContext *cx)
{
    JS_TRACE_MONITOR(cx).tracerState->builtinStatus |= BUILTIN_ERROR;
}

static JS_INLINE bool
WasBuiltinSuccessful(JSContext *cx)
{
    return JS_TRACE_MONITOR(cx).tracerState->builtinStatus == 0;
}
#ifdef DEBUG_RECORDING_STATUS_NOT_BOOL
/* #define DEBUG_RECORDING_STATUS_NOT_BOOL to detect misuses of RecordingStatus */
struct RecordingStatus {
    bool operator==(RecordingStatus &s) { return this->code == s.code; };
    bool operator!=(RecordingStatus &s) { return this->code != s.code; };

enum RecordingStatusCodes {
    RECORD_ERROR_code     = 0,
    RECORD_STOP_code      = 1,
    RECORD_CONTINUE_code  = 3,
    RECORD_IMACRO_code    = 4

RecordingStatus RECORD_CONTINUE = { RECORD_CONTINUE_code };
RecordingStatus RECORD_STOP     = { RECORD_STOP_code };
RecordingStatus RECORD_IMACRO   = { RECORD_IMACRO_code };
RecordingStatus RECORD_ERROR    = { RECORD_ERROR_code };

struct AbortableRecordingStatus {
    bool operator==(AbortableRecordingStatus &s) { return this->code == s.code; };
    bool operator!=(AbortableRecordingStatus &s) { return this->code != s.code; };

enum AbortableRecordingStatusCodes {
    ARECORD_ERROR_code          = 0,
    ARECORD_STOP_code           = 1,
    ARECORD_ABORTED_code        = 2,
    ARECORD_CONTINUE_code       = 3,
    ARECORD_IMACRO_code         = 4,
    ARECORD_IMACRO_ABORTED_code = 5,
    ARECORD_COMPLETED_code      = 6

AbortableRecordingStatus ARECORD_ERROR          = { ARECORD_ERROR_code };
AbortableRecordingStatus ARECORD_STOP           = { ARECORD_STOP_code };
AbortableRecordingStatus ARECORD_CONTINUE       = { ARECORD_CONTINUE_code };
AbortableRecordingStatus ARECORD_IMACRO         = { ARECORD_IMACRO_code };
AbortableRecordingStatus ARECORD_IMACRO_ABORTED = { ARECORD_IMACRO_ABORTED_code };
AbortableRecordingStatus ARECORD_ABORTED        = { ARECORD_ABORTED_code };
AbortableRecordingStatus ARECORD_COMPLETED      = { ARECORD_COMPLETED_code };

static inline AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    AbortableRecordingStatus ars = { rs.code };
    return ars;
}

static inline AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)

static inline bool
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
    return ars == ARECORD_ERROR || ars == ARECORD_STOP;
}
/*
 * Normally, during recording, when the recorder cannot continue, it returns
 * ARECORD_STOP to indicate that recording should be aborted by the top-level
 * recording function. However, if the recorder reenters the interpreter (e.g.,
 * when executing an inner loop), there will be an immediate abort. This
 * condition must be carefully detected and propagated out of all nested
 * recorder calls lest the now-invalid TraceRecorder object be accessed
 * accidentally. This condition is indicated by the ARECORD_ABORTED value.
 *
 * The AbortableRecordingStatus enumeration represents the general set of
 * possible results of calling a recorder function. Functions that cannot
 * possibly return ARECORD_ABORTED may statically guarantee this to the caller
 * using the RecordingStatus enumeration. Ideally, C++ would allow subtyping
 * of enumerations, but it doesn't. To simulate subtype conversion manually,
 * code should call InjectStatus to inject a value of the restricted set into a
 * value of the general set.
 */
enum RecordingStatus {
    RECORD_STOP            = 0,  // Recording should be aborted at the top-level
                                 // call to the recorder.
    RECORD_ERROR           = 1,  // Recording should be aborted at the top-level
                                 // call to the recorder and the interpreter should
    RECORD_CONTINUE        = 2,  // Continue recording.
    RECORD_IMACRO          = 3   // Entered imacro; continue recording.
                                 // Only JSOP_IS_IMACOP opcodes may return this.

enum AbortableRecordingStatus {
    ARECORD_STOP           = 0,  // see RECORD_STOP
    ARECORD_ERROR          = 1,  // Recording may or may not have been aborted.
                                 // Recording should be aborted at the top-level
                                 // if it has not already been and the interpreter
    ARECORD_CONTINUE       = 2,  // see RECORD_CONTINUE
    ARECORD_IMACRO         = 3,  // see RECORD_IMACRO
    ARECORD_IMACRO_ABORTED = 4,  // see comment in TR::monitorRecording.
    ARECORD_ABORTED        = 5,  // Recording has already been aborted; the
                                 // interpreter should continue executing
    ARECORD_COMPLETED      = 6   // Recording completed successfully, the
                                 // trace recorder has been deleted
static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    return static_cast<AbortableRecordingStatus>(rs);
}

static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)
{
    return ars;
}

/*
 * Return whether the recording status requires the current recording session
 * to be deleted. ERROR means the recording session should be deleted if it
 * hasn't already. ABORTED and COMPLETED indicate the recording session is
 * already deleted, so they return 'false'.
 */
static JS_ALWAYS_INLINE bool
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
    return ars <= ARECORD_ERROR;
}
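
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * a recorder function that can only stop or continue returns the narrow
 * RecordingStatus; a caller that mixes it with abortable results widens it
 * with InjectStatus.
 */
#if 0
static AbortableRecordingStatus
ExampleRecordStep(bool ok)
{
    RecordingStatus rs = ok ? RECORD_CONTINUE : RECORD_STOP;
    return InjectStatus(rs);    // widen the restricted status for the caller
}
#endif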
/* Results of trying to compare two typemaps together */
    TypeConsensus_Okay,          /* Two typemaps are compatible */
    TypeConsensus_Undemotes,     /* Not compatible now, but would be with pending undemotes. */
    TypeConsensus_Bad            /* Typemaps are not compatible */

enum TracePointAction {

typedef HashMap<nanojit::LIns*, JSObject*> GuardedShapeTable;

# define AbortRecording(cx, reason) AbortRecordingImpl(cx, reason)

# define AbortRecording(cx, reason) AbortRecordingImpl(cx)

AbortProfiling(JSContext *cx);
    /*************************************************************** Recording session constants */

    /* The context in which recording started. */

    /* Cached value of JS_TRACE_MONITOR(cx). */
    TraceMonitor* const             traceMonitor;

    /* Cached oracle keeps track of hit counts for program counter locations. */

    /* The Fragment being recorded by this recording session. */
    VMFragment* const               fragment;

    /* The root fragment representing the tree. */
    TreeFragment* const             tree;

    /* The global object from the start of recording until now. */
    JSObject* const                 globalObj;

    /* If non-null, the script of the outer loop that was aborted to start recording this loop. */
    JSScript* const                 outerScript;

    /* If non-null, the pc of the outer loop that was aborted to start recording this loop. */
    jsbytecode* const               outerPC;

    /* If |outerPC|, the argc to use when looking up |outerPC| in the fragments table. */
    uint32 const                    outerArgc;

    /* If non-null, the side exit from which we are growing. */
    VMSideExit* const               anchor;

    /* Instructions yielding the corresponding trace-const members of TracerState. */
    nanojit::LIns* const            cx_ins;
    nanojit::LIns* const            eos_ins;
    nanojit::LIns* const            eor_ins;
    nanojit::LIns* const            loopLabel;

    /* Lazy slot import state. */
    unsigned                        importStackSlots;
    unsigned                        importGlobalSlots;
    TypeMap                         importTypeMap;

    /*
     * The LirBuffer used to supply memory to our LirWriter pipeline. Also contains the most recent
     * instruction for {sp, rp, state}. Also contains names for debug JIT spew. Should be split.
     */
    nanojit::LirBuffer* const       lirbuf;

    /*
     * Remembers traceAlloc state before recording started; automatically rewinds when mark is
     * destroyed on a failed compilation.
     */
    VMAllocator::Mark               mark;

    /* Remembers the number of sideExits in treeInfo before recording started. */
    const unsigned                  numSideExitsBefore;
    /*********************************************************** Recording session mutable state */

    /* Maps interpreter stack values to the instruction generating that value. */

    /* Maps interpreter stack values to the instruction writing back to the native stack. */
    Tracker                         nativeFrameTracker;

    /* The start of the global object's slots we assume for the trackers. */
    Value*                          global_slots;

    /* The number of interpreted calls entered (and not yet left) since recording began. */

    /* The current atom table, mirroring the interpreter loop's variable of the same name. */

    /* An instruction yielding the current script's strict mode code flag. */
    nanojit::LIns*                  strictModeCode_ins;

    /* FIXME: Dead, but soon to be used for something or other. */
    Queue<jsbytecode*>              cfgMerges;

    /* Indicates whether the current tree should be trashed when the recording session ends. */

    /* A list of trees to trash at the end of the recording session. */
    Queue<TreeFragment*>            whichTreesToTrash;

    /* The set of objects whose shapes already have been guarded. */
    GuardedShapeTable               guardedShapeTable;

    /* Current initializer depth, and whether any of the initializers are unoptimized NEWINIT. */

    /*
     * If we are expecting a record_AddProperty callback for this instruction,
     * the shape of the object before adding the data property. Else NULL.
     */
    const js::Shape*                addPropShapeBefore;

    /***************************************** Temporal state hoisted into the recording session */

    /* Carry the return value from a STOP/RETURN to the subsequent record_LeaveFrame. */
    nanojit::LIns*                  rval_ins;

    /* Carry the return value from a native call to the record_NativeCallComplete. */
    nanojit::LIns*                  native_rval_ins;

    /* Carry the return value of js_CreateThis to record_NativeCallComplete. */
    nanojit::LIns*                  newobj_ins;

    /* Carry the JSSpecializedNative used to generate a call to record_NativeCallComplete. */
    JSSpecializedNative*            pendingSpecializedNative;

    /* Carry whether this is a jsval on the native stack from finishGetProp to monitorRecording. */
    Value*                          pendingUnboxSlot;

    /* Carry a guard condition to the beginning of the next monitorRecording. */
    nanojit::LIns*                  pendingGuardCondition;

    /* See AbortRecordingIfUnexpectedGlobalWrite. */
    js::Vector<unsigned>            pendingGlobalSlotsToSet;

    /* Carry whether we have an always-exit from emitIf to checkTraceEnd. */

    /* Temporary JSSpecializedNative used to describe non-specialized fast natives. */
    JSSpecializedNative             generatedSpecializedNative;

    /* Temporary JSValueType array used to construct temporary typemaps. */
    js::Vector<JSValueType, 256>    tempTypeMap;

    /* Used to generate LIR. Has a short name because it's used a lot. */
    /************************************************************* 10 bajillion member functions */

    /*
     * These would be in Writer if they didn't modify TraceRecorder state.
     * They are invoked via the macros below that make them look like they are
     * part of Writer (hence the "w_" prefix, which looks like "w.").
     */
    nanojit::LIns* w_immpObjGC(JSObject* obj);
    nanojit::LIns* w_immpFunGC(JSFunction* fun);
    nanojit::LIns* w_immpStrGC(JSString* str);
    nanojit::LIns* w_immpShapeGC(const js::Shape* shape);
    nanojit::LIns* w_immpIdGC(jsid id);

#define immpObjGC(obj)        name(w_immpObjGC(obj), #obj)
#define immpFunGC(fun)        name(w_immpFunGC(fun), #fun)
#define immpStrGC(str)        name(w_immpStrGC(str), #str)
#define immpAtomGC(atom)      name(w_immpStrGC(ATOM_TO_STRING(atom)), "ATOM_TO_STRING(" #atom ")")
#define immpShapeGC(shape)    name(w_immpShapeGC(shape), #shape)
#define immpIdGC(id)          name(w_immpIdGC(id), #id)
    /*
     * Examines current interpreter state to record information suitable for returning to the
     * interpreter through a side exit of the given type.
     */
    JS_REQUIRES_STACK VMSideExit* snapshot(ExitType exitType);

    /*
     * Creates a separate but identical copy of the given side exit, allowing the guards associated
     * with each to be entirely separate even after subsequent patching.
     */
    JS_REQUIRES_STACK VMSideExit* copy(VMSideExit* exit);

    /*
     * Creates an instruction whose payload is a GuardRecord for the given exit. The instruction
     * is suitable for use as the final argument of a single call to LirBuffer::insGuard; do not
     * reuse the returned value.
     */
    JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);

    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot);
    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc);

    JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
                                                       Queue<unsigned>& undemotes);

    JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi);

    JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, JSValueType* typeMap);

    bool isVoidPtrGlobal(const void* p) const;
    bool isGlobal(const Value* p) const;
    ptrdiff_t nativeGlobalSlot(const Value *p) const;
    ptrdiff_t nativeGlobalOffset(const Value* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackOffsetImpl(const void* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(const Value* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackSlotImpl(const void* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackSlot(const Value* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativespOffsetImpl(const void* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativespOffset(const Value* p) const;
    JS_REQUIRES_STACK void importImpl(tjit::Address addr, const void* p, JSValueType t,
                                      const char *prefix, uintN index, JSStackFrame *fp);
    JS_REQUIRES_STACK void import(tjit::Address addr, const Value* p, JSValueType t,
                                  const char *prefix, uintN index, JSStackFrame *fp);
    JS_REQUIRES_STACK void import(TreeFragment* tree, nanojit::LIns* sp, unsigned stackSlots,
                                  unsigned callDepth, unsigned ngslots, JSValueType* typeMap);
    void trackNativeStackUse(unsigned slots);

    JS_REQUIRES_STACK bool isValidSlot(JSObject *obj, const js::Shape* shape);
    JS_REQUIRES_STACK bool lazilyImportGlobalSlot(unsigned slot);
    JS_REQUIRES_STACK void importGlobalSlot(unsigned slot);

    void ensureCond(nanojit::LIns** ins, bool* cond);

    JS_REQUIRES_STACK RecordingStatus guard(bool expected, nanojit::LIns* cond, ExitType exitType,
                                            bool abortIfAlwaysExits = false);
    JS_REQUIRES_STACK RecordingStatus guard(bool expected, nanojit::LIns* cond, VMSideExit* exit,
                                            bool abortIfAlwaysExits = false);
    JS_REQUIRES_STACK nanojit::LIns* guard_xov(nanojit::LOpcode op, nanojit::LIns* d0,
                                               nanojit::LIns* d1, VMSideExit* exit);

    nanojit::LIns* writeBack(nanojit::LIns* i, nanojit::LIns* base, ptrdiff_t offset,
                             bool shouldDemoteToInt32);

    bool isValidFrameObjPtr(void *obj);

    void assertInsideLoop();
    JS_REQUIRES_STACK void setImpl(void* p, nanojit::LIns* l, bool shouldDemoteToInt32 = true);
    JS_REQUIRES_STACK void set(Value* p, nanojit::LIns* l, bool shouldDemoteToInt32 = true);
    JS_REQUIRES_STACK void setFrameObjPtr(void* p, nanojit::LIns* l,
                                          bool shouldDemoteToInt32 = true);
    nanojit::LIns* getFromTrackerImpl(const void *p);
    nanojit::LIns* getFromTracker(const Value* p);
    JS_REQUIRES_STACK nanojit::LIns* getImpl(const void* p);
    JS_REQUIRES_STACK nanojit::LIns* get(const Value* p);
    JS_REQUIRES_STACK nanojit::LIns* getFrameObjPtr(void* p);
    JS_REQUIRES_STACK nanojit::LIns* attemptImport(const Value* p);
    JS_REQUIRES_STACK nanojit::LIns* addr(Value* p);

    JS_REQUIRES_STACK bool knownImpl(const void* p);
    JS_REQUIRES_STACK bool known(const Value* p);
    JS_REQUIRES_STACK bool known(JSObject** p);

    /*
     * The slots of the global object are sometimes reallocated by the
     * interpreter. This function checks for that condition and re-maps the
     * entries of the tracker accordingly.
     */
    JS_REQUIRES_STACK void checkForGlobalObjectReallocation() {
        if (global_slots != globalObj->getSlots())
            checkForGlobalObjectReallocationHelper();
    }
    JS_REQUIRES_STACK void checkForGlobalObjectReallocationHelper();
    JS_REQUIRES_STACK TypeConsensus selfTypeStability(SlotMap& smap);
    JS_REQUIRES_STACK TypeConsensus peerTypeStability(SlotMap& smap, const void* ip,
                                                      TreeFragment** peer);

    JS_REQUIRES_STACK Value& argval(unsigned n) const;
    JS_REQUIRES_STACK Value& varval(unsigned n) const;
    JS_REQUIRES_STACK Value& stackval(int n) const;

    JS_REQUIRES_STACK void updateAtoms();
    JS_REQUIRES_STACK void updateAtoms(JSScript *script);

        // |tracked| is true iff the result of the name lookup is a variable that
        // is already in the tracker. The rest of the fields are set only if
        // |tracked| is false.
        Value v;                    // current property value
        JSObject *obj;              // Call object where name was found
        nanojit::LIns *obj_ins;     // LIR value for obj
        js::Shape *shape;           // shape name was resolved to
    JS_REQUIRES_STACK nanojit::LIns* scopeChain();
    JS_REQUIRES_STACK nanojit::LIns* entryScopeChain() const;
    JS_REQUIRES_STACK nanojit::LIns* entryFrameIns() const;
    JS_REQUIRES_STACK JSStackFrame* frameIfInRange(JSObject* obj, unsigned* depthp = NULL) const;
    JS_REQUIRES_STACK RecordingStatus traverseScopeChain(JSObject *obj, nanojit::LIns *obj_ins, JSObject *obj2, nanojit::LIns *&obj2_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus scopeChainProp(JSObject* obj, Value*& vp, nanojit::LIns*& ins, NameResult& nr);
    JS_REQUIRES_STACK RecordingStatus callProp(JSObject* obj, JSProperty* shape, jsid id, Value*& vp, nanojit::LIns*& ins, NameResult& nr);

    JS_REQUIRES_STACK nanojit::LIns* arg(unsigned n);
    JS_REQUIRES_STACK void arg(unsigned n, nanojit::LIns* i);
    JS_REQUIRES_STACK nanojit::LIns* var(unsigned n);
    JS_REQUIRES_STACK void var(unsigned n, nanojit::LIns* i);
    JS_REQUIRES_STACK nanojit::LIns* upvar(JSScript* script, JSUpvarArray* uva, uintN index, Value& v);
    nanojit::LIns* stackLoad(tjit::Address addr, uint8 type);
    JS_REQUIRES_STACK nanojit::LIns* stack(int n);
    JS_REQUIRES_STACK void stack(int n, nanojit::LIns* i);
    JS_REQUIRES_STACK void guardNonNeg(nanojit::LIns* d0, nanojit::LIns* d1, VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1,
                                         nanojit::LIns* s0, nanojit::LIns* s1);

    nanojit::LIns* d2i(nanojit::LIns* f, bool resultCanBeImpreciseIfFractional = false);
    nanojit::LIns* d2u(nanojit::LIns* d);
    JS_REQUIRES_STACK RecordingStatus makeNumberInt32(nanojit::LIns* d, nanojit::LIns** num_ins);
    JS_REQUIRES_STACK RecordingStatus makeNumberUint32(nanojit::LIns* d, nanojit::LIns** num_ins);
    JS_REQUIRES_STACK nanojit::LIns* stringify(const Value& v);

    JS_REQUIRES_STACK nanojit::LIns* newArguments(nanojit::LIns* callee_ins, bool strict);

    JS_REQUIRES_STACK bool canCallImacro() const;
    JS_REQUIRES_STACK RecordingStatus callImacro(jsbytecode* imacro);
    JS_REQUIRES_STACK RecordingStatus callImacroInfallibly(jsbytecode* imacro);

    JS_REQUIRES_STACK AbortableRecordingStatus ifop();
    JS_REQUIRES_STACK RecordingStatus switchop();
    JS_REQUIRES_STACK AbortableRecordingStatus tableswitch();
    JS_REQUIRES_STACK RecordingStatus inc(Value& v, jsint incr, bool pre = true);
    JS_REQUIRES_STACK RecordingStatus inc(const Value &v, nanojit::LIns*& v_ins,
                                          Value &v_out, jsint incr,
    JS_REQUIRES_STACK RecordingStatus incHelper(const Value &v, nanojit::LIns*& v_ins,
                                                nanojit::LIns*& v_ins_after,
    JS_REQUIRES_STACK AbortableRecordingStatus incProp(jsint incr, bool pre = true);
    JS_REQUIRES_STACK RecordingStatus incElem(jsint incr, bool pre = true);
    JS_REQUIRES_STACK AbortableRecordingStatus incName(jsint incr, bool pre = true);

    JS_REQUIRES_STACK RecordingStatus strictEquality(bool equal, bool cmpCase);
    JS_REQUIRES_STACK AbortableRecordingStatus equality(bool negate, bool tryBranchAfterCond);
    JS_REQUIRES_STACK AbortableRecordingStatus equalityHelper(Value& l, Value& r,
                                                              nanojit::LIns* l_ins, nanojit::LIns* r_ins,
                                                              bool negate, bool tryBranchAfterCond,
    JS_REQUIRES_STACK AbortableRecordingStatus relational(nanojit::LOpcode op, bool tryBranchAfterCond);

    JS_REQUIRES_STACK RecordingStatus unary(nanojit::LOpcode op);
    JS_REQUIRES_STACK RecordingStatus binary(nanojit::LOpcode op);

    JS_REQUIRES_STACK RecordingStatus guardShape(nanojit::LIns* obj_ins, JSObject* obj,
                                                 uint32 shape, const char* name, VMSideExit* exit);
#if defined DEBUG_notme && defined XP_UNIX
    void dumpGuardedShapes(const char* prefix);

    void forgetGuardedShapes();

    JS_REQUIRES_STACK AbortableRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins,
                                                                   JSObject*& obj2, PCVal& pcval);
    JS_REQUIRES_STACK RecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins,
                                                            PropertyCacheEntry* entry,
    void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot, const Value &v,
                         nanojit::LIns* v_ins);
    void stobj_set_dslot(nanojit::LIns *obj_ins, unsigned slot,
                         nanojit::LIns*& slots_ins, const Value &v, nanojit::LIns* v_ins);
    void stobj_set_slot(JSObject *obj, nanojit::LIns* obj_ins, unsigned slot,
                        nanojit::LIns*& slots_ins, const Value &v, nanojit::LIns* v_ins);

    nanojit::LIns* unbox_slot(JSObject *obj, nanojit::LIns *obj_ins, uint32 slot,

    JS_REQUIRES_STACK AbortableRecordingStatus name(Value*& vp, nanojit::LIns*& ins, NameResult& nr);
    JS_REQUIRES_STACK AbortableRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins,
                                                    uint32 *slotp, nanojit::LIns** v_insp,
    JS_REQUIRES_STACK RecordingStatus propTail(JSObject* obj, nanojit::LIns* obj_ins,
                                               JSObject* obj2, PCVal pcval,
                                               uint32 *slotp, nanojit::LIns** v_insp,
    JS_REQUIRES_STACK RecordingStatus denseArrayElement(Value& oval, Value& idx, Value*& vp,
                                                        nanojit::LIns*& v_ins,
                                                        nanojit::LIns*& addr_ins,
    JS_REQUIRES_STACK nanojit::LIns *canonicalizeNaNs(nanojit::LIns *dval_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus typedArrayElement(Value& oval, Value& idx, Value*& vp,
                                                                 nanojit::LIns*& v_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus getProp(Value& v);
    JS_REQUIRES_STACK RecordingStatus getThis(nanojit::LIns*& this_ins);

    JS_REQUIRES_STACK void storeMagic(JSWhyMagic why, tjit::Address addr);
    JS_REQUIRES_STACK AbortableRecordingStatus unboxNextValue(nanojit::LIns* &v_ins);
    JS_REQUIRES_STACK VMSideExit* enterDeepBailCall();
    JS_REQUIRES_STACK void leaveDeepBailCall();

    JS_REQUIRES_STACK RecordingStatus primitiveToStringInPlace(Value* vp);
    JS_REQUIRES_STACK void finishGetProp(nanojit::LIns* obj_ins, nanojit::LIns* vp_ins,
                                         nanojit::LIns* ok_ins, Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyByName(nanojit::LIns* obj_ins, Value* idvalp,
    JS_REQUIRES_STACK RecordingStatus getPropertyByIndex(nanojit::LIns* obj_ins,
                                                         nanojit::LIns* index_ins, Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyById(nanojit::LIns* obj_ins, Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyWithNativeGetter(nanojit::LIns* obj_ins,
                                                                  const js::Shape* shape,
    JS_REQUIRES_STACK RecordingStatus getPropertyWithScriptGetter(JSObject *obj,
                                                                  nanojit::LIns* obj_ins,
                                                                  const js::Shape* shape);

    JS_REQUIRES_STACK RecordingStatus getCharCodeAt(JSString *str,
                                                    nanojit::LIns* str_ins, nanojit::LIns* idx_ins,
                                                    nanojit::LIns** out_ins);
    JS_REQUIRES_STACK nanojit::LIns* getUnitString(nanojit::LIns* str_ins, nanojit::LIns* idx_ins);
    JS_REQUIRES_STACK RecordingStatus getCharAt(JSString *str,
                                                nanojit::LIns* str_ins, nanojit::LIns* idx_ins,
                                                JSOp mode, nanojit::LIns** out_ins);
    JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByName(nanojit::LIns* obj_ins,
                                                              Value* idvalp, Value* rvalp,
    JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByIndex(nanojit::LIns* obj_ins,
                                                               nanojit::LIns* index_ins,
                                                               Value* rvalp, bool init);
    JS_REQUIRES_STACK AbortableRecordingStatus setElem(int lval_spindex, int idx_spindex,

    JS_REQUIRES_STACK RecordingStatus lookupForSetPropertyOp(JSObject* obj, nanojit::LIns* obj_ins,
                                                             jsid id, bool* safep,
                                                             const js::Shape** shapep);
    JS_REQUIRES_STACK RecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins,
                                                const js::Shape* shape,
                                                const Value& v, nanojit::LIns* v_ins);
    JS_REQUIRES_STACK RecordingStatus addDataProperty(JSObject* obj);
    JS_REQUIRES_STACK RecordingStatus setCallProp(JSObject* callobj, nanojit::LIns* callobj_ins,
                                                  const js::Shape* shape, nanojit::LIns* v_ins,
    JS_REQUIRES_STACK RecordingStatus setProperty(JSObject* obj, nanojit::LIns* obj_ins,
                                                  const Value& v, nanojit::LIns* v_ins,
    JS_REQUIRES_STACK RecordingStatus recordSetPropertyOp();
    JS_REQUIRES_STACK RecordingStatus recordInitPropertyOp(jsbytecode op);
    void box_undefined_into(tjit::Address addr);
#if JS_BITS_PER_WORD == 32
    void box_null_into(tjit::Address addr);
    nanojit::LIns* unbox_number_as_double(tjit::Address addr, nanojit::LIns* tag_ins,
    nanojit::LIns* unbox_object(tjit::Address addr, nanojit::LIns* tag_ins, JSValueType type,
    nanojit::LIns* unbox_non_double_object(tjit::Address addr, nanojit::LIns* tag_ins,
                                           JSValueType type, VMSideExit* exit);
#elif JS_BITS_PER_WORD == 64
    nanojit::LIns* non_double_object_value_has_type(nanojit::LIns* v_ins, JSValueType type);
    nanojit::LIns* unpack_ptr(nanojit::LIns* v_ins);
    nanojit::LIns* unbox_number_as_double(nanojit::LIns* v_ins, VMSideExit* exit);
    nanojit::LIns* unbox_object(nanojit::LIns* v_ins, JSValueType type, VMSideExit* exit);
    nanojit::LIns* unbox_non_double_object(nanojit::LIns* v_ins, JSValueType type, VMSideExit* exit);

    nanojit::LIns* unbox_value(const Value& v, tjit::Address addr, VMSideExit* exit,
                               bool force_double = false);
    void unbox_any_object(tjit::Address addr, nanojit::LIns** obj_ins, nanojit::LIns** is_obj_ins);
    nanojit::LIns* is_boxed_true(tjit::Address addr);
    nanojit::LIns* is_boxed_magic(tjit::Address addr, JSWhyMagic why);

    nanojit::LIns* is_string_id(nanojit::LIns* id_ins);
    nanojit::LIns* unbox_string_id(nanojit::LIns* id_ins);
    nanojit::LIns* unbox_int_id(nanojit::LIns* id_ins);
    /* Box a slot on trace into the given address at the given offset. */
    void box_value_into(const Value& v, nanojit::LIns* v_ins, tjit::Address addr);

    /*
     * Box a slot so that it may be passed with value semantics to a native. On
     * 32-bit, this currently means boxing the value into insAlloc'd memory and
     * returning the address which is passed as a Value*. On 64-bit, this
     * currently means returning the boxed value which is passed as a jsval.
     */
    nanojit::LIns* box_value_for_native_call(const Value& v, nanojit::LIns* v_ins);

    /* Box a slot into insAlloc'd memory. */
    nanojit::LIns* box_value_into_alloc(const Value& v, nanojit::LIns* v_ins);
    JS_REQUIRES_STACK void guardClassHelper(bool cond, nanojit::LIns* obj_ins, Class* clasp,
                                            VMSideExit* exit, nanojit::LoadQual loadQual);
    JS_REQUIRES_STACK void guardClass(nanojit::LIns* obj_ins, Class* clasp,
                                      VMSideExit* exit, nanojit::LoadQual loadQual);
    JS_REQUIRES_STACK void guardNotClass(nanojit::LIns* obj_ins, Class* clasp,
                                         VMSideExit* exit, nanojit::LoadQual loadQual);
    JS_REQUIRES_STACK void guardDenseArray(nanojit::LIns* obj_ins, ExitType exitType);
    JS_REQUIRES_STACK void guardDenseArray(nanojit::LIns* obj_ins, VMSideExit* exit);
    JS_REQUIRES_STACK bool guardHasPrototype(JSObject* obj, nanojit::LIns* obj_ins,
                                             JSObject** pobj, nanojit::LIns** pobj_ins,
    JS_REQUIRES_STACK RecordingStatus guardPrototypeHasNoIndexedProperties(JSObject* obj,
                                                                           nanojit::LIns* obj_ins,
    JS_REQUIRES_STACK RecordingStatus guardNativeConversion(Value& v);
    JS_REQUIRES_STACK void clearReturningFrameFromNativeTracker();
    JS_REQUIRES_STACK void putActivationObjects();
    JS_REQUIRES_STACK RecordingStatus guardCallee(Value& callee);
    JS_REQUIRES_STACK JSStackFrame *guardArguments(JSObject *obj, nanojit::LIns* obj_ins,
    JS_REQUIRES_STACK nanojit::LIns* guardArgsLengthNotAssigned(nanojit::LIns* argsobj_ins);
    JS_REQUIRES_STACK void guardNotHole(nanojit::LIns *argsobj_ins, nanojit::LIns *ids_ins);
    JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSObject* ctor,
                                                        nanojit::LIns*& proto_ins);
    JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSProtoKey key,
                                                        nanojit::LIns*& proto_ins);
    JS_REQUIRES_STACK RecordingStatus newArray(JSObject* ctor, uint32 argc, Value* argv,
    JS_REQUIRES_STACK RecordingStatus newString(JSObject* ctor, uint32 argc, Value* argv,
    JS_REQUIRES_STACK RecordingStatus interpretedFunctionCall(Value& fval, JSFunction* fun,
                                                              uintN argc, bool constructing);
    JS_REQUIRES_STACK void propagateFailureToBuiltinStatus(nanojit::LIns *ok_ins,
                                                           nanojit::LIns *&status_ins);
    JS_REQUIRES_STACK RecordingStatus emitNativeCall(JSSpecializedNative* sn, uintN argc,
                                                     nanojit::LIns* args[], bool rooted);
    JS_REQUIRES_STACK void emitNativePropertyOp(const js::Shape* shape,
                                                nanojit::LIns* obj_ins,
                                                nanojit::LIns* addr_boxed_val_ins);
    JS_REQUIRES_STACK RecordingStatus callSpecializedNative(JSNativeTraceInfo* trcinfo, uintN argc,
    JS_REQUIRES_STACK RecordingStatus callNative(uintN argc, JSOp mode);
    JS_REQUIRES_STACK RecordingStatus callFloatReturningInt(uintN argc,
                                                            const nanojit::CallInfo *ci);
    JS_REQUIRES_STACK RecordingStatus functionCall(uintN argc, JSOp mode);

    JS_REQUIRES_STACK void trackCfgMerges(jsbytecode* pc);
    JS_REQUIRES_STACK void emitIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK AbortableRecordingStatus checkTraceEnd(jsbytecode* pc);

    AbortableRecordingStatus hasMethod(JSObject* obj, jsid id, bool& found);
    JS_REQUIRES_STACK AbortableRecordingStatus hasIteratorMethod(JSObject* obj, bool& found);

    JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);

    JS_REQUIRES_STACK JSValueType determineSlotType(Value* vp);

    JS_REQUIRES_STACK RecordingStatus setUpwardTrackedVar(Value* stackVp, const Value& v,
                                                          nanojit::LIns* v_ins);

    JS_REQUIRES_STACK AbortableRecordingStatus compile();
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop(VMSideExit* exit);
    JS_REQUIRES_STACK void joinEdgesToEntry(TreeFragment* peer_root);
    JS_REQUIRES_STACK void adjustCallerTypes(TreeFragment* f);
    JS_REQUIRES_STACK void prepareTreeCall(TreeFragment* inner);
    JS_REQUIRES_STACK void emitTreeCall(TreeFragment* inner, VMSideExit* exit);
    JS_REQUIRES_STACK void determineGlobalTypes(JSValueType* typeMap);
    JS_REQUIRES_STACK VMSideExit* downSnapshot(FrameInfo* downFrame);
    JS_REQUIRES_STACK TreeFragment* findNestedCompatiblePeer(TreeFragment* f);
    JS_REQUIRES_STACK AbortableRecordingStatus attemptTreeCall(TreeFragment* inner,
                                                               uintN& inlineCallCount);

    static JS_REQUIRES_STACK MonitorResult recordLoopEdge(JSContext* cx, TraceRecorder* r,
                                                          uintN& inlineCallCount);

    /* Allocators associated with this recording session. */
    VMAllocator& tempAlloc() const { return *traceMonitor->tempAlloc; }
    VMAllocator& traceAlloc() const { return *traceMonitor->traceAlloc; }
    VMAllocator& dataAlloc() const { return *traceMonitor->dataAlloc; }

    /* Member declarations for each opcode, to be called before interpreting the opcode. */
#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format)               \
    JS_REQUIRES_STACK AbortableRecordingStatus record_##op();
# include "jsopcode.tbl"
    TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
                  unsigned stackSlots, unsigned ngslots, JSValueType* typeMap,
                  VMSideExit* expectedInnerExit, JSScript* outerScript, jsbytecode* outerPC,
                  uint32 outerArgc, bool speculate);

    /* The destructor should only be called through finish*, not directly. */

    JS_REQUIRES_STACK AbortableRecordingStatus finishSuccessfully();

    enum AbortResult { NORMAL_ABORT, JIT_RESET };
    JS_REQUIRES_STACK AbortResult finishAbort(const char* reason);
    friend class ImportBoxedStackSlotVisitor;
    friend class ImportUnboxedStackSlotVisitor;
    friend class ImportGlobalSlotVisitor;
    friend class AdjustCallerGlobalTypesVisitor;
    friend class AdjustCallerStackTypesVisitor;
    friend class TypeCompatibilityVisitor;
    friend class ImportFrameSlotsVisitor;
    friend class SlotMap;
    friend class DefaultSlotMap;
    friend class DetermineTypesVisitor;
    friend class RecursiveSlotMap;
    friend class UpRecursiveSlotMap;
    friend MonitorResult RecordLoopEdge(JSContext*, uintN&);
    friend TracePointAction RecordTracePoint(JSContext*, uintN &inlineCallCount,
    friend AbortResult AbortRecording(JSContext*, const char*);
    friend class BoxArg;
    friend void TraceMonitor::sweep(JSContext *cx);
    static bool JS_REQUIRES_STACK
    startRecorder(JSContext*, VMSideExit*, VMFragment*,
                  unsigned stackSlots, unsigned ngslots, JSValueType* typeMap,
                  VMSideExit* expectedInnerExit, JSScript* outerScript, jsbytecode* outerPC,
                  uint32 outerArgc, bool speculate);

    VMFragment* getFragment() const { return fragment; }
    TreeFragment* getTree() const { return tree; }
    bool outOfMemory() const { return traceMonitor->outOfMemory(); }
    Oracle* getOracle() const { return oracle; }
    JSObject* getGlobal() const { return globalObj; }
    /* Entry points / callbacks from the interpreter. */
    JS_REQUIRES_STACK AbortableRecordingStatus monitorRecording(JSOp op);
    JS_REQUIRES_STACK AbortableRecordingStatus record_EnterFrame();
    JS_REQUIRES_STACK AbortableRecordingStatus record_LeaveFrame();
    JS_REQUIRES_STACK AbortableRecordingStatus record_AddProperty(JSObject *obj);
    JS_REQUIRES_STACK AbortableRecordingStatus record_DefLocalFunSetSlot(uint32 slot,
    JS_REQUIRES_STACK AbortableRecordingStatus record_NativeCallComplete();
    void forgetGuardedShapesForObject(JSObject* obj);

    bool globalSetExpected(unsigned slot) {
        unsigned *pi = Find(pendingGlobalSlotsToSet, slot);
        if (pi == pendingGlobalSlotsToSet.end()) {
            /*
             * Do slot arithmetic manually to avoid getSlotRef assertions which
             * do not need to be satisfied for this purpose.
             */
            Value *vp = globalObj->getSlots() + slot;

            /* If this global is definitely being tracked, then the write is unexpected. */
            if (tracker.has(vp))

            /*
             * Otherwise, only abort if the global is not present in the
             * import typemap. Just deep aborting false here is not acceptable,
             * because the recorder does not guard on every operation that
             * could lazily resolve. Since resolving adds properties to
             * reserved slots, the tracer will never have imported them.
             */
            return tree->globalSlots->offsetOf((uint16)nativeGlobalSlot(vp)) == -1;
        }
        pendingGlobalSlotsToSet.erase(pi);
    /* Debug printing functionality to emit printf() on trace. */
    JS_REQUIRES_STACK void tprint(const char *format, int count, nanojit::LIns *insa[]);
    JS_REQUIRES_STACK void tprint(const char *format);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4, nanojit::LIns *ins5);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4, nanojit::LIns *ins5,
                                  nanojit::LIns *ins6);
#define TRACING_ENABLED(cx)         ((cx)->traceJitEnabled)
#define REGEX_JIT_ENABLED(cx)       ((cx)->traceJitEnabled || (cx)->methodJitEnabled)
#define TRACE_RECORDER(cx)          (JS_TRACE_MONITOR(cx).recorder)
#define TRACE_PROFILER(cx)          (JS_TRACE_MONITOR(cx).profile)
#define SET_TRACE_RECORDER(cx,tr)   (JS_TRACE_MONITOR(cx).recorder = (tr))

#define JSOP_IN_RANGE(op,lo,hi)     (uintN((op) - (lo)) <= uintN((hi) - (lo)))
#define JSOP_IS_BINARY(op)          JSOP_IN_RANGE(op, JSOP_BITOR, JSOP_MOD)
#define JSOP_IS_UNARY(op)           JSOP_IN_RANGE(op, JSOP_NEG, JSOP_POS)
#define JSOP_IS_EQUALITY(op)        JSOP_IN_RANGE(op, JSOP_EQ, JSOP_NE)

#define TRACE_ARGS_(x,args)                                                   \
        if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) {                        \
            AbortableRecordingStatus status = tr_->record_##x args;           \
            if (StatusAbortsRecorderIfActive(status)) {                       \
                if (TRACE_RECORDER(cx)) {                                     \
                    JS_ASSERT(TRACE_RECORDER(cx) == tr_);                     \
                    AbortRecording(cx, #x);                                   \
                if (status == ARECORD_ERROR)                                  \
            JS_ASSERT(status != ARECORD_IMACRO);                              \

#define TRACE_ARGS(x,args)      TRACE_ARGS_(x, args)
#define TRACE_0(x)              TRACE_ARGS(x, ())
#define TRACE_1(x,a)            TRACE_ARGS(x, (a))
#define TRACE_2(x,a,b)          TRACE_ARGS(x, (a, b))
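
/*
 * Illustrative sketch (hypothetical call site, not part of the original
 * header): how the interpreter-facing TRACE_* macros are meant to be used.
 * If a recorder is active, its record_<op> hook runs and recording is
 * aborted when the returned status requires it.
 */
#if 0
    // Inside an interpreter case for a hypothetical opcode "SomeOp" with operand obj:
    TRACE_1(SomeOp, obj);   // expands to tr_->record_SomeOp(obj) when recording
#endif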
extern JS_REQUIRES_STACK MonitorResult
MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount);

extern JS_REQUIRES_STACK MonitorResult
ProfileLoopEdge(JSContext* cx, uintN& inlineCallCount);

extern JS_REQUIRES_STACK TracePointAction
RecordTracePoint(JSContext*, uintN& inlineCallCount, bool* blacklist);

extern JS_REQUIRES_STACK TracePointAction
MonitorTracePoint(JSContext*, uintN& inlineCallCount, bool* blacklist,
                  void** traceData, uintN *traceEpoch, uint32 *loopCounter, uint32 hits);
extern JS_REQUIRES_STACK TraceRecorder::AbortResult
AbortRecording(JSContext* cx, const char* reason);

InitJIT(TraceMonitor *tm);

FinishJIT(TraceMonitor *tm);

PurgeScriptFragments(TraceMonitor* tm, JSScript* script);

OverfullJITCache(JSContext *cx, TraceMonitor* tm);

FlushJITCache(JSContext* cx);

GetBuiltinFunction(JSContext *cx, uintN index);

SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);

ExternNativeToValue(JSContext* cx, Value& v, JSValueType type, double* slot);
extern JS_FRIEND_API(bool)
StartTraceVis(const char* filename);

extern JS_FRIEND_API(JSBool)
StartTraceVisNative(JSContext *cx, uintN argc, jsval *vp);

extern JS_FRIEND_API(bool)

extern JS_FRIEND_API(JSBool)
StopTraceVisNative(JSContext *cx, uintN argc, jsval *vp);

/* Must contain no more than 16 items. */
enum TraceVisState {
    // Special: means we returned from current activity to last

    // Events: these all have (bit 3) == 1.

/* Reason for an exit to the interpreter. */
enum TraceVisExitReason {

    /* Reasons in MonitorLoopEdge */
    R_FAIL_EXECUTE_TREE,
    R_FAIL_EXTEND_FLUSH,
    R_FAIL_EXTEND_MAX_BRANCHES,
    R_FAIL_EXTEND_START,
    R_FAIL_SCOPE_CHAIN_CHECK,
enum TraceVisFlushReason {
    FR_GLOBAL_SHAPE_MISMATCH,

const unsigned long long MS64_MASK = 0xfull << 60;
const unsigned long long MR64_MASK = 0x1full << 55;
const unsigned long long MT64_MASK = ~(MS64_MASK | MR64_MASK);
extern FILE* traceVisLogFile;
extern JSHashTable *traceVisScriptTable;

extern JS_FRIEND_API(void)
StoreTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r);

LogTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
    if (traceVisLogFile) {
        unsigned long long sllu = s;
        unsigned long long rllu = r;
        unsigned long long d = (sllu << 60) | (rllu << 55) | (rdtsc() & MT64_MASK);
        fwrite(&d, sizeof(d), 1, traceVisLogFile);
    }
    if (traceVisScriptTable) {
        StoreTraceVisState(cx, s, r);
    }
}
/*
 * Although this runs the same code as LogTraceVisState, it is a separate
 * function because the meaning of the log entry is different. Also, the entry
 * formats may diverge someday.
 */
LogTraceVisEvent(JSContext *cx, TraceVisState s, TraceVisFlushReason r)
{
    LogTraceVisState(cx, s, (TraceVisExitReason) r);
}

EnterTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
    LogTraceVisState(cx, s, r);
}

ExitTraceVisState(JSContext *cx, TraceVisExitReason r)
{
    LogTraceVisState(cx, S_EXITLAST, r);
}

struct TraceVisStateObj {
    TraceVisExitReason r;

    inline TraceVisStateObj(JSContext *cx, TraceVisState s) : r(R_NONE)
    {
        EnterTraceVisState(cx, s, R_NONE);
    }

    inline ~TraceVisStateObj()
    {
        ExitTraceVisState(mCx, r);
    }

#endif /* MOZ_TRACEVIS */
} /* namespace js */

#else  /* !JS_TRACER */

#define TRACE_0(x)              ((void)0)
#define TRACE_1(x,a)            ((void)0)
#define TRACE_2(x,a,b)          ((void)0)

#endif /* !JS_TRACER */
/*
 * While recording, the slots of the global object may change payload or type.
 * This is fine as long as the recorder expects this change (and therefore has
 * generated the corresponding LIR, snapshots, etc). The recorder indicates
 * that it expects a write to a global slot by setting pendingGlobalSlotsToSet
 * in the recorder, before the write is made by the interpreter, and clearing
 * pendingGlobalSlotsToSet before recording the next op. Any global slot write
 * that has not been whitelisted in this manner is therefore unexpected and, if
 * the global slot is actually being tracked, recording must be aborted.
 */
static JS_INLINE void
AbortRecordingIfUnexpectedGlobalWrite(JSContext *cx, JSObject *obj, unsigned slot)
{
    if (TraceRecorder *tr = TRACE_RECORDER(cx)) {
        if (obj == tr->getGlobal() && !tr->globalSetExpected(slot))
            AbortRecording(cx, "Global slot written outside tracer supervision");
    }
}
} /* namespace js */

#endif /* jstracer_h___ */