/*
** Trace recorder (bytecode -> SSA IR).
** Copyright (C) 2005-2014 Mike Pall. See Copyright Notice in luajit.h
*/

#include "lj_ircall.h"
#include "lj_record.h"
#include "lj_ffrecord.h"
#include "lj_dispatch.h"

/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref) (&J->cur.ir[(ref)])

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

/* Emit raw IR without passing through optimizations. */
#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
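/* Note: emitir() feeds the instruction to the FOLD/CSE engine, so the
** returned reference may be a constant or an already-emitted instruction,
** e.g. emitir(IRTI(IR_ADD), tr, lj_ir_kint(J, 0)) typically folds back to tr.
** emitir_raw() always appends the instruction verbatim.
*/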
/* -- Sanity checks ------------------------------------------------------- */

/* Sanity check the whole IR -- sloooow. */
static void rec_check_ir(jit_State *J)
  IRRef i, nins = J->cur.nins, nk = J->cur.nk;
  lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536);
  for (i = nins-1; i >= nk; i--) {
    uint32_t mode = lj_ir_mode[ir->o];
    switch (irm_op1(mode)) {
    case IRMnone: lua_assert(op1 == 0); break;
    case IRMref: lua_assert(op1 >= nk);
      lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break;
    case IRMcst: lua_assert(i < REF_BIAS); continue;
    switch (irm_op2(mode)) {
    case IRMnone: lua_assert(op2 == 0); break;
    case IRMref: lua_assert(op2 >= nk);
      lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break;
    case IRMcst: lua_assert(0); break;
    lua_assert(ir->prev >= nk);
    lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i);
    lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o);

/* Compare stack slots and frames of the recorder and the VM. */
static void rec_check_slots(jit_State *J)
  BCReg s, nslots = J->baseslot + J->maxslot;
  cTValue *base = J->L->base - J->baseslot;
  lua_assert(J->baseslot >= 1 && J->baseslot < LJ_MAX_JSLOTS);
  lua_assert(J->baseslot == 1 || (J->slot[J->baseslot-1] & TREF_FRAME));
  lua_assert(nslots < LJ_MAX_JSLOTS);
  for (s = 0; s < nslots; s++) {
    cTValue *tv = &base[s];
    IRRef ref = tref_ref(tr);
    lua_assert(ref >= J->cur.nk && ref < J->cur.nins);
    lua_assert(irt_t(ir->t) == tref_t(tr));
      lua_assert(tref_isfunc(tr));
    } else if ((tr & TREF_FRAME)) {
      GCfunc *fn = gco2func(frame_gc(tv));
      BCReg delta = (BCReg)(tv - frame_prev(tv));
      lua_assert(tref_isfunc(tr));
      if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir));
      lua_assert(s > delta ? (J->slot[s-delta] & TREF_FRAME) : (s == delta));
    } else if ((tr & TREF_CONT)) {
      lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void));
      lua_assert((J->slot[s+1] & TREF_FRAME));
      lua_assert(tref_isnumber(tr));  /* Could be IRT_INT etc., too. */
      lua_assert(itype2irt(tv) == tref_type(tr));
      if (tref_isk(tr)) {  /* Compare constants. */
        lj_ir_kvalue(J->L, &tvk, ir);
        if (!(tvisnum(&tvk) && tvisnan(&tvk)))
          lua_assert(lj_obj_equal(tv, &tvk));
          lua_assert(tvisnum(tv) && tvisnan(tv));
  lua_assert(J->framedepth == depth);
/* -- Type handling and specialization ------------------------------------ */

/* Note: these functions return tagged references (TRef). */

/* Specialize a slot to a specific type. Note: slot can be negative! */
static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode)
  /* Caller may set IRT_GUARD in t. */
  TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode);

/* Specialize a slot to the runtime type. Note: slot can be negative! */
static TRef sload(jit_State *J, int32_t slot)
  IRType t = itype2irt(&J->L->base[slot]);
  TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot,
                        IRSLOAD_TYPECHECK);
  if (irtype_ispri(t)) ref = TREF_PRI(t);  /* Canonicalize primitive refs. */

/* Get TRef from slot. Load slot and specialize if not done already. */
#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s)))

/* Get TRef for current function. */
static TRef getcurrf(jit_State *J)
  lua_assert(J->baseslot == 1);
  return sloadt(J, -1, IRT_FUNC, IRSLOAD_READONLY);

/* Compare for raw object equality.
** Returns 0 if the objects are the same.
** Returns 1 if they are different, but the same type.
** Returns 2 for two different types.
** Comparisons between primitives always return 1 -- no caller cares about it.
*/
int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv)
  int diff = !lj_obj_equal(av, bv);
  if (!tref_isk2(a, b)) {  /* Shortcut, also handles primitives. */
    IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a);
    IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b);
    /* Widen mixed number/int comparisons to number/number comparison. */
    if (ta == IRT_INT && tb == IRT_NUM) {
      a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT);
    } else if (ta == IRT_NUM && tb == IRT_INT) {
      b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT);
      return 2;  /* Two different types are never equal. */
    emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b);

/* Constify a value. Returns 0 for non-representable object types. */
TRef lj_record_constify(jit_State *J, cTValue *o)
    return lj_ir_kgc(J, gcV(o), itype2irt(o));
    return lj_ir_kint(J, intV(o));
    return lj_ir_knumint(J, numV(o));
  else if (tvisbool(o))
    return TREF_PRI(itype2irt(o));
  return 0;  /* Can't represent lightuserdata (pointless). */
/* -- Record loop ops ----------------------------------------------------- */

typedef enum {
  LOOPEV_LEAVE,    /* Loop is left or not entered. */
  LOOPEV_ENTERLO,  /* Loop is entered with a low iteration count left. */
  LOOPEV_ENTER     /* Loop is entered. */
} LoopEvent;

/* Canonicalize slots: convert integers to numbers. */
static void canonicalize_slots(jit_State *J)
  if (LJ_DUALNUM) return;
  for (s = J->baseslot+J->maxslot-1; s >= 1; s--) {
    TRef tr = J->slot[s];
    if (tref_isinteger(tr)) {
      IRIns *ir = IR(tref_ref(tr));
      if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY)))
        J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);

/* Stop recording. */
void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
  J->cur.linktype = (uint8_t)linktype;
  J->cur.link = (uint16_t)lnk;
  /* Looping back at the same stack level? */
  if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) {
    if ((J->flags & JIT_F_OPT_LOOP))  /* Shall we try to create a loop? */
      goto nocanon;  /* Do not canonicalize or we lose the narrowing. */
    if (J->cur.root)  /* Otherwise ensure we always link to the root trace. */
      J->cur.link = J->cur.root;
  canonicalize_slots(J);
  /* Note: all loop ops must set J->pc to the following instruction! */
  lj_snap_add(J);  /* Add loop snapshot. */
  J->mergesnap = 1;  /* In case recording continues. */
/* Search bytecode backwards for an int/num constant slot initializer. */
static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t)
  /* This algorithm is rather simplistic and assumes quite a bit about
  ** how the bytecode is generated. It works fine for FORI initializers,
  ** but it won't necessarily work in other cases (e.g. iterator arguments).
  ** It doesn't do anything fancy, either (like backpropagating MOVs).
  */
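  /* For example, in "for i=1,100 do ... end" the index/stop/step slots are
  ** initialized by KSHORT (or KNUM) instructions right before the FORI,
  ** which is the pattern this backward scan recognizes and constifies.
  */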
  const BCIns *pc, *startpc = proto_bc(J->pt);
  for (pc = endpc-1; pc > startpc; pc--) {
    BCOp op = bc_op(ins);
    /* First try to find the last instruction that stores to this slot. */
    if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) {
      return 0;  /* Multiple results, e.g. from a CALL or KNIL. */
    } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) {
      if (op == BC_KSHORT || op == BC_KNUM) {  /* Found const. initializer. */
        /* Now try to verify there's no forward jump across it. */
        const BCIns *kpc = pc;
        for (; pc > startpc; pc--)
          if (bc_op(*pc) == BC_JMP) {
            const BCIns *target = pc+bc_j(*pc)+1;
            if (target > kpc && target <= endpc)
              return 0;  /* Conditional assignment. */
        if (op == BC_KSHORT) {
          int32_t k = (int32_t)(int16_t)bc_d(ins);
          return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k);
          cTValue *tv = proto_knumtv(J->pt, bc_d(ins));
            int32_t k = numberVint(tv);
            if (tvisint(tv) || numV(tv) == (lua_Number)k)  /* -0 is ok here. */
              return lj_ir_kint(J, k);
            return 0;  /* Type mismatch. */
          return lj_ir_knum(J, numberVnum(tv));
      return 0;  /* Non-constant initializer. */
  return 0;  /* No assignment to this slot found? */

/* Load and optionally convert a FORI argument from a slot. */
static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode)
  int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0;
  return sloadt(J, (int32_t)slot,
                t + (((mode & IRSLOAD_TYPECHECK) ||
                      (conv && t == IRT_INT && !(mode >> 16))) ?
                     IRSLOAD_TYPECHECK : 0),
                mode + conv);

/* Peek before FORI to find a const initializer. Otherwise load from slot. */
static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot,
                     IRType t, int mode)
  TRef tr = J->base[slot];
    tr = find_kinit(J, fori, slot, t);
    tr = fori_load(J, slot, t, mode);
/* Return the direction of the FOR loop iterator.
** It's important to exactly reproduce the semantics of the interpreter.
*/
static int rec_for_direction(cTValue *o)
  return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0;
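/* Note: for a non-integer step this reads the high word of the double
** (o->u32.hi), i.e. the sign bit decides the direction rather than a
** numeric comparison against zero, matching the interpreter's check.
*/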
/* Simulate the runtime behavior of the FOR loop iterator. */
static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl)
  lua_Number stopv = numberVnum(&o[FORL_STOP]);
  lua_Number idxv = numberVnum(&o[FORL_IDX]);
  lua_Number stepv = numberVnum(&o[FORL_STEP]);
  if (rec_for_direction(&o[FORL_STEP])) {
      return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
    *op = IR_GT; return LOOPEV_LEAVE;
      return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
    *op = IR_LT; return LOOPEV_LEAVE;

/* Record checks for FOR loop overflow and step direction. */
static void rec_for_check(jit_State *J, IRType t, int dir,
                          TRef stop, TRef step, int init)
  if (!tref_isk(step)) {
    /* Non-constant step: need a guard for the direction. */
    TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J);
    emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero);
    /* Add hoistable overflow checks for a narrowed FORL index. */
    if (init && t == IRT_INT) {
      if (tref_isk(stop)) {
        /* Constant stop: optimize check away or to a range check for step. */
        int32_t k = IR(tref_ref(stop))->i;
          emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k));
          emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k));
        /* Stop+step variable: need full overflow check. */
        TRef tr = emitir(IRTGI(IR_ADDOV), step, stop);
        emitir(IRTI(IR_USE), tr, 0);  /* ADDOV is weak. Avoid dead result. */
  } else if (init && t == IRT_INT && !tref_isk(stop)) {
    /* Constant step: optimize overflow check to a range check for stop. */
    int32_t k = IR(tref_ref(step))->i;
    k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k;
    emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k));

/* Record a FORL instruction. */
static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
                         int init)
  BCReg ra = bc_a(*fori);
  cTValue *tv = &J->L->base[ra];
  TRef idx = J->base[ra+FORL_IDX];
  IRType t = idx ? tref_type(idx) :
             (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM;
  int mode = IRSLOAD_INHERIT +
             ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0);
  TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
  TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
  int tc, dir = rec_for_direction(&tv[FORL_STEP]);
  lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI);
  scev->stop = tref_ref(stop);
  scev->step = tref_ref(step);
  rec_for_check(J, t, dir, stop, step, init);
  scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT));
  tc = (LJ_DUALNUM &&
        !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) &&
          tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ?
       IRSLOAD_TYPECHECK : 0;
  J->base[ra+FORL_STOP] = stop;
  J->base[ra+FORL_STEP] = step;
    idx = fori_load(J, ra+FORL_IDX, t,
                    IRSLOAD_INHERIT + tc + (J->scev.start << 16));
    J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step);
  J->base[ra+FORL_EXT] = idx;
  scev->idx = tref_ref(idx);
  setmref(scev->pc, fori);
  J->maxslot = ra+FORL_EXT+1;
/* Record FORL/JFORL or FORI/JFORI. */
static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
  BCReg ra = bc_a(*fori);
  TValue *tv = &J->L->base[ra];
  TRef *tr = &J->base[ra];
  if (isforl) {  /* Handle FORL/JFORL opcodes. */
    TRef idx = tr[FORL_IDX];
    if (mref(J->scev.pc, const BCIns) == fori && tref_ref(idx) == J->scev.idx) {
      idx = emitir(IRT(IR_ADD, t), idx, J->scev.step);
      tr[FORL_EXT] = tr[FORL_IDX] = idx;
      rec_for_loop(J, fori, &scev, 0);
  } else {  /* Handle FORI/JFORI opcodes. */
      lj_meta_for(J->L, tv);
    t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) :
        IRT_NUM;
    for (i = FORL_IDX; i <= FORL_STEP; i++) {
      if (!tr[i]) sload(J, ra+i);
      lua_assert(tref_isnumber_str(tr[i]));
      if (tref_isstr(tr[i]))
        tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
        if (!tref_isinteger(tr[i]))
          tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK);
        if (!tref_isnum(tr[i]))
          tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT);
    tr[FORL_EXT] = tr[FORL_IDX];
    stop = tr[FORL_STOP];
    rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]),
                  stop, tr[FORL_STEP], 1);
  ev = rec_for_iter(&op, tv, isforl);
  if (ev == LOOPEV_LEAVE) {
    J->maxslot = ra+FORL_EXT+1;
    J->pc = fori+bc_j(*fori)+1;
  emitir(IRTG(op, t), tr[FORL_IDX], stop);
  if (ev == LOOPEV_LEAVE) {
    J->pc = fori+bc_j(*fori)+1;
    J->maxslot = ra+FORL_EXT+1;

/* Record ITERL/JITERL. */
static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
  BCReg ra = bc_a(iterins);
  if (!tref_isnil(getslot(J, ra))) {  /* Looping back? */
    J->base[ra-1] = J->base[ra];  /* Copy result of ITERC to control var. */
    J->maxslot = ra-1+bc_b(J->pc[-1]);
    J->pc += bc_j(iterins)+1;

/* Record LOOP/JLOOP. Now, that was easy. */
static LoopEvent rec_loop(jit_State *J, BCReg ra)
  if (ra < J->maxslot) J->maxslot = ra;
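/* Note: a LOOP/JLOOP op itself only trims maxslot down to the loop base
** register; it is otherwise treated as entering the loop.
*/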
/* Check if a loop repeatedly failed to trace because it didn't loop back. */
static int innerloopleft(jit_State *J, const BCIns *pc)
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (mref(J->penalty[i].pc, const BCIns) == pc) {
      if ((J->penalty[i].reason == LJ_TRERR_LLEAVE ||
           J->penalty[i].reason == LJ_TRERR_LINNER) &&
          J->penalty[i].val >= 2*PENALTY_MIN)

/* Handle the case when an interpreted loop op is hit. */
static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
  if (J->parent == 0 && J->exitno == 0) {
    if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
      if (ev == LOOPEV_LEAVE)  /* Must loop back to form a root trace. */
        lj_trace_err(J, LJ_TRERR_LLEAVE);
      lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno);  /* Looping trace. */
    } else if (ev != LOOPEV_LEAVE) {  /* Entering inner loop? */
      /* It's usually better to abort here and wait until the inner loop
      ** is traced. But if the inner loop repeatedly didn't loop back,
      ** this indicates a low trip count. In this case try unrolling
      ** an inner loop even in a root trace. But it's better to be a bit
      ** more conservative here and only do it for very short loops.
      */
      if (bc_j(*pc) != -1 && !innerloopleft(J, pc))
        lj_trace_err(J, LJ_TRERR_LINNER);  /* Root trace hit an inner loop. */
      if ((ev != LOOPEV_ENTERLO &&
           J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0)
        lj_trace_err(J, LJ_TRERR_LUNROLL);  /* Limit loop unrolling. */
      J->loopref = J->cur.nins;
  } else if (ev != LOOPEV_LEAVE) {  /* Side trace enters an inner loop. */
    J->loopref = J->cur.nins;
    if (--J->loopunroll < 0)
      lj_trace_err(J, LJ_TRERR_LUNROLL);  /* Limit loop unrolling. */
  }  /* Side trace continues across a loop that's left or not entered. */

/* Handle the case when an already compiled loop op is hit. */
static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
  if (J->parent == 0 && J->exitno == 0) {  /* Root trace hit an inner loop. */
    /* Better let the inner loop spawn a side trace back here. */
    lj_trace_err(J, LJ_TRERR_LINNER);
  } else if (ev != LOOPEV_LEAVE) {  /* Side trace enters a compiled loop. */
    J->instunroll = 0;  /* Cannot continue across a compiled loop op. */
    if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
      lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno);  /* Form extra loop. */
      lj_record_stop(J, LJ_TRLINK_ROOT, lnk);  /* Link to the loop. */
  }  /* Side trace continues across a loop that's left or not entered. */
/* -- Record profiler hook checks ----------------------------------------- */

/* Need to insert profiler hook check? */
static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc)
  lua_assert(J->prof_mode == 'f' || J->prof_mode == 'l');
  if (pt != ppt && ppt) {
  if (J->prof_mode == 'l') {
    BCLine line = lj_debug_line(pt, proto_bcpos(pt, pc));
    BCLine pline = J->prev_line;

static void rec_profile_ins(jit_State *J, const BCIns *pc)
  if (J->prof_mode && rec_profile_need(J, J->pt, pc)) {
    emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);

static void rec_profile_ret(jit_State *J)
  if (J->prof_mode == 'f') {
    emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
/* -- Record calls and returns -------------------------------------------- */

/* Specialize to the runtime value of the called function or its prototype. */
static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
    GCproto *pt = funcproto(fn);
    /* Too many closures created? Probably not a monomorphic function. */
    if (pt->flags >= PROTO_CLC_POLY) {  /* Specialize to prototype instead. */
      TRef trpt = emitir(IRT(IR_FLOAD, IRT_P32), tr, IRFL_FUNC_PC);
      emitir(IRTG(IR_EQ, IRT_P32), trpt, lj_ir_kptr(J, proto_bc(pt)));
      (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);  /* Prevent GC of proto. */
    /* Don't specialize to non-monomorphic builtins. */
    switch (fn->c.ffid) {
    case FF_coroutine_wrap_aux:
    case FF_string_gmatch_aux:
    /* NYI: io_file_iter doesn't have an ffid, yet. */
      {  /* Specialize to the ffid. */
        TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), tr, IRFL_FUNC_FFID);
        emitir(IRTG(IR_EQ, IRT_INT), trid, lj_ir_kint(J, fn->c.ffid));
      /* NYI: don't specialize to non-monomorphic C functions. */
  /* Otherwise specialize to the function (closure) value itself. */
  kfunc = lj_ir_kfunc(J, fn);
  emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc);

/* Record call setup. */
static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
  TValue *functv = &J->L->base[func];
  TRef *fbase = &J->base[func];
  for (i = 0; i <= nargs; i++)
    (void)getslot(J, func+i);  /* Ensure func and all args have a reference. */
  if (!tref_isfunc(fbase[0])) {  /* Resolve __call metamethod. */
    copyTV(J->L, &ix.tabv, functv);
    if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
      lj_trace_err(J, LJ_TRERR_NOMM);
    for (i = ++nargs; i > 0; i--)  /* Shift arguments up. */
      fbase[i] = fbase[i-1];
    fbase[0] = ix.mobj;  /* Replace function. */
  fbase[0] = TREF_FRAME | rec_call_specialize(J, funcV(functv), fbase[0]);
  J->maxslot = (BCReg)nargs;

void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
  rec_call_setup(J, func, nargs);
  J->baseslot += func+1;
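/* Note: recording continues inside the callee. The baseslot (and base
** pointer) advance past the new frame, so the TREF_FRAME-tagged function
** reference set up above ends up at J->base[-1].
*/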
/* Record tail call. */
void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
  rec_call_setup(J, func, nargs);
  if (frame_isvarg(J->L->base - 1)) {
    BCReg cbase = (BCReg)frame_delta(J->L->base - 1);
    if (--J->framedepth < 0)
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    J->baseslot -= (BCReg)cbase;
  /* Move func + args down. */
  memmove(&J->base[-1], &J->base[func], sizeof(TRef)*(J->maxslot+1));
  /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
  /* Tailcalls can form a loop, so count towards the loop unroll limit. */
  if (++J->tailcalled > J->loopunroll)
    lj_trace_err(J, LJ_TRERR_LUNROLL);
/* Check unroll limits for down-recursion. */
static int check_downrec_unroll(jit_State *J, GCproto *pt)
  for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev)
    if (ir_kgc(IR(ptref)) == obj2gco(pt)) {
      for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev)
        if (IR(ref)->op1 == ptref)
      if (J->pc == J->startpc) {
        if (count + J->tailcalled > J->param[JIT_P_recunroll])
        lj_trace_err(J, LJ_TRERR_DOWNREC);

static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot);

void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
  TValue *frame = J->L->base - 1;
  for (i = 0; i < gotresults; i++)
    (void)getslot(J, rbase+i);  /* Ensure all results have a reference. */
  while (frame_ispcall(frame)) {  /* Immediately resolve pcall() returns. */
    BCReg cbase = (BCReg)frame_delta(frame);
    if (--J->framedepth < 0)
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    lua_assert(J->baseslot > 1);
    J->baseslot -= (BCReg)cbase;
    J->base[--rbase] = TREF_TRUE;  /* Prepend true to results. */
    frame = frame_prevd(frame);
  /* Return to lower frame via interpreter for unhandled cases. */
  if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
      (!frame_islua(frame) ||
       (J->parent == 0 && J->exitno == 0 &&
        !bc_isret(bc_op(J->cur.startins))))) {
    /* NYI: specialize to frame type and return directly, not via RET*. */
    for (i = 0; i < (ptrdiff_t)rbase; i++)
      J->base[i] = 0;  /* Purge dead slots. */
    J->maxslot = rbase + (BCReg)gotresults;
    lj_record_stop(J, LJ_TRLINK_RETURN, 0);  /* Return to interpreter. */
  if (frame_isvarg(frame)) {
    BCReg cbase = (BCReg)frame_delta(frame);
    if (--J->framedepth < 0)  /* NYI: return of vararg func to lower frame. */
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    lua_assert(J->baseslot > 1);
    J->baseslot -= (BCReg)cbase;
    frame = frame_prevd(frame);
  if (frame_islua(frame)) {  /* Return to Lua frame. */
    BCIns callins = *(frame_pc(frame)-1);
    ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 : gotresults;
    BCReg cbase = bc_a(callins);
    GCproto *pt = funcproto(frame_func(frame - (cbase+1)));
    if ((pt->flags & PROTO_NOJIT))
      lj_trace_err(J, LJ_TRERR_CJITOFF);
    if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
      if (check_downrec_unroll(J, pt)) {
        J->maxslot = (BCReg)(rbase + gotresults);
        lj_record_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno);  /* Down-rec. */
    for (i = 0; i < nresults; i++)  /* Adjust results. */
      J->base[i-1] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
    J->maxslot = cbase+(BCReg)nresults;
    if (J->framedepth > 0) {  /* Return to a frame that is part of the trace. */
      lua_assert(J->baseslot > cbase+1);
      J->baseslot -= cbase+1;
    } else if (J->parent == 0 && J->exitno == 0 &&
               !bc_isret(bc_op(J->cur.startins))) {
      /* Return to lower frame would leave the loop in a root trace. */
      lj_trace_err(J, LJ_TRERR_LLEAVE);
    } else {  /* Return to lower frame. Guard for the target we return to. */
      TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
      TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
      emitir(IRTG(IR_RETF, IRT_P32), trpt, trpc);
      lua_assert(J->baseslot == 1);
      /* Shift result slots up and clear the slots of the new frame below. */
      memmove(J->base + cbase, J->base-1, sizeof(TRef)*nresults);
      memset(J->base-1, 0, sizeof(TRef)*(cbase+1));
  } else if (frame_iscont(frame)) {  /* Return to continuation frame. */
    ASMFunction cont = frame_contf(frame);
    BCReg cbase = (BCReg)frame_delta(frame);
    if ((J->framedepth -= 2) < 0)
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    J->baseslot -= (BCReg)cbase;
    J->maxslot = cbase-2;
    if (cont == lj_cont_ra) {
      /* Copy result to destination slot. */
      BCReg dst = bc_a(*(frame_contpc(frame)-1));
      J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
      if (dst >= J->maxslot) J->maxslot = dst+1;
    } else if (cont == lj_cont_nop) {
      /* Nothing to do here. */
    } else if (cont == lj_cont_cat) {
      BCReg bslot = bc_b(*(frame_contpc(frame)-1));
      TRef tr = gotresults ? J->base[cbase+rbase] : TREF_NIL;
      if (bslot != cbase-2) {  /* Concatenate the remainder. */
        TValue *b = J->L->base, save;  /* Simulate lower frame and result. */
        J->base[cbase-2] = tr;
        copyTV(J->L, &save, b-2);
        if (gotresults) copyTV(J->L, b-2, b+rbase); else setnilV(b-2);
        J->L->base = b - cbase;
        tr = rec_cat(J, bslot, cbase-2);
        b = J->L->base + cbase;  /* Undo. */
        copyTV(J->L, b-2, &save);
      if (tr) {  /* Store final result. */
        BCReg dst = bc_a(*(frame_contpc(frame)-1));
        if (dst >= J->maxslot) J->maxslot = dst+1;
      }  /* Otherwise continue with another __concat call. */
      /* Result type already specialized. */
      lua_assert(cont == lj_cont_condf || cont == lj_cont_condt);
    lj_trace_err(J, LJ_TRERR_NYIRETL);  /* NYI: handle return to C frame. */
  lua_assert(J->baseslot >= 1);
/* -- Metamethod handling ------------------------------------------------- */

/* Prepare to record call to metamethod. */
static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
  BCReg s, top = cont == lj_cont_cat ? J->maxslot : curr_proto(J->L)->framesize;
#if LJ_64
  TRef trcont = lj_ir_kptr(J, (void *)((int64_t)cont-(int64_t)lj_vm_asm_begin));
#else
  TRef trcont = lj_ir_kptr(J, (void *)cont);
#endif
  J->base[top] = trcont | TREF_CONT;
  for (s = J->maxslot; s < top; s++)
    J->base[s] = 0;  /* Clear frame gap to avoid resurrecting previous refs. */
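  /* Callers use the returned register as the base of the metamethod call
  ** frame (mobj and its arguments go into base[0..2]); the TREF_CONT slot
  ** set up above sits directly below that base.
  */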
/* Record metamethod lookup. */
int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
  if (tref_istab(ix->tab)) {
    mt = tabref(tabV(&ix->tabv)->metatable);
    mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
  } else if (tref_isudata(ix->tab)) {
    int udtype = udataV(&ix->tabv)->udtype;
    mt = tabref(udataV(&ix->tabv)->metatable);
    /* The metatables of special userdata objects are treated as immutable. */
    if (udtype != UDTYPE_USERDATA) {
      if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
        /* Specialize to the C library namespace object. */
        emitir(IRTG(IR_EQ, IRT_P32), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
        /* Specialize to the type of userdata. */
        TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
        emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype));
      mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm));
      if (!mo || tvisnil(mo))
        return 0;  /* No metamethod. */
      /* Treat metamethod or index table as immutable, too. */
      if (!(tvisfunc(mo) || tvistab(mo)))
        lj_trace_err(J, LJ_TRERR_BADTYPE);
      copyTV(J->L, &ix->mobjv, mo);
      ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB);
      ix->mt = TREF_NIL;  /* Dummy value for comparison semantics. */
      return 1;  /* Got metamethod or index table. */
    mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META);
    /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */
    mt = tabref(basemt_obj(J2G(J), &ix->tabv));
      return 0;  /* No metamethod. */
    /* The cdata metatable is treated as immutable. */
    if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
    ix->mt = mix.tab = lj_ir_ktab(J, mt);
  ix->mt = mt ? mix.tab : TREF_NIL;
  emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB));
    GCstr *mmstr = mmname_str(J2G(J), mm);
    cTValue *mo = lj_tab_getstr(mt, mmstr);
    if (mo && !tvisnil(mo))
      copyTV(J->L, &ix->mobjv, mo);
    settabV(J->L, &mix.tabv, mt);
    setstrV(J->L, &mix.keyv, mmstr);
    mix.key = lj_ir_kstr(J, mmstr);
    ix->mobj = lj_record_idx(J, &mix);
    return !tref_isnil(ix->mobj);  /* 1 if metamethod found, 0 if not. */
  return 0;  /* No metamethod. */
/* Record call to arithmetic metamethod. */
static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
  /* Set up metamethod call first to save ix->tab and ix->tabv. */
  BCReg func = rec_mm_prep(J, mm == MM_concat ? lj_cont_cat : lj_cont_ra);
  TRef *base = J->base + func;
  TValue *basev = J->L->base + func;
  base[1] = ix->tab; base[2] = ix->key;
  copyTV(J->L, basev+1, &ix->tabv);
  copyTV(J->L, basev+2, &ix->keyv);
  if (!lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
    copyTV(J->L, &ix->tabv, &ix->keyv);
    if (lj_record_mm_lookup(J, ix, mm))  /* Lookup mm on 2nd operand. */
    lj_trace_err(J, LJ_TRERR_NOMM);
  copyTV(J->L, basev+0, &ix->mobjv);
  lj_record_call(J, func, 2);
  return 0;  /* No result yet. */

/* Record call to __len metamethod. */
static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
  copyTV(J->L, &ix.tabv, tv);
  if (lj_record_mm_lookup(J, &ix, MM_len)) {
    BCReg func = rec_mm_prep(J, lj_cont_ra);
    TRef *base = J->base + func;
    TValue *basev = J->L->base + func;
    base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
    base[1] = tr; copyTV(J->L, basev+1, tv);
    base[2] = tr; copyTV(J->L, basev+2, tv);
    base[2] = TREF_NIL; setnilV(basev+2);
    lj_record_call(J, func, 2);
    if (LJ_52 && tref_istab(tr))
      return lj_ir_call(J, IRCALL_lj_tab_len, tr);
    lj_trace_err(J, LJ_TRERR_NOMM);
  return 0;  /* No result yet. */

/* Call a comparison metamethod. */
static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
  BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
  TRef *base = J->base + func;
  TValue *tv = J->L->base + func;
  base[0] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
  copyTV(J->L, tv+0, &ix->mobjv);
  copyTV(J->L, tv+1, &ix->valv);
  copyTV(J->L, tv+2, &ix->keyv);
  lj_record_call(J, func, 2);
/* Record call to equality comparison metamethod (for tab and udata only). */
static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op)
  copyTV(J->L, &ix->tabv, &ix->valv);
  if (lj_record_mm_lookup(J, ix, MM_eq)) {  /* Lookup mm on 1st operand. */
    TRef mo1 = ix->mobj;
    copyTV(J->L, &mo1v, &ix->mobjv);
    /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
    if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
      TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
      emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
    } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
      TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
      emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
    } else {  /* Lookup metamethod on 2nd operand and compare both. */
      copyTV(J->L, &ix->tabv, bv);
      if (!lj_record_mm_lookup(J, ix, MM_eq) ||
          lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
    rec_mm_callcomp(J, ix, op);

/* Record call to ordered comparison metamethods (for arbitrary objects). */
static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op)
  copyTV(J->L, &ix->tabv, &ix->valv);
    MMS mm = (op & 2) ? MM_le : MM_lt;  /* Try __le + __lt or only __lt. */
      if (!lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
        copyTV(J->L, &ix->tabv, &ix->keyv);
        if (!lj_record_mm_lookup(J, ix, mm))  /* Lookup mm on 2nd operand. */
      rec_mm_callcomp(J, ix, op);
      if (lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
        TRef mo1 = ix->mobj;
        copyTV(J->L, &mo1v, &ix->mobjv);
        /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
        if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
          TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
          emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
        } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
          TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
          emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
        } else {  /* Lookup metamethod on 2nd operand and compare both. */
          copyTV(J->L, &ix->tabv, bv);
          if (!lj_record_mm_lookup(J, ix, mm) ||
              lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
        rec_mm_callcomp(J, ix, op);
    /* Lookup failed. Retry with __lt and swapped operands. */
    if (!(op & 2)) break;  /* Already at __lt. Interpreter will throw. */
    ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab;
    copyTV(J->L, &ix->tabv, &ix->keyv);
    copyTV(J->L, &ix->keyv, &ix->valv);
    copyTV(J->L, &ix->valv, &ix->tabv);

/* Setup call to cdata comparison metamethod. */
static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
  if (tref_iscdata(ix->val)) {
    copyTV(J->L, &ix->tabv, &ix->valv);
    lua_assert(tref_iscdata(ix->key));
    copyTV(J->L, &ix->tabv, &ix->keyv);
  lj_record_mm_lookup(J, ix, mm);
  rec_mm_callcomp(J, ix, op);
/* -- Indexed access ------------------------------------------------------ */

/* Record bounds-check. */
static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
  /* Try to emit invariant bounds checks. */
  if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) ==
      (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) {
    IRRef ref = tref_ref(ikey);
    IRIns *ir = IR(ref);
    /* Handle constant offsets. */
    if (ir->o == IR_ADD && irref_isk(ir->op2)) {
      ofs = IR(ofsref)->i;
    /* Got scalar evolution analysis results for this reference? */
    if (ref == J->scev.idx) {
      lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD);
      stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
      /* Runtime value for stop of loop is within bounds? */
      if ((uint64_t)stop + ofs < (uint64_t)asize) {
        /* Emit invariant bounds check for stop. */
        emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop :
               emitir(IRTI(IR_ADD), J->scev.stop, ofsref));
        /* Emit invariant bounds check for start, if not const or negative. */
        if (!(J->scev.dir && J->scev.start &&
              (int64_t)IR(J->scev.start)->i + ofs >= 0))
          emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey);
  emitir(IRTGI(IR_ABC), asizeref, ikey);  /* Emit regular bounds check. */

/* Record indexed key lookup. */
static TRef rec_idx_key(jit_State *J, RecordIndex *ix, IRRef *rbref)
  GCtab *t = tabV(&ix->tabv);
  ix->oldv = lj_tab_get(J->L, t, &ix->keyv);  /* Lookup previous value. */
  /* Integer keys are looked up in the array part first. */
  if (tref_isnumber(key)) {
    int32_t k = numberVint(&ix->keyv);
    if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k)
    if ((MSize)k < LJ_MAX_ASIZE) {  /* Potential array key? */
      TRef ikey = lj_opt_narrow_index(J, key);
      TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
      if ((MSize)k < t->asize) {  /* Currently an array key? */
        rec_idx_abc(J, asizeref, ikey, t->asize);
        arrayref = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_ARRAY);
        return emitir(IRT(IR_AREF, IRT_P32), arrayref, ikey);
      } else {  /* Currently not in array (may be an array extension)? */
        emitir(IRTGI(IR_ULE), asizeref, ikey);  /* Inv. bounds check. */
        if (k == 0 && tref_isk(key))
          key = lj_ir_knum_zero(J);  /* Canonicalize 0 or +-0.0 to +0.0. */
        /* And continue with the hash lookup. */
    } else if (!tref_isk(key)) {
      /* We can rule out const numbers which failed the integerness test
      ** above. But all other numbers are potential array keys.
      */
      if (t->asize == 0) {  /* True sparse tables have an empty array part. */
        /* Guard that the array part stays empty. */
        TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
        emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
        lj_trace_err(J, LJ_TRERR_NYITMIX);
  /* Otherwise the key is located in the hash part. */
  if (t->hmask == 0) {  /* Shortcut for empty hash part. */
    /* Guard that the hash part stays empty. */
    TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
    emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
    return lj_ir_kkptr(J, niltvg(J2G(J)));
  if (tref_isinteger(key))  /* Hash keys are based on numbers, not ints. */
    key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
  if (tref_isk(key)) {
    /* Optimize lookup of constant hash keys. */
    MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val);
    if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) &&
        hslot <= 65535*(MSize)sizeof(Node)) {
      TRef node, kslot, hm;
      *rbref = J->cur.nins;  /* Mark possible rollback point. */
      hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
      emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
      node = emitir(IRT(IR_FLOAD, IRT_P32), ix->tab, IRFL_TAB_NODE);
      kslot = lj_ir_kslot(J, key, hslot / sizeof(Node));
      return emitir(IRTG(IR_HREFK, IRT_P32), node, kslot);
  /* Fall back to a regular hash lookup. */
  return emitir(IRT(IR_HREF, IRT_P32), ix->tab, key);
/* Determine whether a key is NOT one of the fast metamethod names. */
static int nommstr(jit_State *J, TRef key)
  if (tref_isstr(key)) {
    if (tref_isk(key)) {
      GCstr *str = ir_kstr(IR(tref_ref(key)));
      for (mm = 0; mm <= MM_FAST; mm++)
        if (mmname_str(J2G(J), mm) == str)
          return 0;  /* MUST be one of the fast metamethod names. */
    return 0;  /* Variable string key MAY be a metamethod name. */
  return 1;  /* CANNOT be a metamethod name. */

/* Record indexed load/store. */
TRef lj_record_idx(jit_State *J, RecordIndex *ix)
  IROp xrefop, loadop;
  while (!tref_istab(ix->tab)) {  /* Handle non-table lookup. */
    /* Never call raw lj_record_idx() on non-table. */
    lua_assert(ix->idxchain != 0);
    if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
      lj_trace_err(J, LJ_TRERR_NOMM);
    if (tref_isfunc(ix->mobj)) {  /* Handle metamethod call. */
      BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
      TRef *base = J->base + func;
      TValue *tv = J->L->base + func;
      base[0] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
      setfuncV(J->L, tv+0, funcV(&ix->mobjv));
      copyTV(J->L, tv+1, &ix->tabv);
      copyTV(J->L, tv+2, &ix->keyv);
        copyTV(J->L, tv+3, &ix->valv);
        lj_record_call(J, func, 3);  /* mobj(tab, key, val) */
        lj_record_call(J, func, 2);  /* res = mobj(tab, key) */
      return 0;  /* No result yet. */
    /* Otherwise retry lookup with metaobject. */
    copyTV(J->L, &ix->tabv, &ix->mobjv);
    if (--ix->idxchain == 0)
      lj_trace_err(J, LJ_TRERR_IDXLOOP);
  /* First catch nil and NaN keys for tables. */
  if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) {
    if (ix->val)  /* Better fail early. */
      lj_trace_err(J, LJ_TRERR_STORENN);
    if (tref_isk(ix->key)) {
      if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
  /* Record the key lookup. */
  xref = rec_idx_key(J, ix, &rbref);
  xrefop = IR(tref_ref(xref))->o;
  loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
  /* The lj_meta_tset() inconsistency is gone, but better play safe. */
  oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv;
  if (ix->val == 0) {  /* Indexed load */
    IRType t = itype2irt(oldv);
    if (oldv == niltvg(J2G(J))) {
      emitir(IRTG(IR_EQ, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
      res = emitir(IRTG(loadop, t), xref, 0);
    if (tref_ref(res) < rbref)  /* HREFK + load forwarded? */
      lj_ir_rollback(J, rbref);  /* Rollback to eliminate hmask guard. */
    if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
    if (irtype_ispri(t)) res = TREF_PRI(t);  /* Canonicalize primitives. */
  } else {  /* Indexed store. */
    GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
    int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
    if (tref_ref(xref) < rbref)  /* HREFK forwarded? */
      lj_ir_rollback(J, rbref);  /* Rollback to eliminate hmask guard. */
    if (tvisnil(oldv)) {  /* Previous value was nil? */
      /* Need to duplicate the hasmm check for the early guards. */
      if (ix->idxchain && mt) {
        cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex));
        hasmm = mo && !tvisnil(mo);
        emitir(IRTG(loadop, IRT_NIL), xref, 0);  /* Guard for nil value. */
      else if (xrefop == IR_HREF)
        emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_P32),
               xref, lj_ir_kkptr(J, niltvg(J2G(J))));
      if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
      if (oldv == niltvg(J2G(J))) {  /* Need to insert a new key. */
        if (tref_isinteger(key))  /* NEWREF needs a TValue as a key. */
          key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
        xref = emitir(IRT(IR_NEWREF, IRT_P32), ix->tab, key);
        keybarrier = 0;  /* NEWREF already takes care of the key barrier. */
    } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
      /* Cannot derive that the previous value was non-nil, must do checks. */
      if (xrefop == IR_HREF)  /* Guard against store to niltv. */
        emitir(IRTG(IR_NE, IRT_P32), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
      if (ix->idxchain) {  /* Metamethod lookup required? */
        /* A check for NULL metatable is cheaper (hoistable) than a load. */
          TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
          emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
          IRType t = itype2irt(oldv);
          emitir(IRTG(loadop, t), xref, 0);  /* Guard for non-nil value. */
      keybarrier = 0;  /* Previous non-nil value kept the key alive. */
    /* Convert int to number before storing. */
    if (!LJ_DUALNUM && tref_isinteger(ix->val))
      ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT);
    emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val);
    if (keybarrier || tref_isgcv(ix->val))
      emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
    /* Invalidate neg. metamethod cache for stores with certain string keys. */
    if (!nommstr(J, ix->key)) {
      TRef fref = emitir(IRT(IR_FREF, IRT_P32), ix->tab, IRFL_TAB_NOMM);
      emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));

static void rec_tsetm(jit_State *J, BCReg ra, BCReg rn, int32_t i)
  cTValue *basev = J->L->base;
  copyTV(J->L, &ix.tabv, &basev[ra-1]);
  ix.tab = getslot(J, ra-1);
  for (; ra < rn; i++, ra++) {
    setintV(&ix.keyv, i);
    ix.key = lj_ir_kint(J, i);
    copyTV(J->L, &ix.valv, &basev[ra]);
    ix.val = getslot(J, ra);
    lj_record_idx(J, &ix);
/* -- Upvalue access ------------------------------------------------------ */

/* Check whether upvalue is immutable and ok to constify. */
static int rec_upvalue_constify(jit_State *J, GCupval *uvp)
  if (uvp->immutable) {
    cTValue *o = uvval(uvp);
    /* Don't constify objects that may retain large amounts of memory. */
      GCcdata *cd = cdataV(o);
      if (!cdataisv(cd) && !(cd->marked & LJ_GC_CDATA_FIN)) {
        CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid);
        if (!ctype_hassize(ct->info) || ct->size <= 16)
    if (!(tvistab(o) || tvisudata(o) || tvisthread(o)))

/* Record upvalue load/store. */
static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
  GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv;
  TRef fn = getcurrf(J);
  int needbarrier = 0;
  if (rec_upvalue_constify(J, uvp)) {  /* Try to constify immutable upvalue. */
    lua_assert(val == 0);
    if (!tref_isk(fn)) {  /* Late specialization of current function. */
      if (J->pt->flags >= PROTO_CLC_POLY)
      kfunc = lj_ir_kfunc(J, J->fn);
      emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc);
      J->base[-1] = TREF_FRAME | kfunc;
    tr = lj_record_constify(J, uvval(uvp));
  /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
  uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
  /* In current stack? */
  if (uvval(uvp) >= tvref(J->L->stack) &&
      uvval(uvp) < tvref(J->L->maxstack)) {
    int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
    if (slot >= 0) {  /* Aliases an SSA slot? */
      slot -= (int32_t)J->baseslot;  /* Note: slot number may be negative! */
      /* NYI: add IR to guard that it's still aliasing the same slot. */
        return getslot(J, slot);
        J->base[slot] = val;
        if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1);
    uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_P32), fn, uv));
    uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_P32), fn, uv));
  if (val == 0) {  /* Upvalue load */
    IRType t = itype2irt(uvval(uvp));
    TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0);
    if (irtype_ispri(t)) res = TREF_PRI(t);  /* Canonicalize primitive refs. */
  } else {  /* Upvalue store. */
    /* Convert int to number before storing. */
    if (!LJ_DUALNUM && tref_isinteger(val))
      val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
    emitir(IRT(IR_USTORE, tref_type(val)), uref, val);
    if (needbarrier && tref_isgcv(val))
      emitir(IRT(IR_OBAR, IRT_NIL), uref, val);
/* -- Record calls to Lua functions --------------------------------------- */

/* Check unroll limits for calls. */
static void check_call_unroll(jit_State *J, TraceNo lnk)
  cTValue *frame = J->L->base - 1;
  void *pc = mref(frame_func(frame)->l.pc, void);
  int32_t depth = J->framedepth;
  if ((J->pt->flags & PROTO_VARARG)) depth--;  /* Vararg frame still missing. */
  for (; depth > 0; depth--) {  /* Count frames with same prototype. */
    if (frame_iscont(frame)) depth--;
    frame = frame_prev(frame);
    if (mref(frame_func(frame)->l.pc, void) == pc)
  if (J->pc == J->startpc) {
    if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
      if (J->framedepth + J->retdepth == 0)
        lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno);  /* Tail-rec. */
        lj_record_stop(J, LJ_TRLINK_UPREC, J->cur.traceno);  /* Up-recursion. */
    if (count > J->param[JIT_P_callunroll]) {
      if (lnk) {  /* Possible tail- or up-recursion. */
        lj_trace_flush(J, lnk);  /* Flush trace that only returns. */
        /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
        hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4));
      lj_trace_err(J, LJ_TRERR_CUNROLL);

/* Record Lua function setup. */
static void rec_func_setup(jit_State *J)
  GCproto *pt = J->pt;
  BCReg s, numparams = pt->numparams;
  if ((pt->flags & PROTO_NOJIT))
    lj_trace_err(J, LJ_TRERR_CJITOFF);
  if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS)
    lj_trace_err(J, LJ_TRERR_STACKOV);
  /* Fill up missing parameters with nil. */
  for (s = J->maxslot; s < numparams; s++)
    J->base[s] = TREF_NIL;
  /* The remaining slots should never be read before they are written. */
  J->maxslot = numparams;

/* Record Lua vararg function setup. */
static void rec_func_vararg(jit_State *J)
  GCproto *pt = J->pt;
  BCReg s, fixargs, vframe = J->maxslot+1;
  lua_assert((pt->flags & PROTO_VARARG));
  if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
    lj_trace_err(J, LJ_TRERR_STACKOV);
  J->base[vframe-1] = J->base[-1];  /* Copy function up. */
  /* Copy fixarg slots up and set their original slots to nil. */
  fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
  for (s = 0; s < fixargs; s++) {
    J->base[vframe+s] = J->base[s];
    J->base[s] = TREF_NIL;
  J->maxslot = fixargs;
  J->baseslot += vframe;

/* Record entry to a Lua function. */
static void rec_func_lua(jit_State *J)
  check_call_unroll(J, 0);

/* Record entry to an already compiled function. */
static void rec_func_jit(jit_State *J, TraceNo lnk)
  T = traceref(J, lnk);
  if (T->linktype == LJ_TRLINK_RETURN) {  /* Trace returns to interpreter? */
    check_call_unroll(J, lnk);
    /* Temporarily unpatch JFUNC* to continue recording across function. */
    J->patchins = *J->pc;
    J->patchpc = (BCIns *)J->pc;
    *J->patchpc = T->startins;
  J->instunroll = 0;  /* Cannot continue across a compiled function. */
  if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
    lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno);  /* Extra tail-rec. */
    lj_record_stop(J, LJ_TRLINK_ROOT, lnk);  /* Link to the function. */
/* -- Vararg handling ----------------------------------------------------- */

/* Detect y = select(x, ...) idiom. */
static int select_detect(jit_State *J)
  BCIns ins = J->pc[1];
  if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) {
    cTValue *func = &J->L->base[bc_a(ins)];
    if (tvisfunc(func) && funcV(func)->c.ffid == FF_select)
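/* The check above matches a BC_VARG that is immediately followed by a
** BC_CALLM calling select() with one fixed argument (the index x), the
** varargs as the trailing multi-result argument and a single result.
*/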
/* Record vararg instruction. */
static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
  int32_t numparams = J->pt->numparams;
  ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1;
  lua_assert(frame_isvarg(J->L->base-1));
  if (J->framedepth > 0) {  /* Simple case: varargs defined on-trace. */
    if (nvararg < 0) nvararg = 0;
    if (nresults == -1) {
      J->maxslot = dst + (BCReg)nvararg;
    } else if (dst + nresults > J->maxslot) {
      J->maxslot = dst + (BCReg)nresults;
    for (i = 0; i < nresults; i++)
      J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1) : TREF_NIL;
  } else {  /* Unknown number of varargs passed to trace. */
    TRef fr = emitir(IRTI(IR_SLOAD), 0, IRSLOAD_READONLY|IRSLOAD_FRAME);
    int32_t frofs = 8*(1+numparams)+FRAME_VARG;
    if (nresults >= 0) {  /* Known fixed number of results. */
      ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg;
      if (nvararg >= nresults)
        emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
        emitir(IRTGI(IR_EQ), fr, lj_ir_kint(J, frame_ftsz(J->L->base-1)));
      vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
      vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
      for (i = 0; i < nload; i++) {
        IRType t = itype2irt(&J->L->base[i-1-nvararg]);
        TRef aref = emitir(IRT(IR_AREF, IRT_P32),
                           vbase, lj_ir_kint(J, (int32_t)i));
        TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
        if (irtype_ispri(t)) tr = TREF_PRI(t);  /* Canonicalize primitives. */
        J->base[dst+i] = tr;
        emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs));
      for (i = nvararg; i < nresults; i++)
        J->base[dst+i] = TREF_NIL;
      if (dst + (BCReg)nresults > J->maxslot)
        J->maxslot = dst + (BCReg)nresults;
    } else if (select_detect(J)) {  /* y = select(x, ...) */
      TRef tridx = J->base[dst-1];
      ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]);
      if (idx < 0) goto nyivarg;
      if (idx != 0 && !tref_isinteger(tridx))
        tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX);
      if (idx != 0 && tref_isk(tridx)) {
        emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT),
               fr, lj_ir_kint(J, frofs+8*(int32_t)idx));
        frofs -= 8;  /* Bias for 1-based index. */
      } else if (idx <= nvararg) {  /* Compute size. */
        TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs));
        emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0));
        tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3));
        tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1));
        rec_idx_abc(J, tr, tridx, (uint32_t)nvararg);
        TRef tmp = lj_ir_kint(J, frofs);
        TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3));
        tmp = emitir(IRTI(IR_ADD), tmp2, tmp);
        tr = lj_ir_kint(J, 0);
        emitir(IRTGI(IR_LT), fr, tmp);
      if (idx != 0 && idx <= nvararg) {
        TRef aref, vbase = emitir(IRTI(IR_SUB), REF_BASE, fr);
        vbase = emitir(IRT(IR_ADD, IRT_P32), vbase, lj_ir_kint(J, frofs-8));
        t = itype2irt(&J->L->base[idx-2-nvararg]);
        aref = emitir(IRT(IR_AREF, IRT_P32), vbase, tridx);
        tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
        if (irtype_ispri(t)) tr = TREF_PRI(t);  /* Canonicalize primitives. */
      J->base[dst-2] = tr;
      J->bcskip = 2;  /* Skip CALLM + select. */
    nyivarg:
      setintV(&J->errinfo, BC_VARG);
      lj_trace_err_info(J, LJ_TRERR_NYIBC);
/* -- Record allocations -------------------------------------------------- */

static TRef rec_tnew(jit_State *J, uint32_t ah)
  uint32_t asize = ah & 0x7ff;
  uint32_t hbits = ah >> 11;
  if (asize == 0x7ff) asize = 0x801;
  return emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
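/* Note: ah is the BC_TNEW operand, which packs the array-size hint into the
** low 11 bits and the hash-size hint into the upper bits; the saturated
** bytecode value 0x7ff (i.e. the hint overflowed) is widened to 0x801 above.
*/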
/* -- Concatenation ------------------------------------------------------- */

static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot)
  TRef *top = &J->base[topslot];
  lua_assert(baseslot < topslot);
  for (s = baseslot; s <= topslot; s++)
    (void)getslot(J, s);  /* Ensure all arguments have a reference. */
  if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) {
    TRef tr, hdr, *trp, *xbase, *base = &J->base[baseslot];
    /* First convert numbers to strings. */
    for (trp = top; trp >= base; trp--) {
      if (tref_isnumber(*trp))
        *trp = emitir(IRT(IR_TOSTR, IRT_STR), *trp,
                      tref_isnum(*trp) ? IRTOSTR_NUM : IRTOSTR_INT);
      else if (!tref_isstr(*trp))
    tr = hdr = emitir(IRT(IR_BUFHDR, IRT_P32),
                      lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET);
      tr = emitir(IRT(IR_BUFPUT, IRT_P32), tr, *trp++);
    } while (trp <= top);
    tr = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
    J->maxslot = (BCReg)(xbase - J->base);
    if (xbase == base) return tr;  /* Return simple concatenation result. */
    /* Pass partial result. */
    topslot = J->maxslot--;
    setstrV(J->L, &ix.keyv, &J2G(J)->strempty);  /* Simulate string result. */
    J->maxslot = topslot-1;
    copyTV(J->L, &ix.keyv, &J->L->base[topslot]);
  copyTV(J->L, &ix.tabv, &J->L->base[topslot-1]);
  memcpy(savetv, &J->L->base[topslot-1], sizeof(savetv));  /* Save slots. */
  rec_mm_arith(J, &ix, MM_concat);  /* Call __concat metamethod. */
  memcpy(&J->L->base[topslot-1], savetv, sizeof(savetv));  /* Restore slots. */
  return 0;  /* No result yet. */
/* -- Record bytecode ops ------------------------------------------------- */

/* Prepare for comparison. */
static void rec_comp_prep(jit_State *J)
{
  /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */
  if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins)
    emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
  lj_snap_add(J);
}

/* Fixup comparison. */
static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
{
  BCIns jmpins = pc[1];
  const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
  SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
  /* Set PC to opposite target to avoid re-recording the comp. in side trace. */
  J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
  J->needsnap = 1;
  if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
  lj_snap_shrink(J);  /* Shrink last snapshot if possible. */
}
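
/* Illustrative sketch (not used by the recorder): a comparison bytecode is
** always immediately followed by a JMP, and a jump at address p branches to
** p + 1 + bc_j(*p). The helper name comp_jump_target is hypothetical; it
** merely restates the npc computation in rec_comp_fixup().
*/
static LJ_AINLINE const BCIns *comp_jump_target(const BCIns *pc)
{
  return pc + 2 + bc_j(pc[1]);  /* pc[1] is the JMP paired with the comparison. */
}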
/* Record the next bytecode instruction (_before_ it's executed). */
void lj_record_ins(jit_State *J)
{
  cTValue *lbase;
  RecordIndex ix;
  const BCIns *pc;
  BCIns ins;
  BCOp op;
  TRef ra, rb, rc;

  /* Perform post-processing action before recording the next instruction. */
  if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
    switch (J->postproc) {
    case LJ_POST_FIXCOMP:  /* Fixup comparison. */
      pc = frame_pc(&J2G(J)->tmptv);
      rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
      /* fallthrough */
    case LJ_POST_FIXGUARD:  /* Fixup and emit pending guard. */
    case LJ_POST_FIXGUARDSNAP:  /* Fixup and emit pending guard and snapshot. */
      if (!tvistruecond(&J2G(J)->tmptv2)) {
        J->fold.ins.o ^= 1;  /* Flip guard to opposite. */
        if (J->postproc == LJ_POST_FIXGUARDSNAP) {
          SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
          J->cur.snapmap[snap->mapofs+snap->nent-1]--;  /* False -> true. */
        }
      }
      lj_opt_fold(J);  /* Emit pending guard. */
      break;
    case LJ_POST_FIXBOOL:
      if (!tvistruecond(&J2G(J)->tmptv2)) {
        BCReg s;
        TValue *tv = J->L->base;
        for (s = 0; s < J->maxslot; s++)  /* Fixup stack slot (if any). */
          if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) {
            J->base[s] = TREF_FALSE;
            break;
          }
      }
      break;
    case LJ_POST_FIXCONST:
      {
        BCReg s;
        TValue *tv = J->L->base;
        for (s = 0; s < J->maxslot; s++)  /* Constify stack slots (if any). */
          if (J->base[s] == TREF_NIL && !tvisnil(&tv[s]))
            J->base[s] = lj_record_constify(J, &tv[s]);
      }
      break;
    case LJ_POST_FFRETRY:  /* Suppress recording of retried fast function. */
      if (bc_op(*J->pc) >= BC__MAX)
        return;
      break;
    default: lua_assert(0); break;
    }
    J->postproc = LJ_POST_NONE;
  }

  /* Need snapshot before recording next bytecode (e.g. after a store). */
  if (J->needsnap) {
    J->needsnap = 0;
    lj_snap_purge(J);
    lj_snap_add(J);
    J->mergesnap = 1;
  }

  /* Skip some bytecodes. */
  if (LJ_UNLIKELY(J->bcskip > 0)) {
    J->bcskip--;
    return;
  }

  /* Record only closed loops for root traces. */
  pc = J->pc;
  if (J->framedepth == 0 &&
      (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent)
    lj_trace_err(J, LJ_TRERR_LLEAVE);

#ifdef LUA_USE_ASSERT
  rec_check_slots(J);
  rec_check_ir(J);
#endif

  rec_profile_ins(J, pc);

  /* Keep a copy of the runtime values of var/num/str operands. */
#define rav (&ix.valv)
#define rbv (&ix.tabv)
#define rcv (&ix.keyv)
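
  /* For example, for BC_ADDVN (A = destination slot, B = variable slot,
  ** C = number constant index), the switches below leave ra as a plain slot
  ** number for now (the destination is stored after the main switch), turn
  ** rb into a slot reference via getslot(), and turn rc into a number
  ** constant reference, while rbv/rcv keep the runtime operand values that
  ** guide specialization.
  */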
  lbase = J->L->base;
  ins = *pc;
  op = bc_op(ins);
  ra = bc_a(ins);
  ix.val = 0;
  switch (bcmode_a(op)) {
  case BCMvar:
    copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break;
  default: break;  /* Handled later. */
  }
  rb = bc_b(ins);
  rc = bc_c(ins);
  switch (bcmode_b(op)) {
  case BCMnone: rb = 0; rc = bc_d(ins); break;  /* Upgrade rc to 'rd'. */
  case BCMvar:
    copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break;
  default: break;  /* Handled later. */
  }
  switch (bcmode_c(op)) {
  case BCMvar:
    copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
  case BCMpri: setitype(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
  case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
    copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
    lj_ir_knumint(J, numV(tv)); } break;
  case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc));
    setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break;
  default: break;  /* Handled later. */
  }

  switch (op) {

  /* -- Comparison ops ---------------------------------------------------- */

  case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
    if (tref_iscdata(ra) || tref_iscdata(rc)) {
      rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt);
      break;
    }
    /* Emit nothing for two numeric or string consts. */
    if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) {
      IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra);
      IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc);
      int irop;
      if (ta != tc) {
        /* Widen mixed number/int comparisons to number/number comparison. */
        if (ta == IRT_INT && tc == IRT_NUM) {
          ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT);
          ta = IRT_NUM;
        } else if (ta == IRT_NUM && tc == IRT_INT) {
          rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
        } else if (LJ_52) {
          ta = IRT_NIL;  /* Force metamethod for different types. */
        } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) &&
                     (tc == IRT_FALSE || tc == IRT_TRUE))) {
          break;  /* Interpreter will throw for two different types. */
        }
      }
      rec_comp_prep(J);
      irop = (int)op - (int)BC_ISLT + (int)IR_LT;
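      /* E.g. BC_ISLT maps to IR_LT, BC_ISGE to IR_GE, BC_ISLE to IR_LE and
      ** BC_ISGT to IR_GT: the bytecode and IR comparison opcodes are laid
      ** out in the same order, so a constant offset is enough.
      */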
      if (ta == IRT_NUM) {
        if ((irop & 1)) irop ^= 4;  /* ISGE/ISGT are unordered. */
        if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
          irop ^= 5;
      } else if (ta == IRT_INT) {
        if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
          irop ^= 1;
      } else if (ta == IRT_STR) {
        if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1;
        ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc);
        rc = lj_ir_kint(J, 0);
        ta = IRT_INT;
      } else {
        rec_mm_comp(J, &ix, (int)op);
        break;
      }
      emitir(IRTG(irop, ta), ra, rc);
      rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1);
    }
    break;

  case BC_ISEQV: case BC_ISNEV:
  case BC_ISEQS: case BC_ISNES:
  case BC_ISEQN: case BC_ISNEN:
  case BC_ISEQP: case BC_ISNEP:
    if (tref_iscdata(ra) || tref_iscdata(rc)) {
      rec_mm_comp_cdata(J, &ix, op, MM_eq);
      break;
    }
    /* Emit nothing for two non-table, non-udata consts. */
    if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) {
      int diff;
      rec_comp_prep(J);
      diff = lj_record_objcmp(J, ra, rc, rav, rcv);
      if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra)))
        rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff);
      else if (diff == 1)  /* Only check __eq if different, but same type. */
        rec_mm_equal(J, &ix, (int)op);
    }
    break;

  /* -- Unary test and copy ops ------------------------------------------- */

  case BC_ISTC: case BC_ISFC:
    if ((op & 1) == tref_istruecond(rc))
      rc = 0;  /* Don't store if condition is not true. */
    /* fallthrough */
  case BC_IST: case BC_ISF:  /* Type specialization suffices. */
    if (bc_a(pc[1]) < J->maxslot)
      J->maxslot = bc_a(pc[1]);  /* Shrink used slots. */
    break;

  case BC_ISTYPE: case BC_ISNUM:
    /* These coercions need to correspond with lj_meta_istype(). */
    if (LJ_DUALNUM && rc == ~LJ_TNUMX+1)
      ra = lj_opt_narrow_toint(J, ra);
    else if (rc == ~LJ_TNUMX+2)
      ra = lj_ir_tonum(J, ra);
    else if (rc == ~LJ_TSTR+1)
      ra = lj_ir_tostr(J, ra);
    /* else: type specialization suffices. */
    J->base[bc_a(ins)] = ra;
    break;

  /* -- Unary ops --------------------------------------------------------- */

  case BC_NOT:
    /* Type specialization already forces const result. */
    rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE;
    break;

  case BC_LEN:
    if (tref_isstr(rc))
      rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
    else if (!LJ_52 && tref_istab(rc))
      rc = lj_ir_call(J, IRCALL_lj_tab_len, rc);
    else
      rc = rec_mm_len(J, rc, rcv);
    break;

  /* -- Arithmetic ops ---------------------------------------------------- */

  case BC_UNM:
    if (tref_isnumber_str(rc)) {
      rc = lj_opt_narrow_unm(J, rc, rcv);
    } else {
      copyTV(J->L, &ix.tabv, rcv);
      rc = rec_mm_arith(J, &ix, MM_unm);
    }
    break;

  case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV:
    /* Swap rb/rc and rbv/rcv. rav is temp. */
    ix.tab = rc; ix.key = rc = rb; rb = ix.tab;
    copyTV(J->L, rav, rbv);
    copyTV(J->L, rbv, rcv);
    copyTV(J->L, rcv, rav);
    if (op == BC_MODNV)
      goto recmod;
    /* fallthrough */
  case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN:
  case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: {
    MMS mm = bcmode_mm(op);
    if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
      rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv,
                               (int)mm - (int)MM_add + (int)IR_ADD);
    else
      rc = rec_mm_arith(J, &ix, mm);
    break;
    }

  case BC_MODVN: case BC_MODVV:
  recmod:
    if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
      rc = lj_opt_narrow_mod(J, rb, rc, rcv);
    else
      rc = rec_mm_arith(J, &ix, MM_mod);
    break;

  case BC_POW:
    if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
      rc = lj_opt_narrow_pow(J, lj_ir_tonum(J, rb), rc, rcv);
    else
      rc = rec_mm_arith(J, &ix, MM_pow);
    break;

  /* -- Miscellaneous ops ------------------------------------------------- */

  case BC_CAT:
    rc = rec_cat(J, rb, rc);
    break;

  /* -- Constant and move ops --------------------------------------------- */

  case BC_MOV:
    /* Clear gap of method call to avoid resurrecting previous refs. */
    if (ra > J->maxslot) J->base[ra-1] = 0;
    break;
  case BC_KSTR: case BC_KNUM: case BC_KPRI:
    break;
  case BC_KSHORT:
    rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
    break;
  case BC_KNIL:
    while (ra <= rc)
      J->base[ra++] = TREF_NIL;
    if (rc >= J->maxslot) J->maxslot = rc+1;
    break;
  case BC_KCDATA:
    rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA);
    break;

  /* -- Upvalue and function ops ------------------------------------------ */

  case BC_UGET:
    rc = rec_upvalue(J, rc, 0);
    break;
  case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP:
    rec_upvalue(J, ra, rc);
    break;

  /* -- Table ops --------------------------------------------------------- */

  case BC_GGET: case BC_GSET:
    settabV(J->L, &ix.tabv, tabref(J->fn->l.env));
    ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV);
    ix.idxchain = LJ_MAX_IDXCHAIN;
    rc = lj_record_idx(J, &ix);
    break;

  case BC_TGETB: case BC_TSETB:
    setintV(&ix.keyv, (int32_t)rc);
    ix.key = lj_ir_kint(J, (int32_t)rc);
    /* fallthrough */
  case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS:
    ix.idxchain = LJ_MAX_IDXCHAIN;
    rc = lj_record_idx(J, &ix);
    break;

  case BC_TGETR: case BC_TSETR:
    ix.idxchain = 0;
    rc = lj_record_idx(J, &ix);
    break;

  case BC_TSETM:
    rec_tsetm(J, ra, (BCReg)(J->L->top - J->L->base), (int32_t)rcv->u32.lo);
    break;

  case BC_TNEW:
    rc = rec_tnew(J, rc);
    break;
  case BC_TDUP:
    rc = emitir(IRTG(IR_TDUP, IRT_TAB),
                lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
    break;

  /* -- Calls and vararg handling ----------------------------------------- */

  case BC_ITERC:
    J->base[ra] = getslot(J, ra-3);
    J->base[ra+1] = getslot(J, ra-2);
    J->base[ra+2] = getslot(J, ra-1);
    { /* Do the actual copy now because lj_record_call needs the values. */
      TValue *b = &J->L->base[ra];
      copyTV(J->L, b, b-3);
      copyTV(J->L, b+1, b-2);
      copyTV(J->L, b+2, b-1);
    }
    lj_record_call(J, ra, (ptrdiff_t)rc-1);
    break;

  /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
  case BC_CALLM:
    rc = (BCReg)(J->L->top - J->L->base) - ra;
    /* fallthrough */
  case BC_CALL:
    lj_record_call(J, ra, (ptrdiff_t)rc-1);
    break;

  case BC_CALLMT:
    rc = (BCReg)(J->L->top - J->L->base) - ra;
    /* fallthrough */
  case BC_CALLT:
    lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
    break;

  case BC_VARG:
    rec_varg(J, ra, (ptrdiff_t)rb-1);
    break;

  /* -- Returns ----------------------------------------------------------- */

  case BC_RETM:
    /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */
    rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
    /* fallthrough */
  case BC_RET: case BC_RET0: case BC_RET1:
    lj_record_ret(J, ra, (ptrdiff_t)rc-1);
    break;

  /* -- Loops and branches ------------------------------------------------ */

  case BC_FORI:
    if (rec_for(J, pc, 0) != LOOPEV_LEAVE)
      J->loopref = J->cur.nins;
    break;
  case BC_JFORI:
    lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL);
    if (rec_for(J, pc, 0) != LOOPEV_LEAVE)  /* Link to existing loop. */
      lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
    /* Continue tracing if the loop is not entered. */
    break;

  case BC_FORL:
    rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1));
    break;
  case BC_ITERL:
    rec_loop_interp(J, pc, rec_iterl(J, *pc));
    break;
  case BC_LOOP:
    rec_loop_interp(J, pc, rec_loop(J, ra));
    break;

  case BC_JFORL:
    rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1));
    break;
  case BC_JITERL:
    rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins));
    break;
  case BC_JLOOP:
    rec_loop_jit(J, rc, rec_loop(J, ra));
    break;

  case BC_IFORL: case BC_IITERL: case BC_ILOOP:
  case BC_IFUNCF: case BC_IFUNCV:
    lj_trace_err(J, LJ_TRERR_BLACKL);
    break;

  case BC_JMP:
    if (ra < J->maxslot)
      J->maxslot = ra;  /* Shrink used slots. */
    break;

  /* -- Function headers -------------------------------------------------- */

  case BC_JFUNCF:
    rec_func_jit(J, rc);
    break;

  case BC_JFUNCV:
    lua_assert(0);  /* Cannot happen. No hotcall counting for vararg funcs. */
    break;

  case BC_FUNCC:
    lj_ffrecord_func(J);
    break;

  default:
    if (op >= BC__MAX) {
      lj_ffrecord_func(J);
      break;
    }
    setintV(&J->errinfo, (int32_t)op);
    lj_trace_err_info(J, LJ_TRERR_NYIBC);
    break;
  }

  /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
  if (bcmode_a(op) == BCMdst && rc) {
    J->base[ra] = rc;
    if (ra >= J->maxslot) J->maxslot = ra+1;
  }

#undef rav
#undef rbv
#undef rcv

  /* Limit the number of recorded IR instructions. */
  if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord])
    lj_trace_err(J, LJ_TRERR_TRACEOV);
}
/* -- Recording setup ----------------------------------------------------- */

/* Setup recording for a root trace started by a hot loop. */
static const BCIns *rec_setup_root(jit_State *J)
{
  /* Determine the next PC and the bytecode range for the loop. */
  const BCIns *pcj, *pc = J->pc;
  BCIns ins = *pc;
  BCReg ra = bc_a(ins);
  switch (bc_op(ins)) {
  case BC_FORL:
    J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
    pc += 1+bc_j(ins);
    J->bc_min = pc;
    break;
  case BC_ITERL:
    lua_assert(bc_op(pc[-1]) == BC_ITERC);
    J->maxslot = ra + bc_b(pc[-1]) - 1;
    J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
    pc += 1+bc_j(ins);
    lua_assert(bc_op(pc[-1]) == BC_JMP);
    J->bc_min = pc;
    break;
  case BC_LOOP:
    /* Only check BC range for real loops, but not for "repeat until true". */
    pcj = pc + bc_j(ins);
    ins = *pcj;
    if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) {
      J->bc_min = pcj+1 + bc_j(ins);
      J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
    }
    J->maxslot = ra;
    pc++;
    break;
  case BC_RET: case BC_RET0: case BC_RET1:
    /* No bytecode range check for down-recursive root traces. */
    J->maxslot = ra + bc_d(ins) - 1;
    break;
  case BC_FUNCF:
    /* No bytecode range check for root traces started by a hot call. */
    J->maxslot = J->pt->numparams;
    pc++;
    break;
  case BC_CALLM: case BC_CALL: case BC_ITERC:
    /* No bytecode range check for stitched traces. */
    pc++;
    break;
  }
  return pc;
}
/* Setup for recording a new trace. */
void lj_record_setup(jit_State *J)
{
  uint32_t i;

  /* Initialize state related to current trace. */
  memset(J->slot, 0, sizeof(J->slot));
  memset(J->chain, 0, sizeof(J->chain));
  memset(J->bpropcache, 0, sizeof(J->bpropcache));
  J->scev.idx = REF_NIL;
  setmref(J->scev.pc, NULL);

  J->baseslot = 1;  /* Invoking function is at base[-1]. */
  J->base = J->slot + J->baseslot;

  J->instunroll = J->param[JIT_P_instunroll];
  J->loopunroll = J->param[JIT_P_loopunroll];

  J->bc_min = NULL;  /* Means no limit. */
  J->bc_extent = ~(MSize)0;

  /* Emit instructions for fixed references. Also triggers initial IR alloc. */
  emitir_raw(IRT(IR_BASE, IRT_P32), J->parent, J->exitno);
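  /* The loop below pre-seeds the three fixed references REF_NIL, REF_FALSE
  ** and REF_TRUE (REF_NIL-0/-1/-2) as primitive constants with the types
  ** IRT_NIL/IRT_FALSE/IRT_TRUE, so every trace can refer to nil, false and
  ** true without allocating per-trace constants; nk then marks REF_TRUE as
  ** the lowest constant reference of the trace.
  */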
  for (i = 0; i <= 2; i++) {
    IRIns *ir = IR(REF_NIL-i);
    ir->i = 0;
    ir->t.irt = (uint8_t)(IRT_NIL+i);
    ir->o = IR_KPRI;
    ir->prev = 0;
  }
  J->cur.nk = REF_TRUE;
  setmref(J->cur.startpc, J->pc);
  if (J->parent) {  /* Side trace. */
    GCtrace *T = traceref(J, J->parent);
    TraceNo root = T->root ? T->root : J->parent;
    J->cur.root = (uint16_t)root;
    J->cur.startins = BCINS_AD(BC_JMP, 0, 0);
    /* Check whether we could at least potentially form an extra loop. */
    if (J->exitno == 0 && T->snap[0].nent == 0) {
      /* We can narrow a FORL for some side traces, too. */
      if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI &&
          bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) {
        lj_snap_add(J);
        rec_for_loop(J, J->pc-1, &J->scev, 1);
        goto sidecheck;
      }
    } else {
      J->startpc = NULL;  /* Prevent forming an extra loop. */
    }
    lj_snap_replay(J, T);
  sidecheck:
    if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
        T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
                                    J->param[JIT_P_tryside]) {
      lj_record_stop(J, LJ_TRLINK_INTERP, 0);
    }
  } else {  /* Root trace. */
    J->cur.startins = *J->pc;
    J->pc = rec_setup_root(J);
    /* Note: the loop instruction itself is recorded at the end and not
    ** at the start! So snapshot #0 needs to point to the *next* instruction.
    */
    lj_snap_add(J);
    if (bc_op(J->cur.startins) == BC_FORL)
      rec_for_loop(J, J->pc-1, &J->scev, 1);
    else if (bc_op(J->cur.startins) == BC_ITERC)
      J->startpc = NULL;
    if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
      lj_trace_err(J, LJ_TRERR_STACKOV);
  }
#ifdef LUAJIT_ENABLE_CHECKHOOK
  /* Regularly check for instruction/line hooks from compiled code and
  ** exit to the interpreter if the hooks are set.
  **
  ** This is a compile-time option and disabled by default, since the
  ** hook checks may be quite expensive in tight loops.
  **
  ** Note this is only useful if hooks are *not* set most of the time.
  ** Use this only if you want to *asynchronously* interrupt the execution.
  **
  ** You can set the instruction hook via lua_sethook() with a count of 1
  ** from a signal handler or another native thread. Please have a look
  ** at the first few functions in luajit.c for an example (Ctrl-C handler).
  */
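  /* A minimal sketch of such an asynchronous interrupt, modeled on the
  ** Ctrl-C handler mentioned above; the names stop_hook, interrupt_L and
  ** on_sigint are hypothetical and not part of LuaJIT:
  **
  **   static lua_State *interrupt_L;
  **   static void stop_hook(lua_State *L, lua_Debug *ar)
  **   {
  **     (void)ar;
  **     lua_sethook(L, NULL, 0, 0);   // Remove the hook again.
  **     luaL_error(L, "interrupted!");
  **   }
  **   static void on_sigint(int sig)
  **   {
  **     (void)sig;
  **     // Count of 1: the hook fires on the next instruction; with
  **     // LUAJIT_ENABLE_CHECKHOOK compiled traces exit to the interpreter.
  **     lua_sethook(interrupt_L, stop_hook,
  **                 LUA_MASKCALL|LUA_MASKRET|LUA_MASKCOUNT, 1);
  **   }
  */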
  {
    TRef tr = emitir(IRT(IR_XLOAD, IRT_U8),
                     lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE);
    tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT)));
    emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
  }
#endif