/*
** NARROW: Narrowing of numbers to integers (double to int32_t).
** STRIPOV: Stripping of overflow checks.
** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_opt_narrow_c
/* Rationale for narrowing optimizations:
**
** Lua has only a single number type and this is a FP double by default.
** Narrowing doubles to integers does not pay off for the interpreter on a
** current-generation x86/x64 machine. Most FP operations need the same
** amount of execution resources as their integer counterparts, except
** with slightly longer latencies. Longer latencies are a non-issue for
** the interpreter, since they are usually hidden by other overhead.
**
** The total CPU execution bandwidth is the sum of the bandwidth of the FP
** and the integer units, because they execute in parallel. The FP units
** have an equal or higher bandwidth than the integer units. Not using
** them means losing execution bandwidth. Moving work away from them to
** the already quite busy integer units is a losing proposition.
**
** The situation for JIT-compiled code is a bit different: the higher code
** density makes the extra latencies much more visible. Tight loops expose
** the latencies for updating the induction variables. Array indexing
** requires narrowing conversions with high latencies and additional
** guards (to check that the index is really an integer). And many common
** optimizations only work on integers.
**
** One solution would be speculative, eager narrowing of all number loads.
** This causes many problems, like losing -0 or the need to resolve type
** mismatches between traces. It also effectively forces the integer type
** to have overflow-checking semantics. This impedes many basic
** optimizations and requires adding overflow checks to all integer
** arithmetic operations (whereas FP arithmetic can do without).
**
** Always replacing an FP op with an integer op plus an overflow check is
** counter-productive on a current-generation super-scalar CPU. Although
** the overflow check branches are highly predictable, they clog the
** execution port for the branch unit and tie up reorder buffers. This
** turns a pure data-flow dependency into a different data-flow
** dependency (with slightly lower latency) *plus* a control dependency.
** In general, you don't want to do this, since latencies due to data-flow
** dependencies can be well hidden by out-of-order execution.
**
** A better solution is to keep all numbers as FP values and only narrow
** when it's beneficial to do so. LuaJIT uses predictive narrowing for
** induction variables and demand-driven narrowing for index expressions,
** integer arguments and bit operations. Additionally it can eliminate or
** hoist most of the resulting overflow checks. Regular arithmetic
** computations are never narrowed to integers.
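**
** For illustration, a sketch (not verbatim recorder output): in
**   local s = 0; for i=1,n do s = s + t[i] end
** the induction variable 'i' is predictively narrowed to an integer,
** the array index 't[i]' demands an integer index on its own, but the
** accumulating 's = s + t[i]' remains a plain FP addition.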
**
** The integer type in the IR has convenient wrap-around semantics and
** ignores overflow. Extra operations have been added for
** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type.
** Apart from reducing the overall complexity of the compiler, this also
** nicely solves the problem where you want to apply algebraic
** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can
** use lea instead of an add for integer ADD, but not for ADDOV (lea does
** not affect the flags, but it helps to avoid register moves).
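**
** In C terms the two IR flavors behave roughly as follows (an
** illustrative sketch; guard_exit() stands in for leaving the trace):
**
**   int32_t add(int32_t a, int32_t b)    // IR ADD: wraps around
**   { return (int32_t)((uint32_t)a + (uint32_t)b); }
**
**   int32_t addov(int32_t a, int32_t b)  // IR ADDOV: guarded
**   { int64_t r = (int64_t)a + (int64_t)b;
**     if (r != (int32_t)r) guard_exit();
**     return (int32_t)r; }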
**
** All of the above has to be reconsidered for architectures with slow FP
** operations or without a hardware FPU. The dual-number mode of LuaJIT
** addresses this issue. Arithmetic operations are performed on integers
** as far as possible and overflow checks are added as needed.
**
** This implies that narrowing for integer arguments and bit operations
** should also strip overflow checks, e.g. replace ADDOV with ADD. The
** original overflow guards are weak and can be eliminated by DCE, if
** there's no other use.
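**
** E.g. (a sketch): bit.band(a+b, 7) recorded in dual-number mode yields
** BAND(ADDOV(a, b), 7). Since the bit operation wraps anyway, the ADDOV
** feeding it can be replaced by a plain ADD; the original ADDOV becomes
** weak and is removed by DCE unless something else still uses it.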
**
** A slight twist is that it's usually beneficial to use overflow-checked
** integer arithmetic if all inputs are already integers. This is the only
** change that affects the single-number mode, too.
*/
/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)			(&J->cur.ir[(ref)])
#define fins			(&J->fold.ins)

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

/* Emit raw IR without passing through optimizations. */
#define emitir_raw(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
/* -- Elimination of narrowing type conversions --------------------------- */

/* Narrowing of index expressions and bit operations is demand-driven. The
** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT)
** in all of these cases (e.g. array indexing or string indexing). FOLD
** already takes care of eliminating simple redundant conversions like
** CONV.int.num(CONV.num.int(x)) ==> x.
**
** But the surrounding code is FP-heavy and arithmetic operations are
** performed on FP numbers (for the single-number mode). Consider a common
** example such as 'x=t[i+1]', with 'i' already an integer (due to induction
** variable narrowing). The index expression would be recorded as
**   CONV.int.num(ADD(CONV.num.int(i), 1))
** which is clearly suboptimal.
**
** One can do better by recursively backpropagating the narrowing type
** conversion across FP arithmetic operations. This turns FP ops into
** their corresponding integer counterparts. Depending on the semantics of
** the conversion they also need to check for overflow. Currently only ADD
** and SUB are supported.
**
** The above example can be rewritten as
**   ADDOV(CONV.int.num(CONV.num.int(i)), 1)
** and then into ADDOV(i, 1) after folding of the conversions. The original
** FP ops remain in the IR and are eliminated by DCE since all references to
** them are gone.
**
** [In dual-number mode the trace recorder already emits ADDOV etc., but
** this can be further reduced. See below.]
**
** Special care has to be taken to avoid narrowing across an operation
** which is potentially operating on non-integral operands. One obvious
** case is when an expression contains a non-integral constant, but ends
** up as an integer index at runtime (like t[x+1.5] with x=0.5).
**
** Operations with two non-constant operands illustrate a similar problem
** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there,
** unless it can be proven that either operand is integral (e.g. by CSEing
** a previous conversion). As a not-so-obvious corollary this logic also
** applies for a whole expression tree (e.g. t[(a+1)+(b+1)]).
**
** Correctness of the transformation is guaranteed by never expanding the
** tree with more conversions than the single one we would need to emit
** if not backpropagating. TOBIT employs a more optimistic rule, because
** the conversion has special semantics, designed to make the life of the
** compiler writer easier. ;-)
**
** Using on-the-fly backpropagation of an expression tree doesn't work
** because it's unknown whether the transform is correct until the end.
** This either requires IR rollback and cache invalidation for every
** subtree or a two-pass algorithm. The former didn't work out too well,
** so the code now combines a recursive collector with a stack-based
** emitter.
**
** [A recursive backpropagation algorithm with backtracking, employing
** skip-list lookup and round-robin caching, emitting stack operations
** on-the-fly for a stack-based interpreter -- and all of that in a meager
** kilobyte? Yep, compilers are a great treasure chest. Throw away your
** textbooks and read the codebase of a compiler today!]
**
** There's another optimization opportunity for array indexing: it's
** always accompanied by an array bounds-check. The outermost overflow
** check may be delegated to the ABC operation. This works because ABC is
** an unsigned comparison and wrap-around due to overflow creates negative
** indexes, which fail the unsigned bounds check.
**
** But this optimization is only valid for constants that cannot overflow
** an int32_t into the range of valid array indexes [0..2^27+1). A check
** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30
** wraps to -2^30 - 1 -- both are outside the valid index range.
**
** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are
** quite common. So the above example finally ends up as ADD(i, 1)!
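**
** A worked example of why the delegation is safe (a sketch): take
** ADD(i, 1) with i = 0x7fffffff. The sum wraps to -0x80000000, which the
** unsigned ABC comparison sees as 0x80000000u -- far above any valid
** array size -- so the bounds check fails and the trace exits, just as
** the overflow check would have.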
**
** Later on, the assembler is able to fuse the whole array reference and
** the ADD into the memory operands of loads and other instructions. This
** is why LuaJIT is able to generate very pretty (and fast) machine code
** for array indexing. And that, my dear, concludes another story about
** one of the hidden secrets of LuaJIT ...
*/
/* Maximum backpropagation depth and maximum stack size. */
#define NARROW_MAX_BACKPROP	100
#define NARROW_MAX_STACK	256
/* The stack machine has a 32 bit instruction format: [IROpT | IRRef1]
** The lower 16 bits hold a reference (or 0). The upper 16 bits hold
** the IR opcode + type or one of the following special opcodes:
*/
enum {
  NARROW_REF,		/* Push ref. */
  NARROW_CONV,		/* Push conversion of ref. */
  NARROW_SEXT,		/* Push sign-extension of ref. */
  NARROW_INT		/* Push KINT ref. The next code holds an int32_t. */
};

typedef uint32_t NarrowIns;

#define NARROWINS(op, ref)	(((op) << 16) + (ref))
#define narrow_op(ins)		((IROpT)((ins) >> 16))
#define narrow_ref(ins)		((IRRef1)(ins))
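/* A quick encoding example (illustrative): with op == NARROW_CONV (1)
** and ref == 0x8003, NARROWINS(1, 0x8003) yields 0x00018003;
** narrow_op() recovers 1 from the upper half and narrow_ref() recovers
** 0x8003 from the lower half.
*/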
/* Context used for narrowing of type conversions. */
typedef struct NarrowConv {
  jit_State *J;		/* JIT compiler state. */
  NarrowIns *sp;	/* Current stack pointer. */
  NarrowIns *maxsp;	/* Maximum stack pointer minus redzone. */
  int lim;		/* Limit on the number of emitted conversions. */
  IRRef mode;		/* Conversion mode (IRCONV_*). */
  IRType t;		/* Destination type: IRT_INT or IRT_I64. */
  NarrowIns stack[NARROW_MAX_STACK];  /* Stack holding stack-machine code. */
} NarrowConv;
/* Lookup a reference in the backpropagation cache. */
static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode)
{
  ptrdiff_t i;
  for (i = 0; i < BPROP_SLOTS; i++) {
    BPropEntry *bp = &J->bpropcache[i];
    /* Stronger checks are ok, too. */
    if (bp->key == key && bp->mode >= mode &&
	((bp->mode ^ mode) & IRCONV_MODEMASK) == 0)
      return bp;
  }
  return NULL;
}
/* Add an entry to the backpropagation cache. */
static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode)
{
  uint32_t slot = J->bpropslot;
  BPropEntry *bp = &J->bpropcache[slot];
  J->bpropslot = (slot + 1) & (BPROP_SLOTS-1);
  bp->key = key;
  bp->val = val;
  bp->mode = mode;
}
/* Backpropagate overflow stripping. */
static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth)
{
  jit_State *J = nc->J;
  IRIns *ir = IR(ref);
  if (ir->o == IR_ADDOV || ir->o == IR_SUBOV ||
      (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) {
    BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT);
    if (bp) {
      ref = bp->val;
    } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
      narrow_stripov_backprop(nc, ir->op1, depth);
      narrow_stripov_backprop(nc, ir->op2, depth);
      *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref);
      return;
    }
  }
  *nc->sp++ = NARROWINS(NARROW_REF, ref);
}
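/* E.g. (a sketch): for ref = ADDOV(SUBOV(a, b), c) the collector emits
** the postorder stack program
**   NARROW_REF a, NARROW_REF b, SUB, NARROW_REF c, ADD
** which narrow_conv_emit() below replays into overflow-check-free IR.
*/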
/* Backpropagate narrowing conversion. Return number of needed conversions. */
static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth)
{
  jit_State *J = nc->J;
  IRIns *ir = IR(ref);
  IRRef cref;

  /* Check the easy cases first. */
  if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) {
    if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY)
      narrow_stripov_backprop(nc, ir->op1, depth+1);
    else
      *nc->sp++ = NARROWINS(NARROW_REF, ir->op1);  /* Undo conversion. */
    if (nc->t == IRT_I64)
      *nc->sp++ = NARROWINS(NARROW_SEXT, 0);  /* Sign-extend integer. */
    return 0;
  } else if (ir->o == IR_KNUM) {  /* Narrow FP constant. */
    lua_Number n = ir_knum(ir)->n;
    if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) {
      /* Allows a wider range of constants. */
      int64_t k64 = (int64_t)n;
      if (n == (lua_Number)k64) {  /* Only if const doesn't lose precision. */
	*nc->sp++ = NARROWINS(NARROW_INT, 0);
	*nc->sp++ = (NarrowIns)k64;  /* But always truncate to 32 bits. */
	return 0;
      }
    } else {
      int32_t k = lj_num2int(n);
      /* Only if constant is a small integer. */
      if (checki16(k) && n == (lua_Number)k) {
	*nc->sp++ = NARROWINS(NARROW_INT, 0);
	*nc->sp++ = (NarrowIns)k;
	return 0;
      }
    }
    return 10;  /* Never narrow other FP constants (this is rare). */
  }

  /* Try to CSE the conversion. Stronger checks are ok, too. */
  cref = J->chain[fins->o];
  while (cref > ref) {
    IRIns *cr = IR(cref);
    if (cr->op1 == ref &&
	(fins->o == IR_TOBIT ||
	 ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) &&
	  irt_isguard(cr->t) >= irt_isguard(fins->t)))) {
      *nc->sp++ = NARROWINS(NARROW_REF, cref);
      return 0;  /* Already there, no additional conversion needed. */
    }
    cref = cr->prev;
  }

  /* Backpropagate across ADD/SUB. */
  if (ir->o == IR_ADD || ir->o == IR_SUB) {
    /* Try cache lookup first. */
    IRRef mode = nc->mode;
    BPropEntry *bp;
    /* Inner conversions need a stronger check. */
    if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0)
      mode += IRCONV_CHECK-IRCONV_INDEX;
    bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
    if (bp) {
      *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
      return 0;
    } else if (nc->t == IRT_I64) {
      /* Try sign-extending from an existing (checked) conversion to int. */
      mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX;
      bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
      if (bp) {
	*nc->sp++ = NARROWINS(NARROW_REF, bp->val);
	*nc->sp++ = NARROWINS(NARROW_SEXT, 0);
	return 0;
      }
    }
    if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
      NarrowIns *savesp = nc->sp;
      int count = narrow_conv_backprop(nc, ir->op1, depth);
      count += narrow_conv_backprop(nc, ir->op2, depth);
      if (count <= nc->lim) {  /* Limit total number of conversions. */
	*nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref);
	return count;
      }
      nc->sp = savesp;  /* Too many conversions, need to backtrack. */
    }
  }

  /* Otherwise add a conversion. */
  *nc->sp++ = NARROWINS(NARROW_CONV, ref);
  return 1;
}
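/* E.g. (a sketch): backpropagating CONV.int.num(ADD(CONV.num.int(i), 1))
** in IRCONV_INDEX mode collects the stack program
**   NARROW_REF i, NARROW_INT, 1, ADD
** i.e. one small constant, one arithmetic op and zero extra conversions,
** which is within the limit. narrow_conv_emit() below then replays this
** into a single add of i and 1.
*/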
/* Emit the conversions collected during backpropagation. */
static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc)
{
  /* The fins fields must be saved now -- emitir() overwrites them. */
  IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0;
  IROpT convot = fins->ot;
  IRRef1 convop2 = fins->op2;
  NarrowIns *next = nc->stack;  /* List of instructions from backpropagation. */
  NarrowIns *last = nc->sp;
  NarrowIns *sp = nc->stack;  /* Recycle the stack to store operands. */
  while (next < last) {  /* Simple stack machine to process the ins. list. */
    NarrowIns ref = *next++;
    IROpT op = narrow_op(ref);
    if (op == NARROW_REF) {
      *sp++ = ref;
    } else if (op == NARROW_CONV) {
      *sp++ = emitir_raw(convot, ref, convop2);  /* Raw emit avoids a loop. */
    } else if (op == NARROW_SEXT) {
      lua_assert(sp >= nc->stack+1);
      sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1],
		      (IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
    } else if (op == NARROW_INT) {
      lua_assert(next < last);
      *sp++ = nc->t == IRT_I64 ?
	      lj_ir_kint64(J, (int64_t)(int32_t)*next++) :
	      lj_ir_kint(J, *next++);
    } else {  /* Regular IROpT. Pops two operands and pushes one result. */
      IRRef mode = nc->mode;
      lua_assert(sp >= nc->stack+2);
      sp--;
      /* Omit some overflow checks for array indexing. See comments above. */
      if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) {
	if (next == last && irref_isk(narrow_ref(sp[0])) &&
	    (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u)
	  guardot = 0;
	else  /* Otherwise cache a stronger check. */
	  mode += IRCONV_CHECK-IRCONV_INDEX;
      }
      sp[-1] = emitir(op+guardot, sp[-1], sp[0]);
      /* Add the result to the cache. */
      if (narrow_ref(ref))
	narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode);
    }
  }
  lua_assert(sp == nc->stack+1);
  return nc->stack[0];
}
/* Narrow a type conversion of an arithmetic operation. */
TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J)
{
  if ((J->flags & JIT_F_OPT_NARROW)) {
    NarrowConv nc;
    nc.J = J;
    nc.sp = nc.stack;
    nc.maxsp = &nc.stack[NARROW_MAX_STACK-4];
    nc.t = irt_type(fins->t);
    if (fins->o == IR_TOBIT) {
      nc.mode = IRCONV_TOBIT;  /* Used only in the backpropagation cache. */
      nc.lim = 2;  /* TOBIT can use a more optimistic rule. */
    } else {
      nc.mode = fins->op2;
      nc.lim = 1;
    }
    if (narrow_conv_backprop(&nc, fins->op1, 0) <= nc.lim)
      return narrow_conv_emit(J, &nc);
  }
  return NEXTFOLD;
}
/* -- Narrowing of implicit conversions ----------------------------------- */

/* Recursively strip overflow checks. */
static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode)
{
  IRRef ref = tref_ref(tr);
  IRIns *ir = IR(ref);
  int op = ir->o;
  if (op >= IR_ADDOV && op <= lastop) {
    BPropEntry *bp = narrow_bpc_get(J, ref, mode);
    if (bp) {
      return TREF(bp->val, irt_t(IR(bp->val)->t));
    } else {
      IRRef op1 = ir->op1, op2 = ir->op2;  /* The IR may be reallocated. */
      op1 = narrow_stripov(J, op1, lastop, mode);
      op2 = narrow_stripov(J, op2, lastop, mode);
      tr = emitir(IRT(op - IR_ADDOV + IR_ADD,
		      ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2);
      narrow_bpc_set(J, ref, tref_ref(tr), mode);
    }
  } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) {
    tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode);
  }
  return tr;
}
/* Narrow array index. */
TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr)
{
  IRIns *ir;
  lua_assert(tref_isnumber(tr));
  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
    return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX);
  /* Omit some overflow checks for array indexing. See comments above. */
  ir = IR(tref_ref(tr));
  if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) &&
      (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u)
    return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2);
  return tr;
}
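/* E.g. (a sketch): for tr = ADDOV(i, +10) this rewrites to ADD(i, 10),
** since the constant is within +-2^30 and a wrapped result is caught by
** the following unsigned ABC check (see the rationale above).
*/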
/* Narrow conversion to integer operand (overflow undefined). */
TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr)
{
  if (tref_isstr(tr))
    tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
    return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY);
  if (!tref_isinteger(tr))
    lj_trace_err(J, LJ_TRERR_BADTYPE);
  /*
  ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV.
  ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same.
  */
  return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
}
/* Narrow conversion to bitop operand (overflow wrapped). */
TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr)
{
  if (tref_isstr(tr))
    tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
    return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J));
  if (!tref_isinteger(tr))
    lj_trace_err(J, LJ_TRERR_BADTYPE);
  /*
  ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV.
  ** MULOV cannot be stripped due to precision widening.
  */
  return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
}
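/* Why MULOV cannot be stripped here, by example (a sketch): for
** a = b = 2^27+1 the exact product 2^54+2^28+1 is rounded by the FP
** multiply, so its low 32 bits (2^28) differ from the wrapped integer
** product (2^28+1). ADDOV/SUBOV results always fit into the 53 bit
** mantissa, so stripping them is safe.
*/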
/* Narrow C array index (overflow undefined). */
TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr)
{
  lua_assert(tref_isnumber(tr));
  if (tref_isnum(tr))
    return emitir(IRT(IR_CONV, IRT_INTP), tr,
		  (IRT_INTP<<5)|IRT_NUM|IRCONV_TRUNC|IRCONV_ANY);
  /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */
  return narrow_stripov(J, tr, IR_MULOV,
			LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) :
				((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT));
}
/* -- Narrowing of arithmetic operators ----------------------------------- */

/* Check whether a number fits into an int32_t (-0 is ok, too). */
static int numisint(lua_Number n)
{
  return (n == (lua_Number)lj_num2int(n));
}
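/* E.g. numisint(3.0) --> 1, numisint(3.5) --> 0, numisint(-0.0) --> 1
** (lj_num2int(-0.0) is 0 and 0.0 == -0.0 holds), numisint(2^31) --> 0
** since 2^31 is out of int32_t range.
*/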
/* Narrowing of arithmetic operations. */
TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
			 TValue *vb, TValue *vc, IROp op)
{
  if (tref_isstr(rb)) {
    rb = emitir(IRTG(IR_STRTO, IRT_NUM), rb, 0);
    lj_str_tonum(strV(vb), vb);
  }
  if (tref_isstr(rc)) {
    rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
    lj_str_tonum(strV(vc), vc);
  }
  /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */
  if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) &&
      tref_isinteger(rb) && tref_isinteger(rc) &&
      numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc),
			       (int)op - (int)IR_ADD)))
    return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc);
  if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT);
  if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
  return emitir(IRTN(op), rb, rc);
}
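/* Why MUL is excluded without LJ_DUALNUM, by example (a sketch): -4 * 0
** is -0 as an FP number, but MULOV would produce +0. Dual-number mode
** defines integer * integer to yield +0 here anyway, so the narrowing
** is only safe in that mode.
*/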
/* Narrowing of unary minus operator. */
TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc)
{
  if (tref_isstr(rc)) {
    rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
    lj_str_tonum(strV(vc), vc);
  }
  if (tref_isinteger(rc)) {
    if ((uint32_t)numberVint(vc) != 0x80000000u)
      return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc);
    rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
  }
  return emitir(IRTN(IR_NEG), rc, lj_ir_knum_neg(J));
}
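/* The 0x80000000u special case, by example: -(-2^31) overflows int32_t,
** i.e. SUBOV(0, -2^31) would always exit the trace. That single input
** value is therefore converted to an FP number and negated as such.
*/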
/* Narrowing of modulo operator. */
TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vc)
{
  TRef tmp;
  if (tvisstr(vc) && !lj_str_tonum(strV(vc), vc))
    lj_trace_err(J, LJ_TRERR_BADTYPE);
  if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) &&
      tref_isinteger(rb) && tref_isinteger(rc) &&
      (tvisint(vc) ? intV(vc) != 0 : !tviszero(vc))) {
    emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0));
    return emitir(IRTI(IR_MOD), rb, rc);
  }
  /* b % c ==> b - floor(b/c)*c */
  rb = lj_ir_tonum(J, rb);
  rc = lj_ir_tonum(J, rc);
  tmp = emitir(IRTN(IR_DIV), rb, rc);
  tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR);
  tmp = emitir(IRTN(IR_MUL), tmp, rc);
  return emitir(IRTN(IR_SUB), rb, tmp);
}
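/* A worked example of the FP fallback (matching Lua's floor-modulo):
**   -3 % 2  ==>  -3 - floor(-3/2)*2  ==  -3 - (-2)*2  ==  1
*/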
/* Narrowing of power operator or math.pow. */
TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vc)
{
  if (tvisstr(vc) && !lj_str_tonum(strV(vc), vc))
    lj_trace_err(J, LJ_TRERR_BADTYPE);
  /* Narrowing must be unconditional to preserve (-x)^i semantics. */
  if (tvisint(vc) || numisint(numV(vc))) {
    int checkrange = 0;
    /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */
    if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) {
      int32_t k = numberVint(vc);
      if (!(k >= -65536 && k <= 65536)) goto split_pow;
      checkrange = 1;
    }
    if (!tref_isinteger(rc)) {
      if (tref_isstr(rc))
	rc = emitir(IRTG(IR_STRTO, IRT_NUM), rc, 0);
      /* Guarded conversion to integer! */
      rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK);
    }
    if (checkrange && !tref_isk(rc)) {  /* Range guard: -65536 <= i <= 65536 */
      TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536));
      emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536));
    }
    return emitir(IRTN(IR_POW), rb, rc);
  }
split_pow:
  /* FOLD covers most cases, but some are easier to do here. */
  if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb)))))
    return rb;  /* 1 ^ x ==> 1 */
  rc = lj_ir_tonum(J, rc);
  if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5)
    return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT);  /* x ^ 0.5 ==> sqrt(x) */
  /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. */
  rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2);
  rc = emitir(IRTN(IR_MUL), rb, rc);
  return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2);
}
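/* E.g. (a sketch): x^i with a runtime-integer i stays IR POW with a
** guarded conversion of the exponent; x^0.5 becomes sqrt(x); everything
** else is split into exp2(y*log2(x)) and possibly rejoined by the
** assembler.
*/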
/* -- Predictive narrowing of induction variables ------------------------- */

/* Narrow a single runtime value. */
static int narrow_forl(jit_State *J, cTValue *o)
{
  if (tvisint(o)) return 1;
  if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o));
  return 0;
}
/* Narrow the FORL index type by looking at the runtime values. */
IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv)
{
  lua_assert(tvisnumber(&tv[FORL_IDX]) &&
	     tvisnumber(&tv[FORL_STOP]) &&
	     tvisnumber(&tv[FORL_STEP]));
  /* Narrow only if the runtime values of start/stop/step are all integers. */
  if (narrow_forl(J, &tv[FORL_IDX]) &&
      narrow_forl(J, &tv[FORL_STOP]) &&
      narrow_forl(J, &tv[FORL_STEP])) {
    /* And if the loop index can't possibly overflow. */
    lua_Number step = numberVnum(&tv[FORL_STEP]);
    lua_Number sum = numberVnum(&tv[FORL_STOP]) + step;
    if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0))