/*
** MIPS IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
*/
/* -- Register allocator extensions --------------------------------------- */
/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}
/* Allocate a register or RID_ZERO. */
static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0)
      return RID_ZERO;
    r = ra_allocref(as, ref, allow);
  } else {
    ra_noweak(as, r);
  }
  return r;
}
/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_alloc1z(as, ir->op2, allow);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_alloc1z(as, ir->op1, allow);
    right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}
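/* Explanatory note (not part of the original sources): ra_alloc2() packs both
** allocated registers into a single Reg value, left in the low byte and right
** shifted left by 8. Callers unpack it with the idiom used throughout this
** file:
**   Reg right, left = ra_alloc2(as, ir, RSET_GPR);
**   right = (left >> 8); left &= 255;
*/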
/* -- Guard handling ------------------------------------------------------ */

/* Need some spare long-range jump slots, for out-of-range branches. */
#define MIPS_SPAREJUMP		4
/* Setup spare long-range jump slots per mcarea. */
static void asm_sparejump_setup(ASMState *as)
{
  MCode *mxp = as->mcbot;
  /* Assumes sizeof(MCLink) == 8. */
  if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == 8) {
    lua_assert(MIPSI_NOP == 0);
    memset(mxp+2, 0, MIPS_SPAREJUMP*8);
    mxp += MIPS_SPAREJUMP*2;
    lua_assert(mxp < as->mctop);
    lj_mcode_sync(as->mcbot, mxp);
    lj_mcode_commitbot(as->J, mxp);
    as->mcbot = mxp;
    as->mclim = as->mcbot + MCLIM_REDZONE;
  }
}
/* Setup exit stub after the end of each trace. */
static void asm_exitstub_setup(ASMState *as)
{
  MCode *mxp = as->mctop;
  /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
  *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
  *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
  lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0);
  *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
  as->mctop = mxp;
}

/* Keep this in-sync with exitstub_trace_addr(). */
#define asm_exitstub_addr(as)	((as)->mctop)
/* Emit conditional branch to exit for guard. */
static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
{
  MCode *target = asm_exitstub_addr(as);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    as->mcp = p+1;
    mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u);  /* Invert cond. */
    target = p;  /* Patch target later in asm_loop_fixup. */
  }
  emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
  emit_branch(as, mi, rs, rt, target);
}
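/* Explanatory note on the condition inversion above: for the main-opcode
** branches (BEQ/BNE/BLEZ/BGTZ, i.e. mi>>28 == 1) XORing 0x04000000 flips
** bit 26 and thus toggles BEQ<->BNE and BLEZ<->BGTZ. For the REGIMM and COP1
** branches (BLTZ/BGEZ and BC1F/BC1T) XORing 0x00010000 toggles the condition
** bit held in the rt/cc field instead.
*/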
/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31
/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}
/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}
/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (checki16(ofs)) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (checki16(ofs)) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        int32_t jgl = (intptr_t)J2G(as->J);
        if ((uint32_t)(ofs-jgl) < 65536) {
          *ofsp = ofs-jgl-32768;
          return RID_JGL;
        } else {
          *ofsp = (int16_t)ofs;
          return ra_allock(as, ofs-(int16_t)ofs, allow);
        }
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}
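/* Explanatory note: for constant closed upvalues the TValue address is
** checked against a 64K window around J2G(as->J). If it falls inside, the
** access is done relative to RID_JGL (which is biased by 32768), avoiding an
** address load; otherwise the high part is materialized in a register and
** only the low 16 bits are kept as the load/store offset.
*/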
/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
			 RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
        ref = ir->op1;
        ofs = ofs2;
      }
    } else if (ir->o == IR_STRREF) {
      int32_t ofs2 = 65536;
      lua_assert(ofs == 0);
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs2 = ofs + IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs2 = ofs + IR(ir->op1)->i;
        ref = ir->op2;
      }
      if (!checki16(ofs2)) {
        /* NYI: Fuse ADD with constant. */
        Reg right, left = ra_alloc2(as, ir, allow);
        right = (left >> 8); left &= 255;
        emit_hsi(as, mi, rt, RID_TMP, ofs);
        emit_dst(as, MIPSI_ADDU, RID_TMP, left, right);
        return;
      }
      ofs = ofs2;
    }
  }
  base = ra_alloc1(as, ref, allow);
  emit_hsi(as, mi, rt, base, ofs);
}
/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = 16;
  Reg gpr, fpr = REGARG_FIRSTFPR;
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    if (ref) {
      IRIns *ir = IR(ref);
      if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
	  !(ci->flags & CCI_VARARG)) {
	lua_assert(rset_test(as->freeset, fpr));  /* Already evicted. */
	ra_leftov(as, fpr, ref);
	fpr += 2;
	gpr += irt_isnum(ir->t) ? 2 : 1;
      } else {
	fpr = REGARG_LASTFPR+1;
	if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
	if (gpr <= REGARG_LASTGPR) {
	  lua_assert(rset_test(as->freeset, gpr));  /* Already evicted. */
	  if (irt_isfp(ir->t)) {
	    RegSet of = as->freeset;
	    Reg r;
	    /* Workaround to protect argument GPRs from being used for remat. */
	    as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
	    r = ra_alloc1(as, ref, RSET_FPR);
	    as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
	    if (irt_isnum(ir->t)) {
	      emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
	      emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
	      lua_assert(rset_test(as->freeset, gpr+1));  /* Already evicted. */
	      gpr += 2;
	    } else if (irt_isfloat(ir->t)) {
	      emit_tg(as, MIPSI_MFC1, gpr, r);
	      gpr++;
	    }
	  } else {
	    ra_leftov(as, gpr, ref);
	    gpr++;
	  }
	} else {
	  Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
	  if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
	  emit_spstore(as, ir, r, ofs);
	  ofs += irt_isnum(ir->t) ? 8 : 4;
	}
      }
    } else {
      fpr = REGARG_LASTFPR+1;
      if (gpr <= REGARG_LASTGPR)
	gpr++;
      else
	ofs += 4;
    }
    checkmclim(as);
  }
}
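/* Explanatory note on the argument setup above (o32 hard-float convention as
** used by this backend): FPRs are only used for FP arguments of non-vararg
** calls, doubles passed in GPRs are aligned to an (even,odd) register pair
** and moved with two MFC1 instructions, and remaining arguments spill to the
** stack starting just above the 16 byte register save area.
*/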
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP);
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lua_assert(!irt_ispri(ir->t));
    if (irt_isfp(ir->t)) {
      if ((ci->flags & CCI_CASTU64)) {
	int32_t ofs = sps_scale(ir->s);
	Reg dest = ir->r;
	if (ra_hasreg(dest)) {
	  ra_free(as, dest);
	  ra_modified(as, dest);
	  emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
	  emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
	}
	if (ofs) {
	  emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
	  emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
	}
      } else {
	ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}
static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need specific register for indirect calls. */
    Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
    MCode *p = as->mcp;
    if (r == RID_CFUNCADDR)
      *--p = MIPSI_NOP;
    else
      *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
    *--p = MIPSI_JALR | MIPSF_S(r);
    as->mcp = p;
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}
static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
		RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR);
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_FPRET);
  emit_call(as, (void *)lj_ir_callinfo[id].func);
  ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
}
/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guard(as, MIPSI_BNE, RID_TMP,
	    ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_tsi(as, MIPSI_LW, RID_TMP, base, -8);
}
/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guard(as, MIPSI_BC1F, 0, 0);
  emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
  emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fg(as, MIPSI_CVT_W_D, tmp, left);
}
static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
}
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lua_assert(irt_type(ir->t) != st);
  lua_assert(!(irt_isint64(ir->t) ||
	       (st == IRT_I64 || st == IRT_U64)));  /* Handled by SPLIT. */
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
	      dest, ra_alloc1(as, lref, RSET_FPR));
    } else if (st == IRT_U32) {  /* U32 to FP conversion. */
      /* y = (x ^ 0x8000000) + 2147483648.0 */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      emit_fgh(as, irt_isfloat(ir->t) ? MIPSI_ADD_S : MIPSI_ADD_D,
	       dest, dest, tmp);
      emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
	      dest, dest);
      if (irt_isfloat(ir->t))
	emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
		   (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
		   RSET_GPR);
      else
	emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		   (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
		   RSET_GPR);
      emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
      emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
      emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
	      dest, dest);
      emit_tg(as, MIPSI_MTC1, left, dest);
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      if (irt_isu32(ir->t)) {
	/* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
	emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
	emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
		tmp, tmp);
	emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
		 tmp, left, tmp);
	if (st == IRT_FLOAT)
	  emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
		     (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
		     RSET_GPR);
	else
	  emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		     (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
		     RSET_GPR);
      } else {
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
		tmp, left);
      }
    }
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
      if ((ir->op2 & IRCONV_SEXT)) {
	if ((as->flags & JIT_F_MIPS32R2)) {
	  emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
	} else {
	  uint32_t shift = st == IRT_I8 ? 24 : 16;
	  emit_dta(as, MIPSI_SRA, dest, dest, shift);
	  emit_dta(as, MIPSI_SLL, dest, left, shift);
	}
      } else {
	emit_tsi(as, MIPSI_ANDI, dest, left,
		 (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
      }
    } else {  /* 32/64 bit integer conversions. */
      /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}
static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if (ra_hasreg(ir->r)) rset_set(drop, ir->r);  /* Spill dest reg (if any). */
  ra_evictset(as, drop);
  asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1),
	   RID_SP, sps_scale(ir->s));
}
/* -- Memory references --------------------------------------------------- */

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref))  /* Use the number constant itself as a TValue. */
      ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
    else  /* Otherwise force a spill and use the spill slot. */
      emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir));
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_setgl(as, src, tmptv.gcr);
    }
    type = ra_allock(as, irt_toitype(ir->t), allow);
    emit_setgl(as, type, tmptv.it);
  }
}
static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    ofs += 8*IR(ir->op2)->i;
    if (checki16(ofs)) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_tsi(as, MIPSI_ADDIU, dest, base, ofs);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base);
  emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
}
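/* Explanatory note: the array part of a table stores 8 byte TValues, so the
** element address is base + 8*index -- folded into a constant offset for
** constant keys (including the colocated-array case via asm_fuseabase), or
** emitted as sll idx,3 followed by addu for variable keys.
*/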
/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  rset_clear(allow, tab);
  if (irt_isnum(kt)) {
    key = ra_alloc1(as, refkey, RSET_FPR);
    tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
  } else if (!irt_ispri(kt)) {
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    type = ra_allock(as, irt_toitype(irkey->t), allow);
    rset_clear(allow, type);
  }
  tmp2 = ra_scratch(as, allow);
  rset_clear(allow, tmp2);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_EQ)
    asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));
  /* Follow hash chain until the end. */
  emit_move(as, dest, tmp2);
  l_loop = --as->mcp;
  emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ) {  /* Must match asm_guard(). */
    emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
    l_end = asm_exitstub_addr(as);
  }
  if (irt_isnum(kt)) {
    emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
    emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
    *--as->mcp = MIPSI_NOP;  /* Avoid NaN comparison overhead. */
    emit_branch(as, MIPSI_BEQ, tmp2, RID_ZERO, l_next);
    emit_tsi(as, MIPSI_SLTIU, tmp2, tmp2, (int32_t)LJ_TISNUM);
    emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
  } else {
    if (irt_ispri(kt)) {
      emit_branch(as, MIPSI_BEQ, tmp2, type, l_end);
    } else {
      emit_branch(as, MIPSI_BEQ, tmp1, key, l_end);
      emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.gcr));
      emit_branch(as, MIPSI_BNE, tmp2, type, l_next);
    }
  }
  emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.it));
  *l_loop = MIPSI_BNE | MIPSF_S(tmp2) | ((as->mcp-l_loop-1) & 0xffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    Reg tmphash = tmp1;
    if (irref_isk(refkey))
      tmphash = ra_allock(as, khash, allow);
    emit_dst(as, MIPSI_ADDU, dest, dest, tmp1);
    lua_assert(sizeof(Node) == 24);
    emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
    emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
    emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
    emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
    emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
    emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
    if (irref_isk(refkey)) {
      /* Nothing to do. */
    } else if (irt_isstr(kt)) {
      emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash));
    } else {  /* Must match with hash*() in lj_tab.c. */
      emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
      emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
      emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
      if (irt_isnum(kt)) {
	emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
	if ((as->flags & JIT_F_MIPS32R2)) {
	  emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
	} else {
	  emit_dst(as, MIPSI_OR, dest, dest, tmp1);
	  emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
	  emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
	}
	emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
	emit_tg(as, MIPSI_MFC1, tmp2, key);
	emit_tg(as, MIPSI_MFC1, tmp1, key+1);
      } else {
	emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
	emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
	emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
      }
    }
  }
}
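/* Explanatory note on the main-position computation above: executed in
** forward order the code loads t->hmask and t->node, computes
** tmp1 = hash & hmask, scales it by sizeof(Node) == 24 via
** (tmp1 << 5) - (tmp1 << 3), and adds it to the node base, i.e.
** dest = &t->node[hash & hmask], matching hashkey() in lj_tab.c.
*/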
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  int32_t lo, hi;
  lua_assert(ofs % sizeof(Node) == 0);
  if (ofs > 32736) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_tsi(as, MIPSI_ADDIU, dest, node, ofs);
  }
  if (!irt_ispri(irkey->t)) {
    key = ra_scratch(as, allow);
    rset_clear(allow, key);
  }
  if (irt_isnum(irkey->t)) {
    lo = (int32_t)ir_knum(irkey)->u32.lo;
    hi = (int32_t)ir_knum(irkey)->u32.hi;
  } else {
    lo = irkey->i;
    hi = irt_toitype(irkey->t);
    if (!ra_hasreg(key))
      goto nokey;
  }
  asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
nokey:
  asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
  if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
  emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
  if (ofs > 32736)
    emit_tsi(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow));
}
static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
      emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_tsi(as, MIPSI_LW, uv, func,
	     (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}
static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lua_assert(!ra_used(ir));
}
static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  int32_t ofs = (int32_t)sizeof(GCstr);
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
	       irr->o == IR_ADD && irref_isk(irr->op2) &&
	       checki16(ofs + IR(irr->op2)->i)) {
      ofs += IR(irr->op2)->i;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
    emit_dst(as, MIPSI_ADDU, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  ofs += IR(refk)->i;
  if (checki16(ofs))
    emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
  else
    emit_dst(as, MIPSI_ADDU, dest, r,
	     ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
}
/* -- Loads and stores ---------------------------------------------------- */

static MIPSIns asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return MIPSI_LB;
  case IRT_U8: return MIPSI_LBU;
  case IRT_I16: return MIPSI_LH;
  case IRT_U16: return MIPSI_LHU;
  case IRT_NUM: return MIPSI_LDC1;
  case IRT_FLOAT: return MIPSI_LWC1;
  default: return MIPSI_LW;
  }
}
static MIPSIns asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return MIPSI_SB;
  case IRT_I16: case IRT_U16: return MIPSI_SH;
  case IRT_NUM: return MIPSI_SDC1;
  case IRT_FLOAT: return MIPSI_SWC1;
  default: return MIPSI_SW;
  }
}
static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
  MIPSIns mi = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op2 == IRFL_TAB_ARRAY) {
    ofs = asm_fuseabase(as, ir->op1);
    if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
      emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs);
      return;
    }
  }
  ofs = field_ofs[ir->op2];
  lua_assert(!irt_isfp(ir->t));
  emit_tsi(as, mi, dest, idx, ofs);
}
static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    MIPSIns mi = asm_fxstoreins(ir);
    lua_assert(!irt_isfp(ir->t));
    emit_tsi(as, mi, src, idx, ofs);
  }
}
static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
}
static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
		 rset_exclude(RSET_GPR, src), ofs);
  }
}

#define asm_xstore(as, ir)	asm_xstore_(as, ir, 0)
static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  IRType1 t = ir->t;
  Reg dest = RID_NONE, type = RID_TMP, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (ra_used(ir)) {
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
  rset_clear(allow, idx);
  if (irt_isnum(t)) {
    asm_guard(as, MIPSI_BEQ, type, RID_ZERO);
    emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM);
    if (ra_hasreg(dest))
      emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
  } else {
    asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow));
    if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
  }
  emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
}
static void asm_ahustore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg idx, src = RID_NONE, type = RID_NONE;
  int32_t ofs = 0;
  if (ir->r == RID_SINK)
    return;
  if (irt_isnum(ir->t)) {
    src = ra_alloc1(as, ir->op2, RSET_FPR);
  } else {
    if (!irt_ispri(ir->t)) {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
    }
    type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
    rset_clear(allow, type);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
  if (irt_isnum(ir->t)) {
    emit_hsi(as, MIPSI_SDC1, src, idx, ofs);
  } else {
    if (ra_hasreg(src))
      emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
    emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
  }
}
static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  IRType1 t = ir->t;
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
  lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else if (ra_used(ir)) {
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    rset_clear(allow, base);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (irt_isint(t)) {
	Reg tmp = ra_scratch(as, RSET_FPR);
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp);
	dest = tmp;
	t.irt = IRT_NUM;  /* Check for original type. */
      } else {
	Reg tmp = ra_scratch(as, RSET_GPR);
	emit_fg(as, MIPSI_CVT_D_W, dest, dest);
	emit_tg(as, MIPSI_MTC1, tmp, dest);
	dest = tmp;
	t.irt = IRT_INT;  /* Check for original type. */
      }
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
  rset_clear(allow, base);
dotypecheck:
  if (irt_isnum(t)) {
    if ((ir->op2 & IRSLOAD_TYPECHECK)) {
      asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
      type = RID_TMP;
    }
    if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
  } else {
    if ((ir->op2 & IRSLOAD_TYPECHECK)) {
      Reg ktype = ra_allock(as, irt_toitype(t), allow);
      asm_guard(as, MIPSI_BNE, RID_TMP, ktype);
      type = RID_TMP;
    }
    if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
  }
  if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
}
/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet drop = RSET_SCRATCH;
  lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL));

  as->gcsteps++;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    int32_t ofs = sizeof(GCcdata);
    lua_assert(sz == 4 || sz == 8);
    if (sz == 8) {
      ofs += 4;
      lua_assert((ir+1)->o == IR_HIOP);
      if (LJ_LE) ir++;
    }
    for (;;) {
      Reg r = ra_alloc1z(as, ir->op2, allow);
      emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; if (LJ_BE) ir++; else ir--;
    }
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
  emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
  emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
  emit_ti(as, MIPSI_LI, RID_TMP, id);  /* Lower 16 bit used. Sign-ext ok. */
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
	       ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif
/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg link = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, link, gc.grayagain);
  emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP);  /* Clear black bit. */
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
  emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
}
static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_tsi(as, MIPSI_LBU, tmp, obj,
	   (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}
/* -- Arithmetic and logic operations ------------------------------------- */

static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_fgh(as, mi, dest, left, right);
}
static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_fg(as, mi, dest, left);
}
static void asm_fpmath(ASMState *as, IRIns *ir)
{
  if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
    return;
  if (ir->op2 <= IRFPM_TRUNC)
    asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
  else if (ir->op2 == IRFPM_SQRT)
    asm_fpunary(as, ir, MIPSI_SQRT_D);
  else
    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
}
static void asm_add(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, MIPSI_ADD_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    if (irref_isk(ir->op2)) {
      int32_t k = IR(ir->op2)->i;
      if (checki16(k)) {
	emit_tsi(as, MIPSI_ADDIU, dest, left, k);
	return;
      }
    }
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dst(as, MIPSI_ADDU, dest, left, right);
  }
}
static void asm_sub(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, MIPSI_SUB_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, MIPSI_SUBU, dest, left, right);
  }
}
static void asm_mul(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, MIPSI_MUL_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, MIPSI_MUL, dest, left, right);
  }
}

#define asm_div(as, ir)		asm_fparith(as, ir, MIPSI_DIV_D)
#define asm_mod(as, ir)		asm_callid(as, ir, IRCALL_lj_vm_modi)
#define asm_pow(as, ir)		asm_callid(as, ir, IRCALL_lj_vm_powi)
static void asm_neg(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, MIPSI_NEG_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
  }
}

#define asm_abs(as, ir)		asm_fpunary(as, ir, MIPSI_ABS_D)
#define asm_atan2(as, ir)	asm_callid(as, ir, IRCALL_atan2)
#define asm_ldexp(as, ir)	asm_callid(as, ir, IRCALL_ldexp)
static void asm_arithov(ASMState *as, IRIns *ir)
{
  Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int k = IR(ir->op2)->i;
    if (ir->o == IR_SUBOV) k = -k;
    if (checki16(k)) {  /* (dest < left) == (k >= 0 ? 1 : 0) */
      left = ra_alloc1(as, ir->op1, RSET_GPR);
      asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      if (dest == left) emit_move(as, RID_TMP, left);
      return;
    }
  }
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
						 right), dest));
  asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
  emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
  if (ir->o == IR_ADDOV) {  /* ((dest^left) & (dest^right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
  } else {  /* ((dest^left) & (dest^~right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
    emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
  }
  emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
  emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
  if (dest == left || dest == right)
    emit_move(as, RID_TMP, dest == left ? left : right);
}
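/* Explanatory note on the overflow checks above: a signed addition overflows
** iff both operands have the same sign and the result's sign differs, which
** is exactly ((dest^left) & (dest^right)) < 0. For subtraction the sign of
** the right operand is conceptually flipped, hence the NOR (bitwise not) in
** the IR_SUBOV path. The guard branches on the resulting sign bit via BLTZ.
*/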
#define asm_addov(as, ir)	asm_arithov(as, ir)
#define asm_subov(as, ir)	asm_arithov(as, ir)
static void asm_mulov(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
						 right), dest));
  asm_guard(as, MIPSI_BNE, RID_TMP, tmp);
  emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31);
  emit_dst(as, MIPSI_MFHI, tmp, 0, 0);
  emit_dst(as, MIPSI_MFLO, dest, 0, 0);
  emit_dst(as, MIPSI_MULT, 0, left, right);
}
#if LJ_HASFFI
static void asm_add64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
      goto loarith;
    } else if (checki16(k)) {
      emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      goto loarith;
    }
  }
  emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dst(as, MIPSI_ADDU, dest, left, right);
loarith:
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      if (dest != left)
	emit_move(as, dest, left);
      return;
    } else if (checki16(k)) {
      if (dest == left) {
	Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
	emit_move(as, dest, tmp);
	dest = tmp;
      }
      emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      return;
    }
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (dest == left && dest == right) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
  emit_dst(as, MIPSI_ADDU, dest, left, right);
}
static void asm_sub64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  if (dest == left) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
}
static void asm_neg64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
}
#endif
static void asm_bnot(ASMState *as, IRIns *ir)
{
  Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
  IRIns *irl = IR(ir->op1);
  if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
    left = ra_alloc2(as, irl, RSET_GPR);
    right = (left >> 8); left &= 255;
  } else {
    left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    right = RID_ZERO;
  }
  emit_dst(as, MIPSI_NOR, dest, left, right);
}
static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_MIPS32R2)) {
    emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
    emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
  } else {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
    emit_dst(as, MIPSI_OR, dest, dest, tmp);
    emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
    emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
    emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
    emit_dta(as, MIPSI_SRL, dest, left, 8);
    emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
    emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
    emit_dta(as, MIPSI_SRL, tmp, left, 24);
    emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
  }
}
static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (checku16(k)) {
      emit_tsi(as, mik, dest, left, k);
      return;
    }
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dst(as, mi, dest, left, right);
}

#define asm_band(as, ir)	asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI)
#define asm_bor(as, ir)		asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI)
#define asm_bxor(as, ir)	asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI)
static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
    emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift);
  } else {
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, mi, dest, right, left);  /* Shift amount is in rs. */
  }
}

#define asm_bshl(as, ir)	asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL)
#define asm_bshr(as, ir)	asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL)
#define asm_bsar(as, ir)	asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA)
#define asm_brol(as, ir)	lua_assert(0)
static void asm_bror(ASMState *as, IRIns *ir)
{
  if ((as->flags & JIT_F_MIPS32R2)) {
    asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (irref_isk(ir->op2)) {  /* Constant shifts. */
      uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
      Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
      emit_rotr(as, dest, left, RID_TMP, shift);
    } else {
      Reg right, left = ra_alloc2(as, ir, RSET_GPR);
      right = (left >> 8); left &= 255;
      emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
      emit_dst(as, MIPSI_SRLV, dest, right, left);
      emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
      emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
    }
  }
}
static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
{
  if (irt_isnum(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    if (dest == left) {
      emit_fg(as, MIPSI_MOVT_D, dest, right);
    } else {
      emit_fg(as, MIPSI_MOVF_D, dest, left);
      if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
    }
    emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (dest == left) {
      emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
    } else {
      emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
      if (dest != right) emit_move(as, dest, right);
    }
    emit_dst(as, MIPSI_SLT, RID_TMP,
	     ismax ? left : right, ismax ? right : left);
  }
}

#define asm_min(as, ir)		asm_min_max(as, ir, 0)
#define asm_max(as, ir)		asm_min_max(as, ir, 1)
/* -- Comparisons --------------------------------------------------------- */

static void asm_comp(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT  ULT UGE ULE UGT. */
  IROp op = ir->o;
  if (irt_isnum(ir->t)) {
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
  } else {
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    if (op == IR_ABC) op = IR_UGT;
    if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) {
      MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
			    ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
      asm_guard(as, mi, left, 0);
    } else {
      if (irref_isk(ir->op2)) {
	int32_t k = IR(ir->op2)->i;
	if ((op&2)) k++;
	if (checki16(k)) {
	  asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
	  emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
		   RID_TMP, left, k);
	  return;
	}
      }
      right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
      asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
	       RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
    }
  }
}
static void asm_equal(ASMState *as, IRIns *ir)
{
  Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR);
  right = (left >> 8); left &= 255;
  if (irt_isnum(ir->t)) {
    asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
  } else {
    asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
  }
}
#if LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_comp64(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT  ULT UGE ULE UGT. */
  IROp op = (ir-1)->o;
  MCLabel l_end;
  Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
  righthi = (lefthi >> 8); lefthi &= 255;
  leftlo = ra_alloc2(as, ir-1,
		     rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
  rightlo = (leftlo >> 8); leftlo &= 255;
  asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
  l_end = emit_label(as);
  if (lefthi != righthi)
    emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
	     (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
  emit_dst(as, MIPSI_SLTU, RID_TMP,
	   (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
  if (lefthi != righthi)
    emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
}
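/* Explanatory note on the 64 bit compare above: the loword SLTU lands in the
** delay slot of the "beq lefthi, righthi, l_end" branch, so it always runs.
** If the hiwords are equal the code branches straight to the guard and uses
** that unsigned loword result; otherwise it falls through and the signed or
** unsigned ((op&4)) hiword comparison overwrites RID_TMP before the guard.
*/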
static void asm_comp64eq(ASMState *as, IRIns *ir)
{
  Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
  tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
  emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
  emit_dst(as, MIPSI_XOR, tmp, left, right);
  left = ra_alloc2(as, ir-1, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
}
#endif
/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
  } else if ((ir-1)->o < IR_EQ) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
    asm_comp64(as, ir);
    return;
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
    asm_comp64eq(as, ir);
    return;
  } else if ((ir-1)->o == IR_XSTORE) {
    as->curins--;  /* Handle both stores here. */
    if ((ir-1)->r != RID_SINK) {
      asm_xstore_(as, ir, LJ_LE ? 4 : 0);
      asm_xstore_(as, ir-1, LJ_LE ? 0 : 4);
    }
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_ADD: as->curins--; asm_add64(as, ir); break;
  case IR_SUB: as->curins--; asm_sub64(as, ir); break;
  case IR_NEG: as->curins--; asm_neg64(as, ir); break;
  case IR_CALLN:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);  /* Unused without FFI. */
#endif
}
/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE);
  emit_lsglptr(as, MIPSI_LBU, RID_TMP,
	       (int32_t)offsetof(global_State, hookmask));
}
/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
			    IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
  Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
  ExitNo oldsnap = as->snapno;
  rset_clear(allow, pbase);
  tmp = allow ? rset_pickbot(allow) :
		(pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
  as->snapno = exitno;
  asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
  as->snapno = oldsnap;
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0);
  else
    ra_modified(as, tmp);
  emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
  emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase);
  emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack));
  if (pbase == RID_TMP)
    emit_getgl(as, RID_TMP, jit_base);
  emit_getgl(as, tmp, cur_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0);
}
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
    } else {
      Reg type;
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
      if (!irt_ispri(ir->t)) {
	Reg src = ra_alloc1(as, ref, allow);
	rset_clear(allow, src);
	emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
	if (s == 0) continue;  /* Do not overwrite link to previous frame. */
	type = ra_allock(as, (int32_t)(*flinks--), allow);
      } else {
	type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      }
      emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}
/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  /* Assumes asm_snap_prep() already done. */
  asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  tmp = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
  emit_getgl(as, tmp, gc.threshold);
  emit_getgl(as, RID_TMP, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}
/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guard already inverted the cond branch. Only patch the target. */
    p[-3] |= ((target-p+2) & 0x0000ffffu);
  } else {
    p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
  }
}
/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (as->loopinv) as->mctop--;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_move(as, r, RID_BASE);
  }
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (as->loopinv) as->mctop--;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      rset_clear(allow, r);  /* Mark same BASE register as coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      rset_clear(allow, irp->r);
      emit_move(as, r, irp->r);  /* Move from coalesced parent reg. */
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return allow;
}
/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
  int32_t spadj = as->T->spadjust;
  MCode *p = as->mctop-1;
  *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
  p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  as->mcp = as->mctop-2;  /* Leave room for branch plus nop or stack adj. */
  as->invmcp = as->loopref ? as->mcp : NULL;
}
/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (args[i] && irt_isfp(IR(args[i])->t) &&
	nfpr > 0 && !(ci->flags & CCI_VARARG)) {
      nfpr--;
      ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else if (args[i] && irt_isnum(IR(args[i])->t)) {
      nfpr = 0;
      if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
    } else {
      nfpr = 0;
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  asm_sparejump_setup(as);
  asm_exitstub_setup(as);
}
/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *px = exitstub_trace_addr(T, exitno);
  MCode *cstart = NULL, *cstop = NULL;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
  MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
  for (p++; p < pe; p++) {
    if (*p == exitload) {  /* Look for load of exit number. */
      if (((p[-1] ^ (px-p)) & 0xffffu) == 0) {  /* Look for exitstub branch. */
	ptrdiff_t delta = target - p;
	if (((delta + 0x8000) >> 16) == 0) {  /* Patch in-range branch. */
	patchbranch:
	  p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
	  *p = MIPSI_NOP;  /* Replace the load of the exit number. */
	  cstop = p+1;
	  if (!cstart) cstart = p-1;
	} else {  /* Branch out of range. Use spare jump slot in mcarea. */
	  int i;
	  for (i = 2; i < 2+MIPS_SPAREJUMP*2; i += 2) {
	    if (mcarea[i] == tjump) {
	      delta = mcarea+i - p;
	      goto patchbranch;
	    } else if (mcarea[i] == MIPSI_NOP) {
	      mcarea[i] = tjump;
	      cstart = mcarea;
	      delta = mcarea+i - p;
	      goto patchbranch;
	    }
	  }
	  /* Ignore jump slot overflow. Child trace is simply not attached. */
	}
      } else if (p+1 == pe) {
	/* Patch NOP after code for inverted loop branch. Use of J is ok. */
	lua_assert(p[1] == MIPSI_NOP);
	p[1] = tjump;
	*p = MIPSI_NOP;  /* Replace the load of the exit number. */
	cstop = p+2;
	if (!cstart) cstart = p+1;
      }
    }
  }
  if (cstart) lj_mcode_sync(cstart, cstop);
  lj_mcode_patch(J, mcarea, 1);
}