/*
** MIPS IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate a register or RID_ZERO. */
static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0)
      return RID_ZERO;
    r = ra_allocref(as, ref, allow);
  } else {
    ra_noweak(as, r);
  }
  return r;
}

/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_alloc1z(as, ir->op2, allow);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_alloc1z(as, ir->op1, allow);
    right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}

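/* Note: ra_alloc2 packs both registers into a single Reg value: left in
** bits 0-7, right in bits 8-15. Callers unpack it with the two-line idiom
** used throughout this file, e.g. in asm_fparith below:
**   Reg right, left = ra_alloc2(as, ir, RSET_FPR);
**   right = (left >> 8); left &= 255;
*/
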
/* -- Guard handling ------------------------------------------------------ */

/* Need some spare long-range jump slots, for out-of-range branches. */
#define MIPS_SPAREJUMP		4

/* Setup spare long-range jump slots per mcarea. */
static void asm_sparejump_setup(ASMState *as)
{
  MCode *mxp = as->mcbot;
  if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) {
    lua_assert(MIPSI_NOP == 0);
    memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode));
    mxp += MIPS_SPAREJUMP*2;
    lua_assert(mxp < as->mctop);
    lj_mcode_sync(as->mcbot, mxp);
    lj_mcode_commitbot(as->J, mxp);
    as->mcbot = mxp;
    as->mclim = as->mcbot + MCLIM_REDZONE;
  }
}

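/* Why spare jump slots: a MIPS branch only has a signed 16-bit word
** displacement (+-128 KB), while a J-type jump reaches anywhere within the
** current 256 MB segment. When a patched exit branch ends up beyond branch
** range, the patch logic can redirect it through one of these reserved
** MIPS_SPAREJUMP jump/delay-slot pairs at the start of the mcarea.
*/
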
/* Setup exit stub after the end of each trace. */
static void asm_exitstub_setup(ASMState *as)
{
  MCode *mxp = as->mctop;
  /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
  *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
  *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
  lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0);
  *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
  as->mctop = mxp;
}

/* Keep this in-sync with exitstub_trace_addr(). */
#define asm_exitstub_addr(as)	((as)->mctop)

/* Emit conditional branch to exit for guard. */
static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
{
  MCode *target = asm_exitstub_addr(as);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    as->mcp = p+1;
    mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u);  /* Invert cond. */
    target = p;  /* Patch target later in asm_loop_fixup. */
  }
  emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
  emit_branch(as, mi, rs, rt, target);
}

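/* How the condition is inverted: BEQ/BNE/BLEZ/BGTZ use primary opcodes
** 4-7, so (mi>>28) == 1 identifies them and flipping bit 26 (0x04000000)
** toggles BEQ<->BNE and BLEZ<->BGTZ. The REGIMM (BLTZ/BGEZ) and COP1
** (BC1F/BC1T) branches encode their sense in bit 16 of the rt/tf field
** instead, hence the 0x00010000 XOR.
*/
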
/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

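/* Background: lj_tab_new() may colocate a small array part directly after
** the GCtab itself. For such tables the array base is at a constant
** sizeof(GCtab) offset from the table pointer, so the t->array indirection
** can be dropped and the offset folded into the memory operand.
*/
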
/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (checki16(ofs)) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (checki16(ofs)) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        int32_t jgl = (intptr_t)J2G(as->J);
        if ((uint32_t)(ofs-jgl) < 65536) {
          *ofsp = ofs-jgl-32768;
          return RID_JGL;
        } else {
          *ofsp = (int16_t)ofs;
          return ra_allock(as, ofs-(int16_t)ofs, allow);
        }
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

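/* RID_JGL addressing: the JIT keeps J2G(as->J)+32768 in a reserved
** register, so anything within 64 KB of the global_State is reachable with
** one signed 16-bit displacement (note the -32768 rebias above and in
** emit_getgl/emit_setgl). Closed upvalues of constant functions that fall
** inside this window thus fuse to a RID_JGL-relative operand.
*/
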
/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
			 RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
        ref = ir->op1;
        ofs = ofs2;
      }
    } else if (ir->o == IR_STRREF) {
      int32_t ofs2 = 65536;
      lua_assert(ofs == 0);
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs2 = ofs + IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs2 = ofs + IR(ir->op1)->i;
        ref = ir->op2;
      }
      if (!checki16(ofs2)) {
        /* NYI: Fuse ADD with constant. */
        Reg right, left = ra_alloc2(as, ir, allow);
        right = (left >> 8); left &= 255;
        emit_hsi(as, mi, rt, RID_TMP, ofs);
        emit_dst(as, MIPSI_ADDU, RID_TMP, left, right);
        return;
      }
      ofs = ofs2;
    }
  }
  base = ra_alloc1(as, ref, allow);
  emit_hsi(as, mi, rt, base, ofs);
}

/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = 16;
  Reg gpr, fpr = REGARG_FIRSTFPR;
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    if (ref) {
      IRIns *ir = IR(ref);
      if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
          !(ci->flags & CCI_VARARG)) {
        lua_assert(rset_test(as->freeset, fpr));  /* Already evicted. */
        ra_leftov(as, fpr, ref);
        fpr += 2;
        gpr += irt_isnum(ir->t) ? 2 : 1;
      } else {
        fpr = REGARG_LASTFPR+1;
        if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
        if (gpr <= REGARG_LASTGPR) {
          lua_assert(rset_test(as->freeset, gpr));  /* Already evicted. */
          if (irt_isfp(ir->t)) {
            RegSet of = as->freeset;
            Reg r;
            /* Workaround to protect argument GPRs from being used for remat. */
            as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
            r = ra_alloc1(as, ref, RSET_FPR);
            as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
            if (irt_isnum(ir->t)) {
              emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
              emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
              lua_assert(rset_test(as->freeset, gpr+1));  /* Already evicted. */
              gpr += 2;
            } else if (irt_isfloat(ir->t)) {
              emit_tg(as, MIPSI_MFC1, gpr, r);
              gpr++;
            }
          } else {
            ra_leftov(as, gpr, ref);
            gpr++;
          }
        } else {
          Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
          if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
          emit_spstore(as, ir, r, ofs);
          ofs += irt_isnum(ir->t) ? 8 : 4;
        }
      }
    } else {
      fpr = REGARG_LASTFPR+1;
      if (gpr <= REGARG_LASTGPR)
        gpr++;
      else
        ofs += 4;
    }
    checkmclim(as);
  }
}

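/* The argument setup above follows the MIPS o32 calling convention: up to
** four words in a0-a3 (REGARG_FIRSTGPR..REGARG_LASTGPR), the first two FP
** arguments in f12/f14 unless the callee is vararg, doubles aligned to an
** even GPR pair, and stack arguments starting at ofs = 16 to preserve the
** mandatory 16-byte argument save area for the callee.
*/
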
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lua_assert(!irt_ispri(ir->t));
    if (irt_isfp(ir->t)) {
      if ((ci->flags & CCI_CASTU64)) {
        int32_t ofs = sps_scale(ir->s);
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
          emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
          emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
        }
        if (ofs) {
          emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
          emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
        }
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need specific register for indirect calls. */
    Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
    MCode *p = as->mcp;
    if (r == RID_CFUNCADDR)
      *--p = MIPSI_NOP;
    else
      *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
    *--p = MIPSI_JALR | MIPSF_S(r);
    as->mcp = p;
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

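/* Indirect calls are routed through RID_CFUNCADDR because MIPS ABI
** position-independent callees expect their own address in $t9 on entry.
** Note the reversed emit order above: the JALR is stored last, i.e. it
** precedes the MOVE in memory, so the MOVE into RID_CFUNCADDR executes in
** the branch delay slot of the call.
*/
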
static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
		RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR);
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_FPRET);
  emit_call(as, (void *)lj_ir_callinfo[id].func);
  ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guard(as, MIPSI_BNE, RID_TMP,
	    ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_tsi(as, MIPSI_LW, RID_TMP, base, -8);
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guard(as, MIPSI_BC1F, 0, 0);
  emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
  emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fg(as, MIPSI_CVT_W_D, tmp, left);
}

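/* Checked number->int conversion (read bottom-up, code is emitted in
** reverse): convert the operand to an int, move it to the GPR result,
** convert back to a double and guard on c.eq.d. If the round-trip is not
** bit-exact, the value had a fractional part or was out of range and the
** trace exits.
*/
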
static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
}

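/* TOBIT trick: op2 is the magic bias constant 2^52+2^51 supplied by the
** IR. Adding it pushes the integer-valued part of the operand into the
** low mantissa word of the double, so a single MFC1 of that word yields
** the argument converted to an integer modulo 2^32.
*/
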
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lua_assert(irt_type(ir->t) != st);
  lua_assert(!(irt_isint64(ir->t) ||
	       (st == IRT_I64 || st == IRT_U64)));  /* Handled by SPLIT. */
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
	      dest, ra_alloc1(as, lref, RSET_FPR));
    } else if (st == IRT_U32) {  /* U32 to FP conversion. */
      /* y = (x ^ 0x80000000) + 2147483648.0 */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
	emit_fg(as, MIPSI_CVT_S_D, dest, dest);
      /* Must perform arithmetic with doubles to keep the precision. */
      emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
      emit_fg(as, MIPSI_CVT_D_W, dest, dest);
      emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		 (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
		 RSET_GPR);
      emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
      emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
      emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
	      dest, dest);
      emit_tg(as, MIPSI_MTC1, left, dest);
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      if (irt_isu32(ir->t)) {
	/* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
	emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
	emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
		tmp, tmp);
	emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
		 tmp, left, tmp);
	if (st == IRT_FLOAT)
	  emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
		     (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
		     RSET_GPR);
	else
	  emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		     (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
		     RSET_GPR);
      } else {
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
		tmp, left);
      }
    }
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
      if ((ir->op2 & IRCONV_SEXT)) {
	if ((as->flags & JIT_F_MIPS32R2)) {
	  emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
	} else {
	  uint32_t shift = st == IRT_I8 ? 24 : 16;
	  emit_dta(as, MIPSI_SRA, dest, dest, shift);
	  emit_dta(as, MIPSI_SLL, dest, left, shift);
	}
      } else {
	emit_tsi(as, MIPSI_ANDI, dest, left,
		 (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
      }
    } else {  /* 32/64 bit integer conversions. */
      /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}

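/* Magic constants in the U32 paths above: U64x(41e00000,00000000) is the
** double 2^31 and U64x(4f000000,4f000000) holds the float 2^31 in both
** words. XOR-ing a u32 with 0x80000000 rebiases it into signed int range,
** so the cheap signed conversion plus an FP add/sub of 2^31 covers the
** full unsigned range in either direction.
*/
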
#if LJ_HASFFI
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  const CCallInfo *ci;
  IRRef args[2];
  args[LJ_BE?0:1] = ir->op1;
  args[LJ_BE?1:0] = (ir-1)->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  ci = &lj_ir_callinfo[id];
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if (ra_hasreg(ir->r)) rset_set(drop, ir->r);  /* Spill dest reg (if any). */
  ra_evictset(as, drop);
  asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1),
	   RID_SP, sps_scale(ir->s));
}

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref))  /* Use the number constant itself as a TValue. */
      ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
    else  /* Otherwise force a spill and use the spill slot. */
      emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir));
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_setgl(as, src, tmptv.gcr);
    }
    type = ra_allock(as, irt_toitype(ir->t), allow);
    emit_setgl(as, type, tmptv.it);
  }
}

static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}

/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    ofs += 8*IR(ir->op2)->i;
    if (checki16(ofs)) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_tsi(as, MIPSI_ADDIU, dest, base, ofs);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base);
  emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  rset_clear(allow, tab);
  if (irt_isnum(kt)) {
    key = ra_alloc1(as, refkey, RSET_FPR);
    tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
  } else {
    if (!irt_ispri(kt)) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
    type = ra_allock(as, irt_toitype(irkey->t), allow);
    rset_clear(allow, type);
  }
  tmp2 = ra_scratch(as, allow);
  rset_clear(allow, tmp2);

  /* Key not found in chain: load niltv. */
  l_end = emit_label(as);
  if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));
  else
    *--as->mcp = MIPSI_NOP;
  /* Follow hash chain until the end. */
  emit_move(as, dest, tmp1);
  l_loop = --as->mcp;
  emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (irt_isnum(kt)) {
    emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
    emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
    emit_tg(as, MIPSI_MFC1, tmp1, key+1);
    emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
    emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
    emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
  } else {
    if (irt_ispri(kt)) {
      emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
    } else {
      emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
      emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
      emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
    }
  }
  emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
  *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    Reg tmphash = tmp1;
    if (irref_isk(refkey))
      tmphash = ra_allock(as, khash, allow);
    emit_dst(as, MIPSI_ADDU, dest, dest, tmp1);
    lua_assert(sizeof(Node) == 24);
    emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
    emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
    emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
    emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
    emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
    emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
    if (irref_isk(refkey)) {
      /* Nothing to do. */
    } else if (irt_isstr(kt)) {
      emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash));
    } else {  /* Must match with hash*() in lj_tab.c. */
      emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
      emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
      emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
      if (irt_isnum(kt)) {
	emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
	if ((as->flags & JIT_F_MIPS32R2)) {
	  emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
	} else {
	  emit_dst(as, MIPSI_OR, dest, dest, tmp1);
	  emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
	  emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
	}
	emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
	emit_tg(as, MIPSI_MFC1, tmp2, key);
	emit_tg(as, MIPSI_MFC1, tmp1, key+1);
      } else {
	emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
	emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
	emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
      }
    }
  }
}

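/* For reference, the main-position computation this must stay in sync
** with is roughly (see hashrot() in lj_tab.c, constants HASH_ROT1/2/3):
**   lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
**   lo -= hi; hi = lj_rol(hi, HASH_ROT2);
**   hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
**   n = tab->node + (hi & tab->hmask)*sizeof(Node);
** The SLL-by-5 minus SLL-by-3 pair above is the strength-reduced *24 node
** scaling (24*x == 32*x - 8*x), since sizeof(Node) == 24.
*/
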
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  int32_t lo, hi;
  lua_assert(ofs % sizeof(Node) == 0);
  if (ofs > 32736) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_tsi(as, MIPSI_ADDIU, dest, node, ofs);
  }
  if (!irt_ispri(irkey->t)) {
    key = ra_scratch(as, allow);
    rset_clear(allow, key);
  }
  if (irt_isnum(irkey->t)) {
    lo = (int32_t)ir_knum(irkey)->u32.lo;
    hi = (int32_t)ir_knum(irkey)->u32.hi;
  } else {
    lo = irkey->i;
    hi = irt_toitype(irkey->t);
    if (!ra_hasreg(key))
      goto nolo;
  }
  asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
nolo:
  asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
  if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
  emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
  if (ofs > 32736)
    emit_dst(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow));
}

static void asm_newref(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
    IRRef args[3];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* GCtab *t     */
    args[2] = ASMREF_TMP1;  /* cTValue *key */
    asm_setupresult(as, ir, ci);  /* TValue * */
    asm_gencall(as, ci, args);
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
  }
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
      emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_tsi(as, MIPSI_LW, uv, func,
	     (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lua_assert(!ra_used(ir));
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  int32_t ofs = (int32_t)sizeof(GCstr);
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
	       irr->o == IR_ADD && irref_isk(irr->op2) &&
	       checki16(ofs + IR(irr->op2)->i)) {
      ofs += IR(irr->op2)->i;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
    emit_dst(as, MIPSI_ADDU, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  ofs += IR(refk)->i;
  if (checki16(ofs))
    emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
  else
    emit_dst(as, MIPSI_ADDU, dest, r,
	     ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
}

/* -- Loads and stores ---------------------------------------------------- */

static MIPSIns asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return MIPSI_LB;
  case IRT_U8: return MIPSI_LBU;
  case IRT_I16: return MIPSI_LH;
  case IRT_U16: return MIPSI_LHU;
  case IRT_NUM: return MIPSI_LDC1;
  case IRT_FLOAT: return MIPSI_LWC1;
  default: return MIPSI_LW;
  }
}

static MIPSIns asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return MIPSI_SB;
  case IRT_I16: case IRT_U16: return MIPSI_SH;
  case IRT_NUM: return MIPSI_SDC1;
  case IRT_FLOAT: return MIPSI_SWC1;
  default: return MIPSI_SW;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
  MIPSIns mi = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op2 == IRFL_TAB_ARRAY) {
    ofs = asm_fuseabase(as, ir->op1);
    if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
      emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs);
      return;
    }
  }
  ofs = field_ofs[ir->op2];
  lua_assert(!irt_isfp(ir->t));
  emit_tsi(as, mi, dest, idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    MIPSIns mi = asm_fxstoreins(ir);
    lua_assert(!irt_isfp(ir->t));
    emit_tsi(as, mi, src, idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
}

static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
		 rset_exclude(RSET_GPR, src), ofs);
  }
}

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  IRType1 t = ir->t;
  Reg dest = RID_NONE, type = RID_TMP, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (ra_used(ir)) {
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
  rset_clear(allow, idx);
  if (irt_isnum(t)) {
    asm_guard(as, MIPSI_BEQ, type, RID_ZERO);
    emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM);
    if (ra_hasreg(dest))
      emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
  } else {
    asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow));
    if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
  }
  emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg idx, src = RID_NONE, type = RID_NONE;
  int32_t ofs = 0;
  if (ir->r == RID_SINK)
    return;
  if (irt_isnum(ir->t)) {
    src = ra_alloc1(as, ir->op2, RSET_FPR);
  } else {
    if (!irt_ispri(ir->t)) {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
    }
    type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
    rset_clear(allow, type);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
  if (irt_isnum(ir->t)) {
    emit_hsi(as, MIPSI_SDC1, src, idx, ofs);
  } else {
    if (ra_hasreg(src))
      emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
    emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  IRType1 t = ir->t;
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
  lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else if (ra_used(ir)) {
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    rset_clear(allow, base);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (irt_isint(t)) {
	Reg tmp = ra_scratch(as, RSET_FPR);
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, MIPSI_CVT_W_D, tmp, tmp);
	dest = tmp;
	t.irt = IRT_NUM;  /* Check for original type. */
      } else {
	Reg tmp = ra_scratch(as, RSET_GPR);
	emit_fg(as, MIPSI_CVT_D_W, dest, dest);
	emit_tg(as, MIPSI_MTC1, tmp, dest);
	dest = tmp;
	t.irt = IRT_INT;  /* Check for original type. */
      }
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
  rset_clear(allow, base);
dotypecheck:
  if (irt_isnum(t)) {
    if ((ir->op2 & IRSLOAD_TYPECHECK)) {
      asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
      type = RID_TMP;
    }
    if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
  } else {
    if ((ir->op2 & IRSLOAD_TYPECHECK)) {
      Reg ktype = ra_allock(as, irt_toitype(t), allow);
      asm_guard(as, MIPSI_BNE, RID_TMP, ktype);
      type = RID_TMP;
    }
    if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
  }
  if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
	      lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[2];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lua_assert(sz != CTSIZE_INVALID);

  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  as->gcsteps++;

  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lua_assert(sz == 4 || sz == 8);
    if (sz == 8) {
      ofs += 4;
      lua_assert((ir+1)->o == IR_HIOP);
      if (LJ_LE) ir++;
    }
    for (;;) {
      Reg r = ra_alloc1z(as, ir->op2, allow);
      emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; if (LJ_BE) ir++; else ir--;
    }
  }
  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
  emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
  emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
  emit_ti(as, MIPSI_LI, RID_TMP, ctypeid);  /* Lower 16 bit used. Sign-ext ok. */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
	       ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg link = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, link, gc.grayagain);
  emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP);  /* Clear black bit. */
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
  emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_tsi(as, MIPSI_LBU, tmp, obj,
	   (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_fgh(as, mi, dest, left, right);
}

static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_fg(as, mi, dest, left);
}

static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
{
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
	irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
      IRRef args[2];
      args[0] = irpp->op1;
      args[1] = irp->op2;
      asm_setupresult(as, ir, ci);
      asm_gencall(as, ci, args);
      return 1;
    }
  }
  return 0;
}

static void asm_add(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, MIPSI_ADD_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    if (irref_isk(ir->op2)) {
      int32_t k = IR(ir->op2)->i;
      if (checki16(k)) {
	emit_tsi(as, MIPSI_ADDIU, dest, left, k);
	return;
      }
    }
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dst(as, MIPSI_ADDU, dest, left, right);
  }
}

static void asm_sub(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, MIPSI_SUB_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, MIPSI_SUBU, dest, left, right);
  }
}

static void asm_mul(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, MIPSI_MUL_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, MIPSI_MUL, dest, left, right);
  }
}

static void asm_neg(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, MIPSI_NEG_D);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
  }
}

static void asm_arithov(ASMState *as, IRIns *ir)
{
  Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int k = IR(ir->op2)->i;
    if (ir->o == IR_SUBOV) k = (int)(~(unsigned int)k+1u);
    if (checki16(k)) {  /* (dest < left) == (k >= 0 ? 1 : 0) */
      left = ra_alloc1(as, ir->op1, RSET_GPR);
      asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      if (dest == left) emit_move(as, RID_TMP, left);
      return;
    }
  }
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
						 right), dest));
  asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
  emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
  if (ir->o == IR_ADDOV) {  /* ((dest^left) & (dest^right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
  } else {  /* ((dest^left) & (dest^~right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
    emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
  }
  emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
  emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
  if (dest == left || dest == right)
    emit_move(as, RID_TMP, dest == left ? left : right);
}

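/* Overflow test rationale: for an addition, signed overflow occurred iff
** both operands have the same sign and the result's sign differs, i.e.
** ((res^a) & (res^b)) is negative. Subtraction reuses the same test with
** ~b, because a-b overflows exactly when a+~b+1 does.
*/
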
static void asm_mulov(ASMState *as, IRIns *ir)
{
#if LJ_DUALNUM
#error "NYI: MULOV"
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);  /* Unused in single-number mode. */
#endif
}

#if LJ_HASFFI
static void asm_add64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
      goto loarith;
    } else if (checki16(k)) {
      emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      goto loarith;
    }
  }
  emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dst(as, MIPSI_ADDU, dest, left, right);
loarith:
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      if (dest != left)
	emit_move(as, dest, left);
      return;
    } else if (checki16(k)) {
      if (dest == left) {
	Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
	emit_move(as, dest, tmp);
	dest = tmp;
      }
      emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      return;
    }
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (dest == left && dest == right) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
  emit_dst(as, MIPSI_ADDU, dest, left, right);
}

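/* Carry detection without flags: after lo = a+b, the unsigned compare
** SLTU RID_TMP, lo, a yields 1 exactly when the addition wrapped around,
** and the hiword op adds RID_TMP into the high word. The dest == left/
** right shuffles above only exist to keep an unclobbered operand copy
** available for that compare.
*/
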
static void asm_sub64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  if (dest == left) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
}

static void asm_neg64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
}
#endif

static void asm_bitnot(ASMState *as, IRIns *ir)
{
  Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
  IRIns *irl = IR(ir->op1);
  if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
    left = ra_alloc2(as, irl, RSET_GPR);
    right = (left >> 8); left &= 255;
  } else {
    left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    right = RID_ZERO;
  }
  emit_dst(as, MIPSI_NOR, dest, left, right);
}

static void asm_bitswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_MIPS32R2)) {
    emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
    emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
  } else {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
    emit_dst(as, MIPSI_OR, dest, dest, tmp);
    emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
    emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
    emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
    emit_dta(as, MIPSI_SRL, dest, left, 8);
    emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
    emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
    emit_dta(as, MIPSI_SRL, tmp, left, 24);
    emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
  }
}

static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (checku16(k)) {
      emit_tsi(as, mik, dest, left, k);
      return;
    }
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dst(as, mi, dest, left, right);
}

static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
    emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift);
  } else {
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, mi, dest, right, left);  /* Shift amount is in rs. */
  }
}

static void asm_bitror(ASMState *as, IRIns *ir)
{
  if ((as->flags & JIT_F_MIPS32R2)) {
    asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (irref_isk(ir->op2)) {  /* Constant shifts. */
      uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
      Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
      emit_rotr(as, dest, left, RID_TMP, shift);
    } else {
      Reg right, left = ra_alloc2(as, ir, RSET_GPR);
      right = (left >> 8); left &= 255;
      emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
      emit_dst(as, MIPSI_SRLV, dest, right, left);
      emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
      emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
    }
  }
}

static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
{
  if (irt_isnum(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    if (dest == left) {
      emit_fg(as, MIPSI_MOVT_D, dest, right);
    } else {
      emit_fg(as, MIPSI_MOVF_D, dest, left);
      if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
    }
    emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (dest == left) {
      emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
    } else {
      emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
      if (dest != right) emit_move(as, dest, right);
    }
    emit_dst(as, MIPSI_SLT, RID_TMP,
	     ismax ? left : right, ismax ? right : left);
  }
}

/* -- Comparisons --------------------------------------------------------- */

static void asm_comp(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT  ULT UGE ULE UGT. */
  IROp op = ir->o;
  if (irt_isnum(ir->t)) {
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
  } else {
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    if (op == IR_ABC) op = IR_UGT;
    if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) {
      MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
			    ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
      asm_guard(as, mi, left, 0);
    } else {
      if (irref_isk(ir->op2)) {
	int32_t k = IR(ir->op2)->i;
	if ((op&2)) k++;
	if (checki16(k)) {
	  asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
	  emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
		   RID_TMP, left, k);
	  return;
	}
      }
      right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
      asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
	       RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
    }
  }
}

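/* The integer path decodes the opcode bits directly (see ORDER IR above):
** bit 0 flips the branch sense (LT vs. GE), bit 1 swaps the operands
** (LT vs. GT variants) and bit 2 (op&4) selects the unsigned SLTU/SLTIU
** forms. The FP path picks among the c.olt.d compare family the same way
** and branches on the FPU condition bit with bc1t/bc1f.
*/
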
static void asm_compeq(ASMState *as, IRIns *ir)
{
  Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR);
  right = (left >> 8); left &= 255;
  if (irt_isnum(ir->t)) {
    asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
  } else {
    asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
  }
}

#if LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_comp64(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT  ULT UGE ULE UGT. */
  IROp op = (ir-1)->o;
  MCLabel l_end;
  Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
  righthi = (lefthi >> 8); lefthi &= 255;
  leftlo = ra_alloc2(as, ir-1,
		     rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
  rightlo = (leftlo >> 8); leftlo &= 255;
  asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
  l_end = emit_label(as);
  if (lefthi != righthi)
    emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
	     (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
  emit_dst(as, MIPSI_SLTU, RID_TMP,
	   (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
  if (lefthi != righthi)
    emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
}

static void asm_comp64eq(ASMState *as, IRIns *ir)
{
  Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
  tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
  emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
  emit_dst(as, MIPSI_XOR, tmp, left, right);
  left = ra_alloc2(as, ir-1, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
}
#endif

/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
  } else if ((ir-1)->o < IR_EQ) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
    asm_comp64(as, ir);
    return;
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
    asm_comp64eq(as, ir);
    return;
  } else if ((ir-1)->o == IR_XSTORE) {
    as->curins--;  /* Handle both stores here. */
    if ((ir-1)->r != RID_SINK) {
      asm_xstore(as, ir, LJ_LE ? 4 : 0);
      asm_xstore(as, ir-1, LJ_LE ? 0 : 4);
    }
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_ADD: as->curins--; asm_add64(as, ir); break;
  case IR_SUB: as->curins--; asm_sub64(as, ir); break;
  case IR_NEG: as->curins--; asm_neg64(as, ir); break;
  case IR_CALLN:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);  /* Unused without FFI. */
#endif
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
			    IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
  Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
  ExitNo oldsnap = as->snapno;
  rset_clear(allow, pbase);
  tmp = allow ? rset_pickbot(allow) :
		(pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
  as->snapno = exitno;
  asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
  as->snapno = oldsnap;
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0);
  else
    ra_modified(as, tmp);
  emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
  emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase);
  emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack));
  if (pbase == RID_TMP)
    emit_getgl(as, RID_TMP, jit_base);
  emit_getgl(as, tmp, jit_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0);
}

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
    } else {
      Reg type;
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
      if (!irt_ispri(ir->t)) {
	Reg src = ra_alloc1(as, ref, allow);
	rset_clear(allow, src);
	emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
	if (s == 0) continue;  /* Do not overwrite link to previous frame. */
	type = ra_allock(as, (int32_t)(*flinks--), allow);
      } else {
	type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      }
      emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define MIPS_NOPATCH_GC_CHECK	MIPSI_OR

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  /* Assumes asm_snap_prep() already done. */
  asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  l_end[-3] = MIPS_NOPATCH_GC_CHECK;  /* Replace the nop after the call. */
  emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  tmp = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
  emit_getgl(as, tmp, gc.threshold);
  emit_getgl(as, RID_TMP, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  p[-1] = MIPSI_NOP;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guard already inverted the cond branch. Only patch the target. */
    p[-3] |= ((target-p+2) & 0x0000ffffu);
  } else {
    p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
  }
}

/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (as->loopinv) as->mctop--;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_move(as, r, RID_BASE);
  }
}

/* Coalesce BASE register for a side trace. */
static Reg asm_head_side_base(ASMState *as, IRIns *irp)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (as->loopinv) as->mctop--;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      return r;  /* Same BASE register already coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      emit_move(as, r, irp->r);  /* Move from coalesced parent reg. */
      return irp->r;
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return RID_NONE;
}
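/*
** The return value tells the caller which register holds BASE from the
** parent's point of view: the coalesced or moved-from parent register, or
** RID_NONE if BASE had to be reloaded from g->jit_base (or was never
** allocated to a register at all).
*/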
/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
  int32_t spadj = as->T->spadjust;
  MCode *p = as->mctop-1;
  *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
  p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
}
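/*
** The resulting tail, e.g. for spadj = 16, is:
**   j     <target>          # link to trace or back to the interpreter
**   addiu sp, sp, 16        # stack adjustment in the delay slot
** J encodes only bits 27..2 of the target address, which assumes the
** target lies within the same 256 MB region as the trace itself.
*/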
/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  as->mcp = as->mctop-2;  /* Leave room for branch plus nop or stack adj. */
  as->invmcp = as->loopref ? as->mcp : NULL;
}
/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;

  /* Guarded assertions. */
  case IR_EQ: case IR_NE: asm_compeq(as, ir); break;
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
    asm_comp(as, ir);
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bitnot(as, ir); break;
  case IR_BSWAP: asm_bitswap(as, ir); break;

  case IR_BAND: asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI); break;
  case IR_BOR:  asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI); break;
  case IR_BXOR: asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI); break;

  case IR_BSHL: asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL); break;
  case IR_BSHR: asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL); break;
  case IR_BSAR: asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA); break;
  case IR_BROL: lua_assert(0); break;
  case IR_BROR: asm_bitror(as, ir); break;

  /* Arithmetic ops. */
  case IR_ADD: asm_add(as, ir); break;
  case IR_SUB: asm_sub(as, ir); break;
  case IR_MUL: asm_mul(as, ir); break;
  case IR_DIV: asm_fparith(as, ir, MIPSI_DIV_D); break;
  case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
  case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
  case IR_NEG: asm_neg(as, ir); break;

  case IR_ABS: asm_fpunary(as, ir, MIPSI_ABS_D); break;
  case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
  case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
  case IR_MIN: asm_min_max(as, ir, 0); break;
  case IR_MAX: asm_min_max(as, ir, 1); break;
  case IR_FPMATH:
    if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
      break;
    if (ir->op2 <= IRFPM_TRUNC)
      asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
    else if (ir->op2 == IRFPM_SQRT)
      asm_fpunary(as, ir, MIPSI_SQRT_D);
    else
      asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
    break;

  /* Overflow-checking arithmetic ops. */
  case IR_ADDOV: asm_arithov(as, ir); break;
  case IR_SUBOV: asm_arithov(as, ir); break;
  case IR_MULOV: asm_mulov(as, ir); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir, 0); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOBIT: asm_tobit(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}
/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = (int)CCI_NARGS(ci);
  int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (args[i] && irt_isfp(IR(args[i])->t) &&
        nfpr > 0 && !(ci->flags & CCI_VARARG)) {
      nfpr--;
      ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else if (args[i] && irt_isnum(IR(args[i])->t)) {
      nfpr = 0;
      ngpr = ngpr & ~1;
      if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
    } else {
      nfpr = 0;
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
}
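/*
** Example for the o32 slot accounting (assuming REGARG_NUMGPR = 4 and
** REGARG_NUMFPR = 2): a non-vararg call with (double, int, double) puts the
** first double in an FPR (nfpr 2->1, ngpr 4->2), the int in a GPR (ngpr
** 2->1, and nfpr drops to 0 for all further args), and the second double no
** longer fits after alignment (ngpr 1->0), so nslots grows from the minimum
** 4 outgoing argument words to 6.
*/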
static void asm_setup_target(ASMState *as)
{
  asm_sparejump_setup(as);
  asm_exitstub_setup(as);
}
/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *px = exitstub_trace_addr(T, exitno);
  MCode *cstart = NULL, *cstop = NULL;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
  MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
  for (p++; p < pe; p++) {
    if (*p == exitload) {  /* Look for load of exit number. */
      /* Look for exitstub branch. Yes, this covers all used branch variants. */
      if (((p[-1] ^ (px-p)) & 0xffffu) == 0 &&
          ((p[-1] & 0xf0000000u) == MIPSI_BEQ ||
           (p[-1] & 0xfc1e0000u) == MIPSI_BLTZ ||
           (p[-1] & 0xffe00000u) == MIPSI_BC1F) &&
          p[-2] != MIPS_NOPATCH_GC_CHECK) {
        ptrdiff_t delta = target - p;
        if (((delta + 0x8000) >> 16) == 0) {  /* Patch in-range branch. */
        patchbranch:
          p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
          *p = MIPSI_NOP;  /* Replace the load of the exit number. */
          cstop = p+1;
          if (!cstart) cstart = p-1;
        } else {  /* Branch out of range. Use spare jump slot in mcarea. */
          int i;
          for (i = (int)(sizeof(MCLink)/sizeof(MCode));
               i < (int)(sizeof(MCLink)/sizeof(MCode)+MIPS_SPAREJUMP*2);
               i += 2) {
            if (mcarea[i] == tjump) {
              delta = mcarea+i - p;
              goto patchbranch;
            } else if (mcarea[i] == MIPSI_NOP) {
              mcarea[i] = tjump;
              cstart = mcarea+i;
              delta = mcarea+i - p;
              goto patchbranch;
            }
          }
          /* Ignore jump slot overflow. Child trace is simply not attached. */
        }
      } else if (p+1 == pe) {
        /* Patch NOP after code for inverted loop branch. Use of J is ok. */
        lua_assert(p[1] == MIPSI_NOP);
        p[1] = tjump;
        *p = MIPSI_NOP;  /* Replace the load of the exit number. */
        cstop = p+2;
        if (!cstart) cstart = p+1;
      }
    }
  }
  if (cstart) lj_mcode_sync(cstart, cstop);
  lj_mcode_patch(J, mcarea, 1);
}
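/*
** In-range test: the patched branch offset delta = target - p must fit into
** the signed 16 bit branch field, i.e. -0x8000 <= delta <= 0x7fff, which is
** exactly what ((delta + 0x8000) >> 16) == 0 checks. Out-of-range targets
** are reached via one of the MIPS_SPAREJUMP long-range slots at the start
** of the mcarea: either a slot already holding a J to this target is
** reused, or a free (nop) slot is claimed, and the short branch is
** redirected there instead.
*/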