/*
** ARM IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */
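/* Note: like all LuaJIT backends, this assembler emits machine code
** backwards (as->mcp grows downwards), so successive emit_*() calls
** below appear in reverse of the final execution order.
*/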
/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}
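/* ARM ldrd/strd can only transfer an even/odd register pair (Rt even,
** plus Rt+1), which is why ra_scratchpair() below juggles the
** RSET_GPREVEN/RSET_GPRODD sets to end up with such a pair.
*/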
/* Allocate a scratch register pair. */
static Reg ra_scratchpair(ASMState *as, RegSet allow)
{
  RegSet pick1 = as->freeset & allow;
  RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
  Reg r;
  if (pick2) {
    r = rset_picktop(pick2);
  } else {
    RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
    if (pick) {
      r = rset_picktop(pick);
      ra_restore(as, regcost_ref(as->cost[r+1]));
    } else {
      pick = pick1 & (allow << 1) & RSET_GPRODD;
      if (pick) {
        r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
      } else {
        r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
        ra_restore(as, regcost_ref(as->cost[r+1]));
      }
    }
  }
  lua_assert(rset_test(RSET_GPREVEN, r));
  ra_modified(as, r);
  ra_modified(as, r+1);
  RA_DBGX((as, "scratchpair    $r $r", r, r+1));
  return r;
}
/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}
/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  MCode *mxp = as->mcbot;
  int i;
  if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
    asm_mclimit(as);
  /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
  *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
  *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
  mxp++;
  *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch);  /* DISPATCH address */
  *mxp++ = group*EXITSTUBS_PER_GROUP;
  for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
    *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
  lj_mcode_sync(as->mcbot, mxp);
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxp - EXITSTUBS_PER_GROUP;
}
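/* Each stub above is a single branch back to the shared group head, so an
** exit number maps to a fixed stub address: group base plus
** 4*(exitno % EXITSTUBS_PER_GROUP), which is what exitstub_addr() computes.
*/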
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}
/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, ARMCC cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {  /* Invert loop-closing branch. */
    as->loopinv = 1;
    *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
    emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
    return;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
}
/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}
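/* An intervening IR_NEWREF may trigger a table resize, presumably
** invalidating a fused colocated-array base, so asm_fuseabase() below
** only fuses when noconflict() proves there is no NEWREF in between.
*/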
/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}
/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          int lim)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (ofs > -lim && ofs < lim) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (ofs < lim) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        *ofsp = (ofs & 255);  /* Mask out less bits to allow LDRD. */
        return ra_allock(as, (ofs & ~255), allow);
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}
/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return ARMF_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t k = emit_isk12(ai, ir->i);
    if (k)
      return k;
  } else if (mayfuse(as, ref)) {
    if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
                    ir->o == IR_BSHR ? ARMSH_LSR :
                    ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
      if (irref_isk(ir->op2)) {
        return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
      } else {
        Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
        return m | ARMF_RSH(sh, s);
      }
    } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return m | ARMF_SH(ARMSH_LSL, 1);
    }
  }
  return ra_allocref(as, ref, allow);
}
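/* asm_fuseopm() above folds the rhs into ARM's flexible second operand:
** an 8-bit immediate with an even rotation (checked by emit_isk12), a
** plain register, a register shifted by a constant, or a register
** shifted by another register.
*/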
/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
      irref_isk(ir->op2) && IR(ir->op2)->i == 2)
    return ir->op1;
  return 0;  /* No fusion. */
}
/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
                         RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
                   (ai & 0x04000000) ? 4096 : 256;
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) &&
          (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
          (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
        ofs = ofs2;
        ref = ir->op1;
      } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
        IRRef lref = ir->op1, rref = ir->op2;
        Reg rn, rm;
        if ((ai & 0x04000000)) {
          IRRef sref = asm_fuselsl2(as, rref);
          if (sref) {
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
            lref = rref;
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          }
        }
        rn = ra_alloc1(as, lref, allow);
        rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
      lua_assert(ofs == 0);
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs += IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs += IR(ir->op1)->i;
        ref = ir->op2;
      } else {
        /* NYI: Fuse ADD with constant. */
        Reg rn = ra_alloc1(as, ir->op1, allow);
        uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
        if ((ai & 0x04000000))
          emit_lso(as, ai, rd, rd, ofs);
        else
          emit_lsox(as, ai, rd, rd, ofs);
        emit_dn(as, ARMI_ADD^m, rd, rn);
        return;
      }
      if (ofs <= -lim || ofs >= lim) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
#if !LJ_SOFTFP
  if ((ai & 0x08000000))
    emit_vlso(as, ai, rd, base, ofs);
  else
#endif
  if ((ai & 0x04000000))
    emit_lso(as, ai, rd, base, ofs);
  else
    emit_lsox(as, ai, rd, base, ofs);
}
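/* The offset limits above mirror the ARM addressing modes: VFP vldr/vstr
** (flagged by the 0x08000000 bit here) take an 8-bit word-scaled offset
** (lim 1024), plain ldr/str (0x04000000) a 12-bit offset (lim 4096), and
** ldrh/ldrsb/ldrd only an 8-bit offset (lim 256).
*/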
/* Fuse to multiply-add/sub instruction. */
static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
  if (lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg right, left = ra_alloc2(as, irm,
                        rset_exclude(rset_exclude(RSET_FPR, dest), add));
    right = (left >> 8); left &= 255;
    emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
    if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
    return 1;
  }
  return 0;
}
/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = 0;
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
#if !LJ_SOFTFP
    if (ref && irt_isfp(ir->t)) {
      RegSet of = as->freeset;
      Reg src;
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(ir->t)) {
          if (fpr <= REGARG_LASTFPR) {
            ra_leftov(as, fpr, ref);
            fpr++;
            continue;
          }
        } else if (fprodd) {  /* Ick. */
          src = ra_alloc1(as, ref, RSET_FPR);
          emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
          fprodd = 0;
          continue;
        } else if (fpr <= REGARG_LASTFPR) {
          ra_leftov(as, fpr, ref);
          fprodd = fpr++;
          continue;
        }
        /* Workaround to protect argument GPRs from being used for remat. */
        as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
        src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
        as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
        fprodd = 0;
        goto stackfp;
      }
      /* Workaround to protect argument GPRs from being used for remat. */
      as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
      src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
      as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
      if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (irt_isnum(ir->t)) {
          lua_assert(rset_test(as->freeset, gpr+1));  /* Ditto. */
          emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
          gpr += 2;
        } else {
          emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
          gpr++;
        }
      } else {
      stackfp:
        if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
        emit_spstore(as, ir, src, ofs);
        ofs += irt_isnum(ir->t) ? 8 : 4;
      }
    } else
#endif
    {
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (ref) ra_leftov(as, gpr, ref);
        gpr++;
      } else {
        if (ref) {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs);
        }
        ofs += 4;
      }
    }
  }
}
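/* Argument setup above follows the AAPCS: the first GPR args go in
** r0-r3 and the rest on the stack. For the hard-float ABI, FP args use
** VFP registers (fprodd tracking a half-used register pair for floats),
** while softfp-ABI and vararg calls pass doubles in even/odd GPR pairs
** via vmov.
*/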
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP);
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lua_assert(!irt_ispri(ir->t));
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
        Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
        if (irt_isnum(ir->t))
          emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
        else
          emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}
static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
    emit_m(as, ARMI_BLXr, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}
/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, ARMI_CMP, RID_TMP,
          ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
}
/* -- Type conversions ---------------------------------------------------- */

#if !LJ_SOFTFP
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
  emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
}
static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
}
#else
#define asm_tobit(as, ir)	lua_assert(0)
#endif
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
  IRRef lref = ir->op1;
  /* 64 bit integer conversions are handled by SPLIT. */
  lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
#if LJ_SOFTFP
  /* FP conversions are handled by SPLIT. */
  lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lua_assert(irt_type(ir->t) != st);
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
              (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      ARMIns ai = irt_isfloat(ir->t) ?
        (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
        (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
      emit_dm(as, ai, (dest & 15), (dest & 15));
      emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ARMIns ai;
      emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
      ai = irt_isint(ir->t) ?
        (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
        (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
      emit_dm(as, ai, (tmp & 15), (left & 15));
    }
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
      if ((as->flags & JIT_F_ARMV6)) {
        ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
                    st == IRT_U8 ? ARMI_UXTB :
                    st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
        emit_dm(as, ai, dest, left);
      } else if (st == IRT_U8) {
        emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
      } else {
        uint32_t shift = st == IRT_I8 ? 24 : 16;
        ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
        emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
        emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
      }
    } else {  /* Handle 32/32 bit no-op (cast). */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}
static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg rlo = 0, rhi = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
#if LJ_SOFTFP
  if (destused) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s);
      destused = 0;
    } else {
      rhi = ra_dest(as, ir+1, RSET_GPR);
      rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused) {
    emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
    emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
  }
#else
  UNUSED(rhi);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      rlo = ra_dest(as, ir, RSET_FPR);
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused)
    emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
#endif
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  if (ofs == 0)
    emit_dm(as, ARMI_MOV, tmp, RID_SP);
  else
    emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
}
/* -- Memory references --------------------------------------------------- */

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref)) {
      /* Use the number constant itself as a TValue. */
      ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
    } else {
#if LJ_SOFTFP
      lua_assert(0);
#else
      /* Otherwise force a spill and use the spill slot. */
      emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
#endif
    }
  } else {
    /* Otherwise use [sp] and [sp+4] to hold the TValue. */
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_dm(as, ARMI_MOV, dest, RID_SP);
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_lso(as, ARMI_STR, src, RID_SP, 0);
    }
    if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
      type = ra_alloc1(as, ref+1, allow);
    else
      type = ra_allock(as, irt_toitype(ir->t), allow);
    emit_lso(as, ARMI_STR, type, RID_SP, 4);
  }
}
static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, ARMI_ADD^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
}
/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
  uint32_t khash;
  MCLabel l_end, l_loop;
  rset_clear(allow, tab);
  if (!irref_isk(refkey) || irt_isstr(kt)) {
#if LJ_SOFTFP
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        keynumhi = (irkey+1)->r;
        keyhi = RID_TMP;
        ra_noweak(as, keynumhi);
      } else {
        keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, keynumhi);
      khi = 0;
    }
#else
    if (irt_isnum(kt)) {
      key = ra_scratch(as, allow);
      rset_clear(allow, key);
      keyhi = keynumhi = ra_scratch(as, allow);
      rset_clear(allow, keyhi);
      khi = 0;
    } else {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
#endif
  } else if (irt_isnum(kt)) {
    int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
    k = emit_isk12(ARMI_CMP, val);
    if (!k) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
    val = (int32_t)ir_knum(irkey)->u32.hi;
    khi = emit_isk12(ARMI_CMP, val);
    if (!khi) {
      keyhi = ra_allock(as, val, allow);
      rset_clear(allow, keyhi);
    }
  } else if (!irt_ispri(kt)) {
    k = emit_isk12(ARMI_CMP, irkey->i);
    if (!k) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }
  if (!irt_ispri(kt))
    tmp = ra_scratchpair(as, allow);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
  emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  if (!irt_ispri(kt)) {
    emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
    emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
    emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
  } else {
    emit_n(as, ARMI_CMP^khi, tmp);
    emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
  }
  *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
    if (irt_isstr(kt)) {  /* Fetch of str->hash is cheaper than ra_allock. */
      emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else if (irref_isk(refkey)) {
      emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
               rset_exclude(rset_exclude(RSET_GPR, tab), dest));
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      if (ra_hasreg(keynumhi)) {  /* Canonicalize +-0.0 to 0.0. */
        if (keyhi == RID_TMP)
          emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
        emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
      }
      emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
               tmp, tmp+1, tmp);
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
      if (ra_hasreg(keynumhi)) {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key);  /* Test for +-0.0. */
        emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
#if !LJ_SOFTFP
        emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
                 (ra_alloc1(as, refkey, RSET_FPR) & 15));
#endif
      } else {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
                 rset_exclude(rset_exclude(RSET_GPR, tab), key));
      }
    }
  }
}
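/* For non-primitive keys the chain compare above loads the 64-bit key
** with a single ldrd into the even/odd pair from ra_scratchpair(), then
** checks tag and payload with a conditional compare pair.
*/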
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  lua_assert(ofs % sizeof(Node) == 0);
  if (ofs > 4095) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    RegSet even = (as->freeset & allow);
    even = even & (even >> 1) & RSET_GPREVEN;
    if (even) {
      key = ra_scratch(as, even);
      if (rset_test(as->freeset, key+1)) {
        type = key+1;
        ra_modified(as, type);
      }
    } else {
      key = ra_scratch(as, allow);
    }
    rset_clear(allow, key);
  }
  rset_clear(allow, type);
  if (irt_isnum(irkey->t)) {
    emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
             (int32_t)ir_knum(irkey)->u32.hi, allow);
    emit_opk(as, ARMI_CMP, 0, key,
             (int32_t)ir_knum(irkey)->u32.lo, allow);
  } else {
    if (ra_hasreg(key))
      emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
  }
  emit_lso(as, ARMI_LDR, type, idx, kofs+4);
  if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
  if (ofs > 4095)
    emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
}
static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, ARMI_LDR, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
      emit_opk(as, ARMI_ADD, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, ARMI_LDR, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}
static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lua_assert(!ra_used(ir));
}
static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    uint32_t k, m = ARMI_K12|sizeof(GCstr);
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               (k = emit_isk12(ARMI_ADD,
                               (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
      m = k;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_dn(as, ARMI_ADD^m, dest, dest);
    emit_dnm(as, ARMI_ADD, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  emit_opk(as, ARMI_ADD, dest, r,
           sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
}
/* -- Loads and stores ---------------------------------------------------- */

static ARMIns asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return ARMI_LDRSB;
  case IRT_U8: return ARMI_LDRB;
  case IRT_I16: return ARMI_LDRSH;
  case IRT_U16: return ARMI_LDRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;
  default: return ARMI_LDR;
  }
}

static ARMIns asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return ARMI_STRB;
  case IRT_I16: case IRT_U16: return ARMI_STRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;
  default: return ARMI_STR;
  }
}
static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
  ARMIns ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op2 == IRFL_TAB_ARRAY) {
    ofs = asm_fuseabase(as, ir->op1);
    if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
      emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
      return;
    }
  }
  ofs = field_ofs[ir->op2];
  if ((ai & 0x04000000))
    emit_lso(as, ai, dest, idx, ofs);
  else
    emit_lsox(as, ai, dest, idx, ofs);
}
static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    ARMIns ai = asm_fxstoreins(ir);
    if ((ai & 0x04000000))
      emit_lso(as, ai, src, idx, ofs);
    else
      emit_lsox(as, ai, src, idx, ofs);
  }
}
static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir,
                     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
}

static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2,
                        (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src), ofs);
  }
}

#define asm_xstore(as, ir)	asm_xstore_(as, ir, 0)
static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
                       (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
  if (!hiop || type == RID_NONE) {
    rset_clear(allow, idx);
    if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
        rset_test((as->freeset & allow), dest+1)) {
      type = dest+1;
      ra_modified(as, type);
    } else {
      type = RID_TMP;
    }
  }
  asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
  emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM)
      emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
    else
#endif
      emit_lso(as, ARMI_LDR, dest, idx, ofs);
  }
  emit_lso(as, ARMI_LDR, type, idx, ofs+4);
}
static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, type = RID_NONE;
    int32_t ofs = 0;
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
      emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
    } else
#endif
    {
      int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
      }
      if (hiop)
        type = ra_alloc1(as, (ir+1)->op2, allow);
      else
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
      if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
      emit_lso(as, ARMI_STR, type, idx, ofs+4);
    }
  }
}
static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
#if LJ_SOFTFP
  lua_assert(!(ir->op2 & IRSLOAD_CONVERT));  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (t == IRT_INT) {
        emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
        emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
        t = IRT_NUM;  /* Check for original type. */
      } else {
        emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
        emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
        t = IRT_INT;  /* Check for original type. */
      }
      dest = tmp;
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type)) {
      if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
          rset_test((as->freeset & allow), dest+1)) {
        type = dest+1;
        ra_modified(as, type);
      } else {
        type = RID_TMP;
      }
    }
    asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  }
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM) {
      if (ofs < 1024) {
        emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
      } else {
        if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
        emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
        emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
        return;
      }
    } else
#endif
      emit_lso(as, ARMI_LDR, dest, base, ofs);
  }
  if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
}
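/* When no dedicated type register is available, the type check above
** reuses dest+1 if it is free and dest is even; together with the
** ofs < 256 limit this appears intended to let the emitter's load/store
** merging combine the tag and value loads into a single ldrd.
*/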
/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL));

  as->gcsteps++;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lua_assert(sz == 4 || sz == 8);
    if (sz == 8) {
      ofs += 4; ir++;
      lua_assert(ir->o == IR_HIOP);
    }
    for (;;) {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_lso(as, ARMI_STR, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    }
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    uint32_t k = emit_isk12(ARMI_MOV, id);
    Reg r = k ? RID_R1 : ra_allock(as, id, allow);
    emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
    if (k) emit_d(as, ARMI_MOV^k, RID_R1);
  }
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif
/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
                     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, ARMI_STR, tab, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
  emit_lso(as, ARMI_LDR, link, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
  emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}
static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  if ((l_end[-1] >> 28) == CC_AL)
    l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_lso(as, ARMI_LDRB, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}
/* -- Arithmetic and logic operations ------------------------------------- */

#if !LJ_SOFTFP
static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
}

static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_dm(as, ai, (dest & 15), (left & 15));
}
static void asm_callround(ASMState *as, IRIns *ir, int id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
                RID2RSET(RID_R3)|RID2RSET(RID_R12);
  RegSet of;
  Reg dest, src;
  ra_evictset(as, drop);
  dest = ra_dest(as, ir, RSET_FPR);
  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
                id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
                                   (void *)lj_vm_trunc_sf);
  /* Workaround to protect argument GPRs from being used for remat. */
  of = as->freeset;
  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
}
static void asm_fpmath(ASMState *as, IRIns *ir)
{
  if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
    return;
  if (ir->op2 <= IRFPM_TRUNC)
    asm_callround(as, ir, ir->op2);
  else if (ir->op2 == IRFPM_SQRT)
    asm_fpunary(as, ir, ARMI_VSQRT_D);
  else
    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
}
#else
#define asm_fpmath(as, ir)	lua_assert(0)
#endif
static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}
static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
      ai ^= (ARMI_SUB^ARMI_RSB);
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= ARMI_S;
  }
  emit_dn(as, ai^m, dest, left);
}
static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai |= ARMI_S;
  }
  asm_intop(as, ir, ai);
}
static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dn(as, ai|ARMI_K12|0, dest, left);
}
/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  Reg tmp = RID_NONE;
  /* ARMv5 restriction: dest != left and dest_hi != left. */
  if (dest == left && left != right) { left = right; right = dest; }
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    if (!(as->flags & JIT_F_ARMV6) && dest == left)
      tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
    asm_guardcc(as, CC_NE);
    emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
    emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
  } else {
    if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
    emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
  }
  /* Only need this for the dest == left == right case. */
  if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
}
static void asm_add(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
      asm_fparith(as, ir, ARMI_VADD_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_ADD);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
      asm_fparith(as, ir, ARMI_VSUB_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_SUB);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, ARMI_VMUL_D);
    return;
  }
#endif
  asm_intmul(as, ir);
}

#define asm_addov(as, ir)	asm_add(as, ir)
#define asm_subov(as, ir)	asm_sub(as, ir)
#define asm_mulov(as, ir)	asm_mul(as, ir)
#if LJ_SOFTFP
#define asm_div(as, ir)		lua_assert(0)
#define asm_pow(as, ir)		lua_assert(0)
#define asm_abs(as, ir)		lua_assert(0)
#define asm_atan2(as, ir)	lua_assert(0)
#define asm_ldexp(as, ir)	lua_assert(0)
#else
#define asm_div(as, ir)		asm_fparith(as, ir, ARMI_VDIV_D)
#define asm_pow(as, ir)		asm_callid(as, ir, IRCALL_lj_vm_powi)
#define asm_abs(as, ir)		asm_fpunary(as, ir, ARMI_VABS_D)
#define asm_atan2(as, ir)	asm_callid(as, ir, IRCALL_atan2)
#define asm_ldexp(as, ir)	asm_callid(as, ir, IRCALL_ldexp)
#endif

#define asm_mod(as, ir)		asm_callid(as, ir, IRCALL_lj_vm_modi)
static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, ARMI_VNEG_D);
    return;
  }
#endif
  asm_intneg(as, ir, ARMI_RSB);
}
static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
    uint32_t cc = (as->mcp[1] >> 28);
    as->flagmcp = NULL;
    if (cc <= CC_NE) {
      as->mcp++;
      ai |= ARMI_S;
    } else if (cc == CC_GE) {
      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
      ai |= ARMI_S;
    } else if (cc == CC_LT) {
      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
      ai |= ARMI_S;
    }  /* else: other conds don't work with bit ops. */
  }
  if (ir->op2 == 0) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
    emit_d(as, ai^m, dest);
  } else {
    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
    asm_intop(as, ir, ai);
  }
}

#define asm_bnot(as, ir)	asm_bitop(as, ir, ARMI_MVN)
static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_ARMV6)) {
    emit_dm(as, ARMI_REV, dest, left);
  } else {
    Reg tmp2 = dest;
    if (tmp2 == left)
      tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
    emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
    emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
  }
}

#define asm_band(as, ir)	asm_bitop(as, ir, ARMI_AND)
#define asm_bor(as, ir)		asm_bitop(as, ir, ARMI_ORR)
#define asm_bxor(as, ir)	asm_bitop(as, ir, ARMI_EOR)
static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
{
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
    /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & 31);
    emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
  }
}

#define asm_bshl(as, ir)	asm_bitshift(as, ir, ARMSH_LSL)
#define asm_bshr(as, ir)	asm_bitshift(as, ir, ARMSH_LSR)
#define asm_bsar(as, ir)	asm_bitshift(as, ir, ARMSH_ASR)
#define asm_bror(as, ir)	asm_bitshift(as, ir, ARMSH_ROR)
#define asm_brol(as, ir)	lua_assert(0)
static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  uint32_t kcmp = 0, kmov = 0;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = 0;
  if (irref_isk(ir->op2)) {
    kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
    if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
  }
  if (!kmov) {
    kcmp = 0;
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  }
  if (kmov || dest != right) {
    emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
    cc ^= 1;  /* Must use opposite conditions for paired moves. */
  } else {
    cc ^= (CC_LT^CC_GT);  /* Otherwise may swap CC_LT <-> CC_GT. */
  }
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
  emit_nm(as, ARMI_CMP^kcmp, left, right);
}
#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0] = ir->op1; args[1] = (ir+1)->op1;
  args[2] = ir->op2; args[3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. */
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
  if (!rset_test(as->freeset, RID_R2) &&
      regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
  if (!rset_test(as->freeset, RID_R3) &&
      regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
  ra_evictset(as, drop);
  ra_destpair(as, ir);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 15); left &= 15;
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
  if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, left, right);
}
#endif

static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
{
#if LJ_SOFTFP
  UNUSED(fcc);
#else
  if (irt_isnum(ir->t))
    asm_fpmin_max(as, ir, fcc);
  else
#endif
    asm_intmin_max(as, ir, cc);
}

#define asm_min(as, ir)		asm_min_max(as, ir, CC_GT, CC_HI)
#define asm_max(as, ir)		asm_min_max(as, ir, CC_LT, CC_LO)
/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};
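/* Each entry packs two condition codes: the low nibble holds the integer
** condition, the high nibble the FP/unsigned one. Both are the negation
** of the IR comparison, because asm_guardcc() branches to the exit when
** the guarded comparison fails.
*/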
#if LJ_SOFTFP
/* FP comparisons. */
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
  args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
  args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
  for (r = RID_R0; r <= RID_R3; r++)
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
  ra_evictset(as, drop);
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  ARMIns ai;
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
    right = 0;
    ai = ARMI_VCMPZ_D;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 15); left = ((left >> 8) & 15);
    } else {
      right = ((left >> 8) & 15); left &= 15;
    }
    ai = ARMI_VCMP_D;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ai, left, right);
}
#endif
/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  ARMCC cc = (asm_compmap[ir->o] & 15);
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
  if (asm_swapops(as, lref, rref)) {
    Reg tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  if (irref_isk(rref) && IR(rref)->i == 0) {
    IRIns *irl = IR(lref);
    cmpprev0 = (irl+1 == ir);
    /* Combine comp(BAND(left, right), 0) into tst left, right. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        m2 = emit_isk12(ARMI_AND, IR(brref)->i);
        if ((m2 & (ARMI_AND^ARMI_BIC)))
          goto notst;  /* Not beneficial if we miss a constant operand. */
      }
      if (cc == CC_GE) cc = CC_PL;
      else if (cc == CC_LT) cc = CC_MI;
      else if (cc > CC_NE) goto notst;  /* Other conds don't work with tst. */
      bleft = ra_alloc1(as, blref, RSET_GPR);
      if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ARMI_TST^m2, bleft);
      return;
    }
  }
notst:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ARMI_CMP^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}
static void asm_comp(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t))
    asm_fpcomp(as, ir);
  else
#endif
    asm_intcomp(as, ir);
}

#define asm_equal(as, ir)	asm_comp(as, ir)
#if LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_int64comp(ASMState *as, IRIns *ir)
{
  int signedcomp = (ir->o <= IR_GT);
  ARMCC cclo, cchi;
  Reg leftlo, lefthi;
  uint32_t mlo, mhi;
  RegSet allow = RSET_GPR, oldfree;

  /* Always use unsigned comparison for loword. */
  cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
  leftlo = ra_alloc1(as, ir->op1, allow);
  oldfree = as->freeset;
  mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
  allow &= ~(oldfree & ~as->freeset);  /* Update for allocs of asm_fuseopm. */

  /* Use signed or unsigned comparison for hiword. */
  cchi = asm_compmap[ir->o] & 15;
  lefthi = ra_alloc1(as, (ir+1)->op1, allow);
  mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));
  /* All register allocations must be performed _before_ this point. */
  if (signedcomp) {
    MCLabel l_around = emit_label(as);
    asm_guardcc(as, cclo);
    emit_n(as, ARMI_CMP^mlo, leftlo);
    emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
    if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6;  /* GE -> GT, LE -> LT */
    asm_guardcc(as, cchi);
  } else {
    asm_guardcc(as, cclo);
    emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
  }
  emit_n(as, ARMI_CMP^mhi, lefthi);
}
#endif

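/* Down-assembled, the signed case reads forward as (sketch, second operands
** depend on what asm_fuseopm fused):
**   cmp lefthi, <hi op2>; b<cchi> ->exit   (hiword decides; GE/LE -> GT/LT)
**   bne l_around                           (hiwords differ the other way)
**   cmp leftlo, <lo op2>; b<cclo> ->exit   (equal hiwords: unsigned loword)
** l_around:
*/
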
/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI || LJ_SOFTFP
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o <= IR_NE) {  /* 64 bit integer or FP comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_int64comp(as, ir-1);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
    return;
#elif LJ_HASFFI
  } else if ((ir-1)->o == IR_CONV) {
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_xstore_(as, ir, 4);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
  case IR_ADD:
    as->curins--;
    asm_intop(as, ir, ARMI_ADC);
    asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
    break;
  case IR_SUB:
    as->curins--;
    asm_intop(as, ir, ARMI_SBC);
    asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
    break;
  case IR_NEG:
    as->curins--;
    asm_intneg(as, ir, ARMI_RSC);
    asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
    break;
#endif
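  /* Each split 64 bit op becomes a flag-linked pair in final code order,
  ** e.g. IR_ADD: adds lo,lo1,lo2 (sets carry); adc hi,hi1,hi2.
  ** Register names are illustrative.
  */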
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
#endif
  case IR_CALLN:
  case IR_CALLS:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
#if LJ_SOFTFP
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
#endif
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);
#endif
}

/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_n(as, ARMI_TST|ARMI_K12|HOOK_PROFILE, RID_TMP);
  emit_lsptr(as, ARMI_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
}

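/* Forward order (sketch): ldrb tmp, [&g->hookmask]; tst tmp, #HOOK_PROFILE;
** bne ->exit_stub, i.e. leave the trace whenever the profiler hook is armed.
*/
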
/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lua_assert(ra_hasreg(pbase));
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0);  /* Restore temp. register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
  k = emit_isk12(0, (int32_t)(8*topslot));
  lua_assert(k);
  emit_n(as, ARMI_CMP^k, RID_TMP);
  emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
  emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
           (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    int32_t i = i32ptr(&J2G(as->J)->cur_L);
    if (ra_hasspill(irp->s))
      emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0);  /* Save temp. register. */
    emit_loadi(as, RID_TMP, (i & ~4095));
  } else {
    emit_getgl(as, RID_TMP, cur_L);
  }
}

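/* Root-trace forward order (sketch):
**   ldr tmp, [g->cur_L]; ldr tmp, [tmp+offsetof(lua_State, maxstack)]
**   sub tmp, tmp, BASE; cmp tmp, #8*topslot; blls ->exit_stub
** i.e. take the exit when the needed slots exceed the remaining stack space.
*/
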
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg tmp;
      lua_assert(irref_isk(ref));  /* LJ_SOFTFP: must be a number constant. */
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
                      rset_exclude(RSET_GPREVEN, RID_BASE));
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
      if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
#endif
    } else {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg type;
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
        emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
        if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), odd);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
#endif
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
      }
      emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}

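/* Each slot is an 8 byte TValue: value word at ofs, type tag at ofs+4.
** Pairing an even register for the value with an odd one for the tag lets
** the store emitter merge the two str instructions into a single strd.
*/
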
/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp1, tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp1 = ra_releasetmp(as, ASMREF_TMP1);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
  emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
  emit_lso(as, ARMI_LDR, tmp2, tmp1,
           (int32_t)offsetof(global_State, gc.threshold));
  emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
           (int32_t)offsetof(global_State, gc.total));
  ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
  as->gcsteps = 0;
  checkmclim(as);
}

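/* Forward order (sketch):
**   <load #g into tmp1>; ldr TMP, [tmp1+gc.total]; ldr tmp2, [tmp1+gc.threshold]
**   cmp TMP, tmp2; bls l_end
**   <load #gcsteps into tmp2>; ... bl lj_gc_step_jit; cmp r0, #0; bne ->exit
** l_end:
*/
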
/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the bcc and patched the final bl. */
    p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
  } else {
    p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
  }
}

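/* ARM branch offsets are relative to PC+8, i.e. two words past the branch:
** the B at p[-1] encodes ((target-(p-1))-2) = (target-p)-1 words, and the
** inverted bcc at p[-2] likewise encodes (target-p) words.
*/
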
/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->cur_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, cur_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    rset_clear(allow, ra_dest(as, ir, allow));
  } else {
    Reg r = irp->r;
    lua_assert(ra_hasreg(r));
    rset_clear(allow, r);
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    as->mctop = --p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(ARMI_ADD, spadj);
    lua_assert(k);
    p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}

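/* The load/store emitter peeks at the previously emitted instruction to
** fuse adjacent str/ldr pairs into strd/ldrd; zeroing the placeholder word
** keeps the yet-to-be-patched slot from matching.
*/
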
/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(IR(args[i])->t)) {
          if (nfpr > 0) nfpr--;
          else fprodd = 0, nslots = (nslots + 3) & ~1;
        } else {
          if (fprodd) fprodd--;
          else if (nfpr > 0) fprodd = 1, nfpr--;
          else nslots++;
        }
      } else if (irt_isnum(IR(args[i])->t)) {
        ngpr &= ~1;
        if (ngpr > 0) ngpr -= 2; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots++;
      }
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(RID_RET);
}

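/* Illustrative hard-float AAPCS accounting: (double, float, double)
** arguments land in d0, s2, d2, with fprodd recording the free s3 slot
** that a later float argument could back-fill. With CCI_VARARG a double
** instead takes an aligned GPR pair (ngpr &= ~1).
*/
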
static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL, *cend = p;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_addr(J, exitno) - 2;
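  /* Note: px is pre-biased by -2 words, matching the PC+8 bias already
  ** encoded in each branch offset field, so the stub match below is a
  ** plain XOR of the offset bits. Masking a matching BL with 0xfe000000
  ** clears the link bit and leaves a conditional B to the new target.
  */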
  for (; p < pe; p++) {
    /* Look for bl_cc exitstub, replace with b_cc target. */
    uint32_t ins = *p;
    if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
        ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
      *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
      cend = p+1;
      if (!cstart) cstart = p;
    }
  }
  lua_assert(cstart != NULL);
  lj_mcode_sync(cstart, cend);
  lj_mcode_patch(J, mcarea, 1);
}