/*
** ARM IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */

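/*
** Note on reading this file: the assembler generates machine code backwards,
** from the last IR instruction of a trace towards the first. Every emit_*()
** call below prepends an instruction, so within one function the emit calls
** appear in reverse execution order (see e.g. l_loop = --as->mcp below).
*/
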
/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

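/*
** ARM ldrd/strd need a consecutive even/odd register pair (Rt even, Rt+1
** odd), which is why the pair allocator below only ever returns an even
** register and implicitly claims r+1 along with it.
*/
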
/* Allocate a scratch register pair. */
static Reg ra_scratchpair(ASMState *as, RegSet allow)
{
  RegSet pick1 = as->freeset & allow;
  RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
  Reg r;
  if (pick2) {
    r = rset_picktop(pick2);
  } else {
    RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
    if (pick) {
      r = rset_picktop(pick);
      ra_restore(as, regcost_ref(as->cost[r+1]));
    } else {
      pick = pick1 & (allow << 1) & RSET_GPRODD;
      if (pick) {
        r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
      } else {
        r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
        ra_restore(as, regcost_ref(as->cost[r+1]));
      }
    }
  }
  lua_assert(rset_test(RSET_GPREVEN, r));
  ra_modified(as, r);
  ra_modified(as, r+1);
  RA_DBGX((as, "scratchpair    $r $r", r, r+1));
  return r;
}

/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}

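/* Callers unpack the packed pair with: right = (left >> 8); left &= 255; */
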
/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  MCode *mxp = as->mcbot;
  int i;
  if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
    asm_mclimit(as);
  /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
  *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
  *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
  mxp++;
  *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch);  /* DISPATCH address */
  *mxp++ = group*EXITSTUBS_PER_GROUP;
  for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
    *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxp - EXITSTUBS_PER_GROUP;
}

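/*
** Sketch of the resulting group layout (derived from the code above):
**
**   str lr, [sp]            ; common per-group code
**   bl ->vm_exit_handler
**   .long DISPATCH_address
**   .long group*EXITSTUBS_PER_GROUP
**   b <common code>         ; stub for exit 0 in this group
**   ...                     ; one 'b' per exit, EXITSTUBS_PER_GROUP total
**
** Each per-exit branch targets the common code; the exit number within the
** group is recovered from the branch distance ((-6-i) words).
*/
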
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, ARMCC cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
    emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
    return;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
}

/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          int lim)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (ofs > -lim && ofs < lim) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (ofs < lim) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        *ofsp = (ofs & 255);  /* Mask out less bits to allow LDRD. */
        return ra_allock(as, (ofs & ~255), allow);
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return ARMF_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t k = emit_isk12(ai, ir->i);
    if (k)
      return k;
  } else if (mayfuse(as, ref)) {
    if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
                    ir->o == IR_BSHR ? ARMSH_LSR :
                    ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
      if (irref_isk(ir->op2)) {
        return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
      } else {
        Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
        return m | ARMF_RSH(sh, s);
      }
    } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return m | ARMF_SH(ARMSH_LSL, 1);
    }
  }
  return ra_allocref(as, ref, allow);
}

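/*
** The k12 operand returned by emit_isk12() is an ARM modified immediate:
** an 8-bit value rotated right by an even amount. The returned bits are
** XORed into the base opcode (see the ARMI_ADD^k and ARMI_CMP^m uses in
** this file), which also lets emit_isk12() substitute the complementary
** instruction (e.g. ADD<->SUB, CMP<->CMN) for negated constants.
*/
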
/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
      irref_isk(ir->op2) && IR(ir->op2)->i == 2)
    return ref;
  return 0;  /* No fusion. */
}

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
                         RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
                  (ai & 0x04000000) ? 4096 : 256;
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) &&
          (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
          (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
        ofs = ofs2;
        ref = ir->op1;
      } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
        IRRef lref = ir->op1, rref = ir->op2;
        Reg rn, rm;
        if ((ai & 0x04000000)) {
          IRRef sref = asm_fuselsl2(as, rref);
          if (sref) {
            rref = IR(sref)->op1;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
            lref = rref;
            rref = IR(sref)->op1;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          }
        }
        rn = ra_alloc1(as, lref, allow);
        rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
      lua_assert(ofs == 0);
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs += IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs += IR(ir->op1)->i;
        ref = ir->op2;
      } else {
        /* NYI: Fuse ADD with constant. */
        Reg rn = ra_alloc1(as, ir->op1, allow);
        uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
        if ((ai & 0x04000000))
          emit_lso(as, ai, rd, rd, ofs);
        else
          emit_lsox(as, ai, rd, rd, ofs);
        emit_dn(as, ARMI_ADD^m, rd, rn);
        return;
      }
      if (ofs <= -lim || ofs >= lim) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
#if !LJ_SOFTFP
  if ((ai & 0x08000000))
    emit_vlso(as, ai, rd, base, ofs);
  else
#endif
  if ((ai & 0x04000000))
    emit_lso(as, ai, rd, base, ofs);
  else
    emit_lsox(as, ai, rd, base, ofs);
}

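/*
** The opcode bit tests above mirror the ARM encoding: bit 27 (0x08000000)
** marks VFP vldr/vstr (8-bit scaled offset), bit 26 (0x04000000) marks
** ldr/str/ldrb/strb (12-bit offset), and the remaining halfword/dword forms
** only carry an 8-bit offset -- hence the 1024/4096/256 fusion limits.
*/
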
#if !LJ_SOFTFP
/* Fuse to multiply-add/sub instruction. */
static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
  if (lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg right, left = ra_alloc2(as, irm,
                        rset_exclude(rset_exclude(RSET_FPR, dest), add));
    right = (left >> 8); left &= 255;
    emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
    if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
    return 1;
  }
  return 0;
}
#endif

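/*
** VMLA/VMLS accumulate into their destination (dest += left*right), so the
** addend must first be moved into dest when they differ. Since code is
** emitted backwards, the ARMI_VMOV_D emitted above lands *before* the
** multiply-add in execution order.
*/
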
/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = 0;
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, 0u);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
#if !LJ_SOFTFP
    if (irt_isfp(ir->t)) {
      RegSet of = as->freeset;
      Reg src;
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(ir->t)) {
          if (fpr <= REGARG_LASTFPR) {
            ra_leftov(as, fpr, ref);
            fpr++;
            continue;
          }
        } else if (fprodd) {  /* Ick. */
          src = ra_alloc1(as, ref, RSET_FPR);
          emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
          fprodd = 0;
          continue;
        } else if (fpr <= REGARG_LASTFPR) {
          ra_leftov(as, fpr, ref);
          fprodd = fpr++;
          continue;
        }
        /* Workaround to protect argument GPRs from being used for remat. */
        as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
        src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
        as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
        fprodd = 0;
        goto stackfp;
      }
      /* Workaround to protect argument GPRs from being used for remat. */
      as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
      src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
      as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
      if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (irt_isnum(ir->t)) {
          lua_assert(rset_test(as->freeset, gpr+1));  /* Ditto. */
          emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
          gpr += 2;
        } else {
          emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
          gpr++;
        }
      } else {
      stackfp:
        if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
        emit_spstore(as, ir, src, ofs);
        ofs += irt_isnum(ir->t) ? 8 : 4;
      }
    } else
#endif
    {
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (ref) ra_leftov(as, gpr, ref);
        gpr++;
      } else {
        if (ref) {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs);
        }
        ofs += 4;
      }
    }
  }
}

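/*
** Argument setup follows the ARM EABI: with the soft-float ABI (or for
** varargs) doubles go into an aligned GPR pair, hence gpr = (gpr+1) & ~1u;
** with the hard-float ABI they go into FP argument registers, single floats
** back-filling the odd half of a skipped register via fprodd.
*/
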
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP);
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lua_assert(!irt_ispri(ir->t));
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
        Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
        if (irt_isnum(ir->t))
          emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
        else
          emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
    emit_m(as, ARMI_BLXr, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, ARMI_CMP, RID_TMP,
          ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
}

/* -- Type conversions ---------------------------------------------------- */

#if !LJ_SOFTFP
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
  emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
}
#endif

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
  IRRef lref = ir->op1;
  /* 64 bit integer conversions are handled by SPLIT. */
  lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
#if LJ_SOFTFP
  /* FP conversions are handled by SPLIT. */
  lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lua_assert(irt_type(ir->t) != st);
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
              (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      ARMIns ai = irt_isfloat(ir->t) ?
        (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
        (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
      emit_dm(as, ai, (dest & 15), (dest & 15));
      emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      ARMIns ai;
      emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
      ai = irt_isint(ir->t) ?
        (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
        (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
      emit_dm(as, ai, (tmp & 15), (left & 15));
    }
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
      if ((as->flags & JIT_F_ARMV6)) {
        ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
                    st == IRT_U8 ? ARMI_UXTB :
                    st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
        emit_dm(as, ai, dest, left);
      } else if (st == IRT_U8) {
        emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
      } else {
        uint32_t shift = st == IRT_I8 ? 24 : 16;
        ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
        emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
        emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
      }
    } else {  /* Handle 32/32 bit no-op (cast). */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}

#if !LJ_SOFTFP && LJ_HASFFI
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  CCallInfo ci;
  IRRef args[2];
  args[0] = (ir-1)->op1;
  args[1] = ir->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  ci = lj_ir_callinfo[id];
#if !LJ_ABI_SOFTFP
  ci.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
#endif
  asm_setupresult(as, ir, &ci);
  asm_gencall(as, &ci, args);
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg rlo = 0, rhi = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
#if LJ_SOFTFP
  if (destused) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s);
      destused = 0;
    } else {
      rhi = ra_dest(as, ir+1, RSET_GPR);
      rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused) {
    emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
    emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
  }
#else
  UNUSED(rhi);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      rlo = ra_dest(as, ir, RSET_FPR);
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused)
    emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
#endif
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  if (ofs == 0)
    emit_dm(as, ARMI_MOV, tmp, RID_SP);
  else
    emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
}

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref)) {
      /* Use the number constant itself as a TValue. */
      ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
    } else {
#if LJ_SOFTFP
      lua_assert(0);
#else
      /* Otherwise force a spill and use the spill slot. */
      emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
#endif
    }
  } else {
    /* Otherwise use [sp] and [sp+4] to hold the TValue. */
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_dm(as, ARMI_MOV, dest, RID_SP);
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_lso(as, ARMI_STR, src, RID_SP, 0);
    }
    if ((ir+1)->o == IR_HIOP)
      type = ra_alloc1(as, ref+1, allow);
    else
      type = ra_allock(as, irt_toitype(ir->t), allow);
    emit_lso(as, ARMI_STR, type, RID_SP, 4);
  }
}

static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}

/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, ARMI_ADD^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
  uint32_t khash;
  MCLabel l_end, l_loop;
  rset_clear(allow, tab);
  if (!irref_isk(refkey) || irt_isstr(kt)) {
#if LJ_SOFTFP
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        keynumhi = (irkey+1)->r;
        keyhi = RID_TMP;
        ra_noweak(as, keynumhi);
      } else {
        keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, keynumhi);
      khi = 0;
    }
#else
    if (irt_isnum(kt)) {
      key = ra_scratch(as, allow);
      rset_clear(allow, key);
      keyhi = keynumhi = ra_scratch(as, allow);
      rset_clear(allow, keyhi);
      khi = 0;
    } else {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
#endif
  } else if (irt_isnum(kt)) {
    int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
    k = emit_isk12(ARMI_CMP, val);
    if (!k) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
    val = (int32_t)ir_knum(irkey)->u32.hi;
    khi = emit_isk12(ARMI_CMP, val);
    if (!khi) {
      keyhi = ra_allock(as, val, allow);
      rset_clear(allow, keyhi);
    }
  } else if (!irt_ispri(kt)) {
    k = emit_isk12(ARMI_CMP, irkey->i);
    if (!k) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }
  if (!irt_ispri(kt))
    tmp = ra_scratchpair(as, allow);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
  emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  if (!irt_ispri(kt)) {
    emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
    emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
    emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
  } else {
    emit_n(as, ARMI_CMP^khi, tmp);
    emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
  }
  *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
    if (irt_isstr(kt)) {  /* Fetch of str->hash is cheaper than ra_allock. */
      emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else if (irref_isk(refkey)) {
      emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
               rset_exclude(rset_exclude(RSET_GPR, tab), dest));
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      if (ra_hasreg(keynumhi)) {  /* Canonicalize +-0.0 to 0.0. */
        if (keyhi == RID_TMP)
          emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
        emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
      }
      emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
               tmp, tmp+1, tmp);
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
      if (ra_hasreg(keynumhi)) {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key);  /* Test for +-0.0. */
        emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
#if !LJ_SOFTFP
        emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
                 (ra_alloc1(as, refkey, RSET_FPR) & 15));
#endif
      } else {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
                 rset_exclude(rset_exclude(RSET_GPR, tab), key));
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  lua_assert(ofs % sizeof(Node) == 0);
  if (ofs > 4095) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    RegSet even = (as->freeset & allow);
    even = even & (even >> 1) & RSET_GPREVEN;
    if (even) {
      key = ra_scratch(as, even);
      if (rset_test(as->freeset, key+1)) {
        type = key+1;
        ra_modified(as, type);
      }
    } else {
      key = ra_scratch(as, allow);
    }
    rset_clear(allow, key);
  }
  rset_clear(allow, type);
  if (irt_isnum(irkey->t)) {
    emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
             (int32_t)ir_knum(irkey)->u32.hi, allow);
    emit_opk(as, ARMI_CMP, 0, key,
             (int32_t)ir_knum(irkey)->u32.lo, allow);
  } else {
    if (ra_hasreg(key))
      emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
  }
  emit_lso(as, ARMI_LDR, type, idx, kofs+4);
  if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
  if (ofs > 4095)
    emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
}

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  if (ir->r == RID_SINK)
    return;
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t     */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, ARMI_LDR, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
      emit_opk(as, ARMI_ADD, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, ARMI_LDR, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lua_assert(!ra_used(ir));
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    uint32_t k, m = ARMI_K12|sizeof(GCstr);
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               (k = emit_isk12(ARMI_ADD,
                               (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
      m = k;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_dn(as, ARMI_ADD^m, dest, dest);
    emit_dnm(as, ARMI_ADD, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  emit_opk(as, ARMI_ADD, dest, r,
           sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
}

/* -- Loads and stores ---------------------------------------------------- */

static ARMIns asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return ARMI_LDRSB;
  case IRT_U8: return ARMI_LDRB;
  case IRT_I16: return ARMI_LDRSH;
  case IRT_U16: return ARMI_LDRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;
  default: return ARMI_LDR;
  }
}

static ARMIns asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return ARMI_STRB;
  case IRT_I16: case IRT_U16: return ARMI_STRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;
  default: return ARMI_STR;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
  ARMIns ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op2 == IRFL_TAB_ARRAY) {
    ofs = asm_fuseabase(as, ir->op1);
    if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
      emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
      return;
    }
  }
  ofs = field_ofs[ir->op2];
  if ((ai & 0x04000000))
    emit_lso(as, ai, dest, idx, ofs);
  else
    emit_lsox(as, ai, dest, idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    ARMIns ai = asm_fxstoreins(ir);
    if ((ai & 0x04000000))
      emit_lso(as, ai, src, idx, ofs);
    else
      emit_lsox(as, ai, src, idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir,
                     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
}

static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2,
                        (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src), ofs);
  }
}

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
                       (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
  if (!hiop || type == RID_NONE) {
    rset_clear(allow, idx);
    if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
        rset_test((as->freeset & allow), dest+1)) {
      type = dest+1;
      ra_modified(as, type);
    } else {
      type = RID_TMP;
    }
  }
  asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
  emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM)
      emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
    else
#endif
      emit_lso(as, ARMI_LDR, dest, idx, ofs);
  }
  emit_lso(as, ARMI_LDR, type, idx, ofs+4);
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, type = RID_NONE;
    int32_t ofs = 0;
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
      emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
    } else
#endif
    {
      int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
      }
      if (hiop)
        type = ra_alloc1(as, (ir+1)->op2, allow);
      else
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
      if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
      emit_lso(as, ARMI_STR, type, idx, ofs+4);
    }
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
#if LJ_SOFTFP
  lua_assert(!(ir->op2 & IRSLOAD_CONVERT));  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (t == IRT_INT) {
        Reg tmp = ra_scratch(as, RSET_FPR);
        emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
        emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
        dest = tmp;
        t = IRT_NUM;  /* Check for original type. */
      } else {
        Reg tmp = ra_scratch(as, RSET_GPR);
        emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
        emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
        dest = tmp;
        t = IRT_INT;  /* Check for original type. */
      }
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type)) {
      if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
          rset_test((as->freeset & allow), dest+1)) {
        type = dest+1;
        ra_modified(as, type);
      } else {
        type = RID_TMP;
      }
    }
    asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  }
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM) {
      if (ofs < 1024) {
        emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
      } else {
        if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
        emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
        emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
        return;
      }
    } else
#endif
      emit_lso(as, ARMI_LDR, dest, base, ofs);
  }
  if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
              lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[2];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lua_assert(sz != CTSIZE_INVALID);

  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  as->gcsteps++;

  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lua_assert(sz == 4 || sz == 8);
    if (sz == 8) {
      ofs += 4; ir++;
      lua_assert(ir->o == IR_HIOP);
    }
    for (;;) {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_lso(as, ARMI_STR, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    }
  }
  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    uint32_t k = emit_isk12(ARMI_MOV, ctypeid);
    Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow);
    emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
    if (k) emit_d(as, ARMI_MOV^k, RID_R1);
  }
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
                     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, ARMI_STR, tab, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
  emit_lso(as, ARMI_LDR, link, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
  emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  if ((l_end[-1] >> 28) == CC_AL)
    l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_lso(as, ARMI_LDRB, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

#if !LJ_SOFTFP
static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
}

static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_dm(as, ai, (dest & 15), (left & 15));
}

static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
{
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
        irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
      IRRef args[2];
      args[0] = irpp->op1;
      args[1] = irp->op2;
      asm_setupresult(as, ir, ci);
      asm_gencall(as, ci, args);
      return 1;
    }
  }
  return 0;
}
#endif

static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
      ai ^= (ARMI_SUB^ARMI_RSB);
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= ARMI_S;
  }
  emit_dn(as, ai^m, dest, left);
}

static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai |= ARMI_S;
  }
  asm_intop(as, ir, ai);
}

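/*
** The flagmcp mechanism: asm_intcomp() records as->flagmcp after emitting a
** 'cmp <reg>, #0' against the result of the preceding instruction. If that
** instruction can set the flags itself, the S-bit is added (ARMI_S) and the
** now-redundant compare is removed by bumping as->mcp past it.
*/
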
static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
    uint32_t cc = (as->mcp[1] >> 28);
    as->flagmcp = NULL;
    if (cc <= CC_NE) {
      as->mcp++;
      ai |= ARMI_S;
    } else if (cc == CC_GE) {
      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
      ai |= ARMI_S;
    } else if (cc == CC_LT) {
      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
      ai |= ARMI_S;
    }  /* else: other conds don't work with bit ops. */
  }
  if (ir->op2 == 0) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
    emit_d(as, ai^m, dest);
  } else {
    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
    asm_intop(as, ir, ai);
  }
}

static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dn(as, ai|ARMI_K12|0, dest, left);
}

/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  Reg tmp = RID_NONE;
  /* ARMv5 restriction: dest != left and dest_hi != left. */
  if (dest == left && left != right) { left = right; right = dest; }
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    if (!(as->flags & JIT_F_ARMV6) && dest == left)
      tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
    asm_guardcc(as, CC_NE);
    emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
    emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
  } else {
    if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
    emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
  }
  /* Only need this for the dest == left == right case. */
  if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
}

static void asm_add(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
      asm_fparith(as, ir, ARMI_VADD_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_ADD);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
      asm_fparith(as, ir, ARMI_VSUB_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_SUB);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, ARMI_VMUL_D);
    return;
  }
#endif
  asm_intmul(as, ir);
}

static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, ARMI_VNEG_D);
    return;
  }
#endif
  asm_intneg(as, ir, ARMI_RSB);
}

static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

#if !LJ_SOFTFP
static void asm_callround(ASMState *as, IRIns *ir, int id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_D0)|RID2RSET(RID_D1)|RID2RSET(RID_D2)|
                RID2RSET(RID_R0)|RID2RSET(RID_R1);
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_FPRET);
  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_hf :
                id == IRFPM_CEIL ? (void *)lj_vm_ceil_hf :
                                   (void *)lj_vm_trunc_hf);
  ra_leftov(as, RID_D0, ir->op1);
}
#endif

static void asm_bitswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_ARMV6)) {
    emit_dm(as, ARMI_REV, dest, left);
  } else {
    Reg tmp2 = dest;
    if (tmp2 == left)
      tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
    emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
    emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
  }
}

static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
{
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
    /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & 31);
    emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
  }
}

static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  uint32_t kcmp = 0, kmov = 0;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = 0;
  if (irref_isk(ir->op2)) {
    kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
    if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
  }
  if (!kmov) {
    kcmp = 0;
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  }
  if (dest != right) {
    emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
    cc ^= 1;  /* Must use opposite conditions for paired moves. */
  } else {
    cc ^= (CC_LT^CC_GT);  /* Otherwise may swap CC_LT <-> CC_GT. */
  }
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, left);
  emit_nm(as, ARMI_CMP^kcmp, left, right);
}

#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0] = ir->op1; args[1] = (ir+1)->op1;
  args[2] = ir->op2; args[3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. */
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
  if (!rset_test(as->freeset, RID_R2) &&
      regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
  if (!rset_test(as->freeset, RID_R3) &&
      regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
  ra_evictset(as, drop);
  ra_destpair(as, ir);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else

static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 15); left &= 15;
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
  if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, left, right);
}
#endif

static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
{
#if LJ_SOFTFP
  UNUSED(fcc);
#else
  if (irt_isnum(ir->t))
    asm_fpmin_max(as, ir, fcc);
  else
#endif
    asm_intmin_max(as, ir, cc);
}

/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};

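/*
** Each entry holds the branch-to-exit condition, i.e. the *negation* of the
** IR comparison: a guarded IR_LT emits a 'bge' to the exit stub. The high
** nibble is the condition used after an FP compare, chosen so that
** unordered results (NaNs) also take the exit.
*/
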
#if LJ_SOFTFP
/* FP comparisons. */
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
  args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
  args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
  for (r = RID_R0; r <= RID_R3; r++)
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
  ra_evictset(as, drop);
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else

/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  ARMIns ai;
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
    right = 0;
    ai = ARMI_VCMPZ_D;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 15); left = ((left >> 8) & 15);
    } else {
      right = ((left >> 8) & 15); left &= 15;
    }
    ai = ARMI_VCMP_D;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ai, left, right);
}
#endif

/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  ARMCC cc = (asm_compmap[ir->o] & 15);
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
  if (asm_swapops(as, lref, rref)) {
    Reg tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  if (irref_isk(rref) && IR(rref)->i == 0) {
    IRIns *irl = IR(lref);
    cmpprev0 = (irl+1 == ir);
    /* Combine comp(BAND(left, right), 0) into tst left, right. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        m2 = emit_isk12(ARMI_AND, IR(brref)->i);
        if ((m2 & (ARMI_AND^ARMI_BIC)))
          goto notst;  /* Not beneficial if we miss a constant operand. */
      }
      if (cc == CC_GE) cc = CC_PL;
      else if (cc == CC_LT) cc = CC_MI;
      else if (cc > CC_NE) goto notst;  /* Other conds don't work with tst. */
      bleft = ra_alloc1(as, blref, RSET_GPR);
      if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ARMI_TST^m2, bleft);
      return;
    }
  }
notst:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ARMI_CMP^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}

#if LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_int64comp(ASMState *as, IRIns *ir)
{
  int signedcomp = (ir->o <= IR_GT);
  ARMCC cclo, cchi;
  Reg leftlo, lefthi;
  uint32_t mlo, mhi;
  RegSet allow = RSET_GPR, oldfree;

  /* Always use unsigned comparison for loword. */
  cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
  leftlo = ra_alloc1(as, ir->op1, allow);
  oldfree = as->freeset;
  mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
  allow &= ~(oldfree & ~as->freeset);  /* Update for allocs of asm_fuseopm. */

  /* Use signed or unsigned comparison for hiword. */
  cchi = asm_compmap[ir->o] & 15;
  lefthi = ra_alloc1(as, (ir+1)->op1, allow);
  mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));

  /* All register allocations must be performed _before_ this point. */
  if (signedcomp) {
    MCLabel l_around = emit_label(as);
    asm_guardcc(as, cclo);
    emit_n(as, ARMI_CMP^mlo, leftlo);
    emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
    if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6;  /* GE -> GT, LE -> LT */
    asm_guardcc(as, cchi);
  } else {
    asm_guardcc(as, cclo);
    emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
  }
  emit_n(as, ARMI_CMP^mhi, lefthi);
}
#endif
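/*
** Since code is generated backwards, the signed variant above runs as
** (a sketch for a 64 bit signed a < b guard, where asm_compmap yields
** the inverted exit conditions GT for the hiword and HS for the loword):
**
**   cmp    ahi, bhi
**   blgt   ->exitstub    ; hiwords decide: a > b exits
**   bne    l_around      ; hiwords differ, comparison already decided
**   cmp    alo, blo
**   blhs   ->exitstub    ; hiwords equal: unsigned loword compare
** l_around:
**
** The unsigned variant instead folds the loword compare into a
** conditionally executed CMP.EQ, avoiding the branch around it.
*/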
/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI || LJ_SOFTFP
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o <= IR_NE) {  /* 64 bit integer or FP comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t))
      asm_sfpcomp(as, ir-1);
    else
#endif
      asm_int64comp(as, ir-1);
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
    return;
#elif LJ_HASFFI
  } else if ((ir-1)->o == IR_CONV) {
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_xstore(as, ir, 4);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
  case IR_ADD:
    as->curins--;
    asm_intop(as, ir, ARMI_ADC);
    asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
    break;
  case IR_SUB:
    as->curins--;
    asm_intop(as, ir, ARMI_SBC);
    asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
    break;
  case IR_NEG:
    as->curins--;
    asm_intneg(as, ir, ARMI_RSC);
    asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
    break;
#endif
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
#endif
  case IR_CALLN:
  case IR_CALLS:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
#if LJ_SOFTFP
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
#endif
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);
#endif
}
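/*
** The paired emissions above rely on backwards code generation: for a
** 64 bit add, asm_intop(as, ir, ARMI_ADC) is called first but its ADC
** lands after the loword ADDS, giving the expected execution order:
**
**   adds  lo1, lo2, lo3   ; loword add sets the carry flag (ARMI_S)
**   adc   hi1, hi2, hi3   ; hiword add consumes the carry
**
** SUB/SBC and, for NEG, RSB/RSC follow the same pattern.
*/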
/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
			    IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lua_assert(ra_hasreg(pbase));
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0);  /* Restore temp. register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
  k = emit_isk12(0, (int32_t)(8*topslot));
  lua_assert(k);
  emit_n(as, ARMI_CMP^k, RID_TMP);
  emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
  emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
	   (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    int32_t i = i32ptr(&J2G(as->J)->jit_L);
    if (ra_hasspill(irp->s))
      emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0);  /* Save temp. register. */
    emit_loadi(as, RID_TMP, (i & ~4095));
  } else {
    emit_getgl(as, RID_TMP, jit_L);
  }
}
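/*
** In execution order the check above roughly performs (a sketch; each
** Lua stack slot is one 8 byte TValue, hence the 8*topslot scaling):
**
**   ldr   tmp, [#&g->jit_L]       ; current lua_State *L
**   ldr   tmp, [tmp, #maxstack]
**   sub   tmp, tmp, pbase         ; remaining stack space in bytes
**   cmp   tmp, #8*topslot         ; must encode as a K12 immediate
**   blls  ->exitstub              ; overflow: fall back to exit handler
*/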
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg tmp;
      lua_assert(irref_isk(ref));  /* LJ_SOFTFP: must be a number constant. */
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
		      rset_exclude(RSET_GPREVEN, RID_BASE));
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
      if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
#endif
    } else {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg type;
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
      if (!irt_ispri(ir->t)) {
	Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
	emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
	if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
	if (s == 0) continue;  /* Do not overwrite link to previous frame. */
	type = ra_allock(as, (int32_t)(*flinks--), odd);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
	type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
#endif
      } else {
	type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
      }
      emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}
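/*
** The even/odd register choices above are deliberate: the payload is
** stored from an even GPR at ofs and the type tag from the matching odd
** GPR (reg+1, when free) at ofs+4, so that the emitter's load/store
** combiner can merge the two STR instructions into a single STRD, which
** requires an even/odd register pair and adjacent offsets.
*/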
/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp1, tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp1 = ra_releasetmp(as, ASMREF_TMP1);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
  emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
  emit_lso(as, ARMI_LDR, tmp2, tmp1,
	   (int32_t)offsetof(global_State, gc.threshold));
  emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
	   (int32_t)offsetof(global_State, gc.total));
  ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
  as->gcsteps = 0;
  checkmclim(as);
}
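/*
** Execution order of the code emitted above (a sketch):
**
**   mov   tmp1, #&g              ; global_State address, constant
**   ldr   rtmp, [tmp1, #gc.total]
**   ldr   tmp2, [tmp1, #gc.threshold]
**   cmp   rtmp, tmp2
**   bls   l_end                  ; below threshold: skip the GC step
**   mov   tmp2, #gcsteps
**   bl    lj_gc_step_jit
**   cmp   r0, #0
**   blne  ->exitstub             ; exit if the GC requires it
** l_end:
*/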
/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the bcc and patched the final bl. */
    p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
  } else {
    p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
  }
}
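/*
** Branch offset arithmetic above: an ARM B instruction encodes a signed
** 24 bit word offset relative to PC+8, i.e. two words past the branch
** itself. The branch sits at p-1, so the encoded offset is
** target - ((p-1) + 2) = (target - p) - 1 words, masked to 24 bits.
*/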
/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->jit_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, jit_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && rset_test(as->modset, ir->r)) ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    rset_clear(allow, ra_dest(as, ir, allow));
  } else {
    Reg r = irp->r;
    lua_assert(ra_hasreg(r));
    rset_clear(allow, r);
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
  }
  return allow;
}
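/*
** Note on the head handlers above: a trace entry only guarantees BASE;
** if the IR references the current lua_State, asm_head_lreg()
** rematerializes it from g->jit_L before BASE is coalesced into
** RID_BASE (root traces) resp. into the parent's base register (side
** traces).
*/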
/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    as->mctop = --p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(ARMI_ADD, spadj);
    lua_assert(k);
    p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}
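/*
** The stack adjustment patched above must fit a K12 operand: an 8 bit
** constant rotated right by an even amount. Small multiples of 8 always
** encode directly; e.g. spadj = 16 yields (a sketch):
**
**   add   sp, sp, #16
**   b     ->linked trace (or lj_vm_exit_interp)
*/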
/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;

  /* Guarded assertions. */
  case IR_EQ: case IR_NE:
    if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
      as->curins--;
      asm_href(as, ir-1, (IROp)ir->o);
      break;
    }
    /* fallthrough */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
#endif
    asm_intcomp(as, ir);
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
  case IR_BSWAP: asm_bitswap(as, ir); break;

  case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
  case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
  case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;

  case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
  case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
  case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
  case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
  case IR_BROL: lua_assert(0); break;

  /* Arithmetic ops. */
  case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
  case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
  case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
  case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
  case IR_NEG: asm_neg(as, ir); break;

#if LJ_SOFTFP
  case IR_DIV: case IR_POW: case IR_ABS:
  case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
    lua_assert(0);  /* Unused for LJ_SOFTFP. */
    break;
#else
  case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
  case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
  case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
  case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
  case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
  case IR_FPMATH:
    if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
      break;
    if (ir->op2 <= IRFPM_TRUNC)
      asm_callround(as, ir, ir->op2);
    else if (ir->op2 == IRFPM_SQRT)
      asm_fpunary(as, ir, ARMI_VSQRT_D);
    else
      asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
    break;
  case IR_TOBIT: asm_tobit(as, ir); break;
#endif

  case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
  case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir, 0); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}
/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX];
  uint32_t i, nargs = (int)CCI_NARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
	if (irt_isnum(IR(args[i])->t)) {
	  if (nfpr > 0) nfpr--;
	  else fprodd = 0, nslots = (nslots + 3) & ~1;
	} else {
	  if (fprodd) fprodd--;
	  else if (nfpr > 0) fprodd = 1, nfpr--;
	  else nslots++;
	}
      } else if (irt_isnum(IR(args[i])->t)) {
	ngpr &= ~1;
	if (ngpr > 0) ngpr -= 2; else nslots += 2;
      } else {
	if (ngpr > 0) ngpr--; else nslots++;
      }
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}
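/*
** Sketch of the hard-float FPR accounting in asm_setup_call_slots()
** above (nfpr counts double registers d0-d7, fprodd records a free
** single-precision upper half): an argument list (float, double, float)
** assigns the first float to s0 (nfpr 8 -> 7, fprodd = 1), the double
** to d1 (nfpr 7 -> 6), and the second float back-fills s1, the free
** half of d0 (fprodd -> 0). This mirrors the AAPCS VFP back-filling
** rules; only when registers run out does nslots grow and reserve
** spill space via as->evenspill.
*/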
/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL, *cend = p;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_addr(J, exitno) - 2;
  for (; p < pe; p++) {
    /* Look for bl_cc exitstub, replace with b_cc target. */
    uint32_t ins = *p;
    if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
	((ins ^ (px-p)) & 0x00ffffffu) == 0) {
      *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
      cend = p+1;
      if (!cstart) cstart = p;
    }
  }
  lua_assert(cstart != NULL);
  lj_mcode_sync(cstart, cend);
  lj_mcode_patch(J, mcarea, 1);
}
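/*
** The matching logic above, decoded: (ins & 0x0f000000u) == 0x0b000000u
** selects BL instructions, ins < 0xf0000000u excludes the unconditional
** extension space, and the XOR test checks that the 24 bit offset field
** targets this trace's exit stub (px is the stub address minus 2 words,
** compensating for the PC+8 bias). Each match is rewritten in place to
** a conditional B to the new target (the mask 0xfe000000u drops the L
** bit), and the modified range is then flushed via lj_mcode_sync().
*/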