/*
** ARM IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate a scratch register pair. */
static Reg ra_scratchpair(ASMState *as, RegSet allow)
{
  RegSet pick1 = as->freeset & allow;
  RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
  Reg r;
  if (pick2) {
    r = rset_picktop(pick2);
  } else {
    RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
    if (pick) {
      r = rset_picktop(pick);
      ra_restore(as, regcost_ref(as->cost[r+1]));
    } else {
      pick = pick1 & (allow << 1) & RSET_GPRODD;
      if (pick) {
        r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
      } else {
        r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
        ra_restore(as, regcost_ref(as->cost[r+1]));
      }
    }
  }
  lua_assert(rset_test(RSET_GPREVEN, r));
  ra_modified(as, r);
  ra_modified(as, r+1);
  RA_DBGX((as, "scratchpair    $r $r", r, r+1));
  return r;
}

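/* Note (illustrative): LDRD/STRD need an even/odd register pair (r2n/r2n+1),
** which is why the picks above test adjacent free registers, roughly:
**   pick2 = free & (free >> 1) & RSET_GPREVEN  -- both halves of a pair free
** and otherwise restore or evict the occupied half of a suitable pair.
*/
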
/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}

/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  MCode *mxp = as->mcbot;
  int i;
  if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
    asm_mclimit(as);
  /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
  *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
  *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
  mxp++;
  *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch);  /* DISPATCH address */
  *mxp++ = group*EXITSTUBS_PER_GROUP;
  for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
    *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
  lj_mcode_sync(as->mcbot, mxp);
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxp - EXITSTUBS_PER_GROUP;
}

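/* Resulting group layout (illustrative sketch):
**   str lr, [sp]                     @ Group head: save original lr.
**   bl ->vm_exit_handler
**   .long DISPATCH_address
**   .long group*EXITSTUBS_PER_GROUP
**   b <head>                         @ Exit stub 0: pc-relative -6 words.
**   b <head>                         @ Exit stub 1, etc.
** The handler recovers the exit number from the distance of the taken
** 'b' back to the group head, plus the stored group base.
*/
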
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, ARMCC cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
    emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
    return;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
}

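/* Loop branch inversion (p == as->invmcp), roughly: the conditional exit
** branch is replaced by an unconditional 'bl' to the exit stub at the loop
** branch itself, preceded by a 'b' with the inverted condition that skips
** it when the guard holds.
*/
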
/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM  31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          int lim)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (ofs > -lim && ofs < lim) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (ofs < lim) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        *ofsp = (ofs & 255);  /* Mask out less bits to allow LDRD. */
        return ra_allock(as, (ofs & ~255), allow);
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return ARMF_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t k = emit_isk12(ai, ir->i);
    if (k)
      return k;
  } else if (mayfuse(as, ref)) {
    if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
                    ir->o == IR_BSHR ? ARMSH_LSR :
                    ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
      if (irref_isk(ir->op2)) {
        return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
      } else {
        Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
        return m | ARMF_RSH(sh, s);
      }
    } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return m | ARMF_SH(ARMSH_LSL, 1);
    }
  }
  return ra_allocref(as, ref, allow);
}

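/* A K12 operand encodes an 8-bit constant rotated right by an even amount.
** E.g. 255 (0xff, ror #0) and 0xff00 (0xff, ror #24) are encodable, but
** 0x101 is not: emit_isk12() then returns 0 above and the constant is
** loaded into a register via ra_allocref() instead.
*/
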
/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
      irref_isk(ir->op2) && IR(ir->op2)->i == 2)
    return ir->op1;
  return 0;  /* No fusion. */
}

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
                         RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
                  (ai & 0x04000000) ? 4096 : 256;
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) &&
          (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
          (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
        ofs = ofs2;
        ref = ir->op1;
      } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
        IRRef lref = ir->op1, rref = ir->op2;
        Reg rn, rm;
        if ((ai & 0x04000000)) {
          IRRef sref = asm_fuselsl2(as, rref);
          if (sref) {
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
            lref = rref;
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          }
        }
        rn = ra_alloc1(as, lref, allow);
        rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
      lua_assert(ofs == 0);
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs += IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs += IR(ir->op1)->i;
        ref = ir->op2;
      } else {
        /* NYI: Fuse ADD with constant. */
        Reg rn = ra_alloc1(as, ir->op1, allow);
        uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
        if ((ai & 0x04000000))
          emit_lso(as, ai, rd, rd, ofs);
        else
          emit_lsox(as, ai, rd, rd, ofs);
        emit_dn(as, ARMI_ADD^m, rd, rn);
        return;
      }
      if (ofs <= -lim || ofs >= lim) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
#if !LJ_SOFTFP
  if ((ai & 0x08000000))
    emit_vlso(as, ai, rd, base, ofs);
  else
#endif
  if ((ai & 0x04000000))
    emit_lso(as, ai, rd, base, ofs);
  else
    emit_lsox(as, ai, rd, base, ofs);
}

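/* The lim values above mirror the ARM addressing-mode ranges: VLDR/VSTR
** offsets are imm8 scaled by 4 (so < 1024 and 4-byte aligned), plain
** LDR/STR take imm12 (< 4096), and the halfword/dualword forms only
** imm8 (< 256).
*/
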
/* Fuse to multiply-add/sub instruction. */
static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
  if (lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg right, left = ra_alloc2(as, irm,
                        rset_exclude(rset_exclude(RSET_FPR, dest), add));
    right = (left >> 8); left &= 255;
    emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
    if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
    return 1;
  }
  return 0;
}

/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = 0;
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
#if !LJ_SOFTFP
    if (ref && irt_isfp(ir->t)) {
      RegSet of = as->freeset;
      Reg src;
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(ir->t)) {
          if (fpr <= REGARG_LASTFPR) {
            ra_leftov(as, fpr, ref);
            fpr++;
            continue;
          }
        } else if (fprodd) {  /* Ick. */
          src = ra_alloc1(as, ref, RSET_FPR);
          emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
          fprodd = 0;
          continue;
        } else if (fpr <= REGARG_LASTFPR) {
          ra_leftov(as, fpr, ref);
          fprodd = fpr++;
          continue;
        }
        /* Workaround to protect argument GPRs from being used for remat. */
        as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
        src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
        as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
        fprodd = 0;
        goto stackfp;
      }
      /* Workaround to protect argument GPRs from being used for remat. */
      as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
      src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
      as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
      if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (irt_isnum(ir->t)) {
          lua_assert(rset_test(as->freeset, gpr+1));  /* Ditto. */
          emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
          gpr += 2;
        } else {
          emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
          gpr++;
        }
      } else {
      stackfp:
        if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
        emit_spstore(as, ir, src, ofs);
        ofs += irt_isnum(ir->t) ? 8 : 4;
      }
    } else
#endif
    {
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (ref) ra_leftov(as, gpr, ref);
        gpr++;
      } else {
        if (ref) {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs);
        }
        ofs += 4;
      }
    }
  }
}

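/* Example (illustrative): for a hard-float call f(int a, double b, int c)
** the loop above assigns a -> r0, b -> d0, c -> r1. With CCI_VARARG or a
** soft-float ABI the double is instead passed in an aligned GPR pair, so
** the same call becomes a -> r0, b -> r2/r3, c -> stack.
*/
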
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP);
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lua_assert(!irt_ispri(ir->t));
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
        Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
        if (irt_isnum(ir->t))
          emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
        else
          emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
    emit_m(as, ARMI_BLXr, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, ARMI_CMP, RID_TMP,
          ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
  emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
}

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
  IRRef lref = ir->op1;
  /* 64 bit integer conversions are handled by SPLIT. */
  lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
#if LJ_SOFTFP
  /* FP conversions are handled by SPLIT. */
  lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lua_assert(irt_type(ir->t) != st);
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
              (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      ARMIns ai = irt_isfloat(ir->t) ?
        (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
        (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
      emit_dm(as, ai, (dest & 15), (dest & 15));
      emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ARMIns ai;
      emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
      ai = irt_isint(ir->t) ?
        (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
        (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
      emit_dm(as, ai, (tmp & 15), (left & 15));
    }
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
      if ((as->flags & JIT_F_ARMV6)) {
        ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
                    st == IRT_U8 ? ARMI_UXTB :
                    st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
        emit_dm(as, ai, dest, left);
      } else if (st == IRT_U8) {
        emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
      } else {
        uint32_t shift = st == IRT_I8 ? 24 : 16;
        ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
        emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
        emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
      }
    } else {  /* Handle 32/32 bit no-op (cast). */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}

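/* E.g. an I16 -> int sign-extension on pre-ARMv6 is emitted as the pair
** 'mov RID_TMP, left, lsl #16' / 'mov dest, RID_TMP, asr #16', while
** ARMv6+ uses a single sxth.
*/
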
#if !LJ_SOFTFP && LJ_HASFFI
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  CCallInfo ci;
  IRRef args[2];
  args[0] = (ir-1)->op1;
  args[1] = ir->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  ci = lj_ir_callinfo[id];
  if (!LJ_ABI_SOFTFP)
    ci.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
  asm_setupresult(as, ir, &ci);
  asm_gencall(as, &ci, args);
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg rlo = 0, rhi = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
#if LJ_SOFTFP
  if (destused) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s);
      destused = 0;
    } else {
      rhi = ra_dest(as, ir+1, RSET_GPR);
      rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused) {
    emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
    emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
  }
#else
  UNUSED(rhi);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      rlo = ra_dest(as, ir, RSET_FPR);
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused)
    emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
#endif
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  if (ofs == 0)
    emit_dm(as, ARMI_MOV, tmp, RID_SP);
  else
    emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
}

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref)) {
      /* Use the number constant itself as a TValue. */
      ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
    } else {
#if LJ_SOFTFP
      lua_assert(0);
#else
      /* Otherwise force a spill and use the spill slot. */
      emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
#endif
    }
  } else {
    /* Otherwise use [sp] and [sp+4] to hold the TValue. */
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_dm(as, ARMI_MOV, dest, RID_SP);
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_lso(as, ARMI_STR, src, RID_SP, 0);
    }
    if ((ir+1)->o == IR_HIOP)
      type = ra_alloc1(as, ref+1, allow);
    else
      type = ra_allock(as, irt_toitype(ir->t), allow);
    emit_lso(as, ARMI_STR, type, RID_SP, 4);
  }
}

static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}

/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, ARMI_ADD^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
  uint32_t khash;
  MCLabel l_end, l_loop;
  rset_clear(allow, tab);
  if (!irref_isk(refkey) || irt_isstr(kt)) {
#if LJ_SOFTFP
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        keynumhi = (irkey+1)->r;
        keyhi = RID_TMP;
        ra_noweak(as, keynumhi);
      } else {
        keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, keynumhi);
      khi = 0;
    }
#else
    if (irt_isnum(kt)) {
      key = ra_scratch(as, allow);
      rset_clear(allow, key);
      keyhi = keynumhi = ra_scratch(as, allow);
      rset_clear(allow, keyhi);
      khi = 0;
    } else {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
#endif
  } else if (irt_isnum(kt)) {
    int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
    k = emit_isk12(ARMI_CMP, val);
    if (!k) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
    val = (int32_t)ir_knum(irkey)->u32.hi;
    khi = emit_isk12(ARMI_CMP, val);
    if (!khi) {
      keyhi = ra_allock(as, val, allow);
      rset_clear(allow, keyhi);
    }
  } else if (!irt_ispri(kt)) {
    k = emit_isk12(ARMI_CMP, irkey->i);
    if (!k) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }
  if (!irt_ispri(kt))
    tmp = ra_scratchpair(as, allow);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
  emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  if (!irt_ispri(kt)) {
    emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
    emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
    emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
  } else {
    emit_n(as, ARMI_CMP^khi, tmp);
    emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
  }
  *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
    if (irt_isstr(kt)) {  /* Fetch of str->hash is cheaper than ra_allock. */
      emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else if (irref_isk(refkey)) {
      emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
               rset_exclude(rset_exclude(RSET_GPR, tab), dest));
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      if (ra_hasreg(keynumhi)) {  /* Canonicalize +-0.0 to 0.0. */
        if (keyhi == RID_TMP)
          emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
        emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
      }
      emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
               tmp, tmp+1, tmp);
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
      if (ra_hasreg(keynumhi)) {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key);  /* Test for +-0.0. */
        emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
#if !LJ_SOFTFP
        emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
                 (ra_alloc1(as, refkey, RSET_FPR) & 15));
#endif
      } else {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
                 rset_exclude(rset_exclude(RSET_GPR, tab), key));
      }
    }
  }
}

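/* The non-constant number-key case above inlines hashnum(): the key is
** moved to a GPR pair, +-0.0 is canonicalized to 0.0, and the hi/lo words
** are mixed with the HASH_ROT1/ROT2/ROT3 rotates before masking with
** t->hmask. This must match hash*() in lj_tab.c exactly.
*/
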
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  lua_assert(ofs % sizeof(Node) == 0);
  if (ofs > 4095) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    RegSet even = (as->freeset & allow);
    even = even & (even >> 1) & RSET_GPREVEN;
    if (even) {
      key = ra_scratch(as, even);
      if (rset_test(as->freeset, key+1)) {
        type = key+1;
        ra_modified(as, type);
      }
    } else {
      key = ra_scratch(as, allow);
    }
    rset_clear(allow, key);
  }
  rset_clear(allow, type);
  if (irt_isnum(irkey->t)) {
    emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
             (int32_t)ir_knum(irkey)->u32.hi, allow);
    emit_opk(as, ARMI_CMP, 0, key,
             (int32_t)ir_knum(irkey)->u32.lo, allow);
  } else {
    if (ra_hasreg(key))
      emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
  }
  emit_lso(as, ARMI_LDR, type, idx, kofs+4);
  if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
  if (ofs > 4095)
    emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
}

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  if (ir->r == RID_SINK)
    return;
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, ARMI_LDR, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
      emit_opk(as, ARMI_ADD, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, ARMI_LDR, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lua_assert(!ra_used(ir));
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    uint32_t k, m = ARMI_K12|sizeof(GCstr);
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               (k = emit_isk12(ARMI_ADD,
                               (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
      m = k;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_dn(as, ARMI_ADD^m, dest, dest);
    emit_dnm(as, ARMI_ADD, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  emit_opk(as, ARMI_ADD, dest, r,
           sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
}

/* -- Loads and stores ---------------------------------------------------- */

static ARMIns asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return ARMI_LDRSB;
  case IRT_U8: return ARMI_LDRB;
  case IRT_I16: return ARMI_LDRSH;
  case IRT_U16: return ARMI_LDRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;
  default: return ARMI_LDR;
  }
}

static ARMIns asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return ARMI_STRB;
  case IRT_I16: case IRT_U16: return ARMI_STRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;
  default: return ARMI_STR;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
  ARMIns ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op2 == IRFL_TAB_ARRAY) {
    ofs = asm_fuseabase(as, ir->op1);
    if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
      emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
      return;
    }
  }
  ofs = field_ofs[ir->op2];
  if ((ai & 0x04000000))
    emit_lso(as, ai, dest, idx, ofs);
  else
    emit_lsox(as, ai, dest, idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    ARMIns ai = asm_fxstoreins(ir);
    if ((ai & 0x04000000))
      emit_lso(as, ai, src, idx, ofs);
    else
      emit_lsox(as, ai, src, idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir,
                     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
}

static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2,
                        (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src), ofs);
  }
}

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
                       (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
  if (!hiop || type == RID_NONE) {
    rset_clear(allow, idx);
    if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
        rset_test((as->freeset & allow), dest+1)) {
      type = dest+1;
      ra_modified(as, type);
    } else {
      type = RID_TMP;
    }
  }
  asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
  emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM)
      emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
    else
#endif
      emit_lso(as, ARMI_LDR, dest, idx, ofs);
  }
  emit_lso(as, ARMI_LDR, type, idx, ofs+4);
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, type = RID_NONE;
    int32_t ofs = 0;
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
      emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
    } else
#endif
    {
      int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
      }
      if (hiop)
        type = ra_alloc1(as, (ir+1)->op2, allow);
      else
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
      if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
      emit_lso(as, ARMI_STR, type, idx, ofs+4);
    }
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
#if LJ_SOFTFP
  lua_assert(!(ir->op2 & IRSLOAD_CONVERT));  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (t == IRT_INT) {
        emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
        emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
        t = IRT_NUM;  /* Check for original type. */
      } else {
        emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
        emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
        t = IRT_INT;  /* Check for original type. */
      }
      dest = tmp;
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type)) {
      if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
          rset_test((as->freeset & allow), dest+1)) {
        type = dest+1;
        ra_modified(as, type);
      } else {
        type = RID_TMP;
      }
    }
    asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  }
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM) {
      if (ofs < 1024) {
        emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
      } else {
        if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
        emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
        emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
        return;
      }
    } else
#endif
      emit_lso(as, ARMI_LDR, dest, base, ofs);
  }
  if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
              lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[2];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lua_assert(sz != CTSIZE_INVALID);

  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size */
  as->gcsteps++;

  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lua_assert(sz == 4 || sz == 8);
    if (sz == 8) {
      ofs += 4; ir++;
      lua_assert(ir->o == IR_HIOP);
    }
    for (;;) {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_lso(as, ARMI_STR, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    }
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    uint32_t k = emit_isk12(ARMI_MOV, ctypeid);
    Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow);
    emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
    if (k) emit_d(as, ARMI_MOV^k, RID_R1);
  }
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir)  ((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
                     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, ARMI_STR, tab, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
  emit_lso(as, ARMI_LDR, link, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
  emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv */
  asm_gencall(as, ci, args);
  if ((l_end[-1] >> 28) == CC_AL)
    l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_lso(as, ARMI_LDRB, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
}

static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_dm(as, ai, (dest & 15), (left & 15));
}

static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
{
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
        irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
      IRRef args[2];
      args[0] = irpp->op1;
      args[1] = irp->op2;
      asm_setupresult(as, ir, ci);
      asm_gencall(as, ci, args);
      return 1;
    }
  }
  return 0;
}

static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
      ai ^= (ARMI_SUB^ARMI_RSB);
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= ARMI_S;
  }
  emit_dn(as, ai^m, dest, left);
}

static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai |= ARMI_S;
  }
  asm_intop(as, ir, ai);
}

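/* Example (illustrative): for 'y = a - b' followed by a guard 'y == 0',
** the comparison emits 'cmp y, #0' and records as->flagmcp. Since mcode
** is generated backwards, the SUB emitted next can drop that cmp and
** turn itself into 'subs', setting the flags directly.
*/
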
static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
    uint32_t cc = (as->mcp[1] >> 28);
    as->flagmcp = NULL;
    if (cc <= CC_NE) {
      as->mcp++;
      ai |= ARMI_S;
    } else if (cc == CC_GE) {
      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
      ai |= ARMI_S;
    } else if (cc == CC_LT) {
      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
      ai |= ARMI_S;
    }  /* else: other conds don't work with bit ops. */
  }
  if (ir->op2 == 0) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
    emit_d(as, ai^m, dest);
  } else {
    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
    asm_intop(as, ir, ai);
  }
}

static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dn(as, ai|ARMI_K12|0, dest, left);
}

/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  Reg tmp = RID_NONE;
  /* ARMv5 restriction: dest != left and dest_hi != left. */
  if (dest == left && left != right) { left = right; right = dest; }
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    if (!(as->flags & JIT_F_ARMV6) && dest == left)
      tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
    asm_guardcc(as, CC_NE);
    emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
    emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
  } else {
    if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
    emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
  }
  /* Only need this for the dest == left == right case. */
  if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
}

static void asm_add(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
      asm_fparith(as, ir, ARMI_VADD_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_ADD);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
      asm_fparith(as, ir, ARMI_VSUB_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_SUB);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, ARMI_VMUL_D);
    return;
  }
#endif
  asm_intmul(as, ir);
}

static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, ARMI_VNEG_D);
    return;
  }
#endif
  asm_intneg(as, ir, ARMI_RSB);
}

static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_callround(ASMState *as, IRIns *ir, int id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
                RID2RSET(RID_R3)|RID2RSET(RID_R12);
  RegSet of;
  Reg dest, src;
  ra_evictset(as, drop);
  dest = ra_dest(as, ir, RSET_FPR);
  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
                id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
                                   (void *)lj_vm_trunc_sf);
  /* Workaround to protect argument GPRs from being used for remat. */
  of = as->freeset;
  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
}

static void asm_bitswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_ARMV6)) {
    emit_dm(as, ARMI_REV, dest, left);
  } else {
    Reg tmp2 = dest;
    if (tmp2 == left)
      tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
    emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
    emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
  }
}

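/* The pre-ARMv6 path is the classic 4-insn byte swap; in execution order
** (mcode is emitted backwards) it reads roughly:
**   eor RID_TMP, left, left, ror #16
**   bic RID_TMP, RID_TMP, #0x00ff0000
**   mov tmp2, left, ror #8
**   eor dest, tmp2, RID_TMP, lsr #8
*/
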
static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
{
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
    /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & 31);
    emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
  }
}

static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  uint32_t kcmp = 0, kmov = 0;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = 0;
  if (irref_isk(ir->op2)) {
    kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
    if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
  }
  if (!kmov) {
    kcmp = 0;
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  }
  if (kmov || dest != right) {
    emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
    cc ^= 1;  /* Must use opposite conditions for paired moves. */
  } else {
    cc ^= (CC_LT^CC_GT);  /* Otherwise may swap CC_LT <-> CC_GT. */
  }
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
  emit_nm(as, ARMI_CMP^kcmp, left, right);
}

#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0] = ir->op1; args[1] = (ir+1)->op1;
  args[2] = ir->op2; args[3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. */
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
  if (!rset_test(as->freeset, RID_R2) &&
      regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
  if (!rset_test(as->freeset, RID_R3) &&
      regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
  ra_evictset(as, drop);
  ra_destpair(as, ir);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 15); left &= 15;
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
  if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, left, right);
}
#endif

static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
{
#if LJ_SOFTFP
  UNUSED(fcc);
#else
  if (irt_isnum(ir->t))
    asm_fpmin_max(as, ir, fcc);
  else
#endif
    asm_intmin_max(as, ir, cc);
}

/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};

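/* The table stores the *negated* condition, since a guard branches to the
** exit when the comparison fails: e.g. IR_LT uses CC_GE for integers. The
** high nibble holds the unsigned condition used after an FP compare, where
** unordered operands must also exit.
*/
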
#if LJ_SOFTFP
/* FP comparisons. */
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
  args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
  args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
  for (r = RID_R0; r <= RID_R3; r++)
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
  ra_evictset(as, drop);
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  ARMIns ai;
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
    right = 0;
    ai = ARMI_VCMPZ_D;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 15); left = ((left >> 8) & 15);
    } else {
      right = ((left >> 8) & 15); left &= 15;
    }
    ai = ARMI_VCMP_D;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ai, left, right);
}
#endif

/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  ARMCC cc = (asm_compmap[ir->o] & 15);
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
  if (asm_swapops(as, lref, rref)) {
    Reg tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  if (irref_isk(rref) && IR(rref)->i == 0) {
    IRIns *irl = IR(lref);
    cmpprev0 = (irl+1 == ir);
    /* Combine comp(BAND(left, right), 0) into tst left, right. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        m2 = emit_isk12(ARMI_AND, IR(brref)->i);
        if ((m2 & (ARMI_AND^ARMI_BIC)))
          goto notst;  /* Not beneficial if we miss a constant operand. */
      }
      if (cc == CC_GE) cc = CC_PL;
      else if (cc == CC_LT) cc = CC_MI;
      else if (cc > CC_NE) goto notst;  /* Other conds don't work with tst. */
      bleft = ra_alloc1(as, blref, RSET_GPR);
      if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ARMI_TST^m2, bleft);
      return;
    }
  }
notst:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ARMI_CMP^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}

#if LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_int64comp(ASMState *as, IRIns *ir)
{
  int signedcomp = (ir->o <= IR_GT);
  ARMCC cclo, cchi;
  Reg leftlo, lefthi;
  uint32_t mlo, mhi;
  RegSet allow = RSET_GPR, oldfree;

  /* Always use unsigned comparison for loword. */
  cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
  leftlo = ra_alloc1(as, ir->op1, allow);
  oldfree = as->freeset;
  mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
  allow &= ~(oldfree & ~as->freeset);  /* Update for allocs of asm_fuseopm. */

  /* Use signed or unsigned comparison for hiword. */
  cchi = asm_compmap[ir->o] & 15;
  lefthi = ra_alloc1(as, (ir+1)->op1, allow);
  mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));

  /* All register allocations must be performed _before_ this point. */
  if (signedcomp) {
    MCLabel l_around = emit_label(as);
    asm_guardcc(as, cclo);
    emit_n(as, ARMI_CMP^mlo, leftlo);
    emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
    if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6;  /* GE -> GT, LE -> LT */
    asm_guardcc(as, cchi);
  } else {
    asm_guardcc(as, cclo);
    emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
  }
  emit_n(as, ARMI_CMP^mhi, lefthi);
}
#endif
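/*
** In execution order the signed variant reads: compare the hiwords
** and exit on the sharpened condition (GE/LE were turned into GT/LT
** above); if the hiwords differ the b.ne skips the loword test; only
** equal hiwords fall through to the unsigned loword compare. The
** unsigned variant instead uses a conditional cmpeq: the loword
** compare only replaces the flags when the hiwords were equal.
*/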
/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI || LJ_SOFTFP
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o <= IR_NE) {  /* 64 bit integer or FP comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_int64comp(as, ir-1);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
    return;
#elif LJ_HASFFI
  } else if ((ir-1)->o == IR_CONV) {
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_xstore(as, ir, 4);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
  case IR_ADD:
    as->curins--;
    asm_intop(as, ir, ARMI_ADC);
    asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
    break;
  case IR_SUB:
    as->curins--;
    asm_intop(as, ir, ARMI_SBC);
    asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
    break;
  case IR_NEG:
    as->curins--;
    asm_intneg(as, ir, ARMI_RSC);
    asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
    break;
#endif
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
#endif
  case IR_CALLN:
  case IR_CALLS:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
#if LJ_SOFTFP
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
#endif
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);
#endif
}
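/*
** A split 64 bit add thus becomes the standard two-instruction ARM
** idiom, e.g.:
**
**   adds rlo, rlo1, rlo2  @ loword add, sets the carry flag
**   adc  rhi, rhi1, rhi2  @ hiword add, consumes the carry flag
**
** Since the assembler generates machine code backwards, the hiword
** op (ADC/SBC/RSC) is assembled first, so it ends up after the
** flag-setting loword op in the final code.
*/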
/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lua_assert(ra_hasreg(pbase));
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0);  /* Restore temp. register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
  k = emit_isk12(0, (int32_t)(8*topslot));
  lua_assert(k);
  emit_n(as, ARMI_CMP^k, RID_TMP);
  emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
  emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
           (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    int32_t i = i32ptr(&J2G(as->J)->jit_L);
    if (ra_hasspill(irp->s))
      emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0);  /* Save temp. register. */
    emit_loadi(as, RID_TMP, (i & ~4095));
  } else {
    emit_getgl(as, RID_TMP, jit_L);
  }
}
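/*
** The jit_L load in the side-trace case is split into (i & ~4095) and
** (i & 4095) because the immediate offset of an ARM LDR is limited to
** 12 bits: the upper part of the address is materialized with
** emit_loadi and the low 12 bits go into the load's offset field.
*/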
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg tmp;
      lua_assert(irref_isk(ref));  /* LJ_SOFTFP: must be a number constant. */
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
                      rset_exclude(RSET_GPREVEN, RID_BASE));
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
      if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
#endif
    } else {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg type;
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
        emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
        if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), odd);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
#endif
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
      }
      emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}
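/*
** The even/odd register choices above are deliberate: the payload is
** stored from an even GPR and the type (or number hiword) from an odd
** GPR, preferably the adjacent one. When a consecutive even/odd pair
** stores to ofs and ofs+4, the emitter can merge the two str
** instructions into a single strd.
*/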
/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp1, tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps */
  asm_gencall(as, ci, args);
  tmp1 = ra_releasetmp(as, ASMREF_TMP1);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
  emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
  emit_lso(as, ARMI_LDR, tmp2, tmp1,
           (int32_t)offsetof(global_State, gc.threshold));
  emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
           (int32_t)offsetof(global_State, gc.total));
  ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
  as->gcsteps = 0;
  checkmclim(as);
}
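/*
** Read in execution order (emission is backwards), the generated code
** is: tmp1 = g; load g->gc.total and g->gc.threshold; skip the rest
** if total <= threshold; otherwise call lj_gc_step_jit(g, steps) and
** take the trace exit if it returns nonzero (i.e. the GC is in the
** GCSatomic or GCSfinalize phase).
*/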
/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the bcc and patched the final bl. */
    p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
  } else {
    p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
  }
}
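/*
** Note on the displacement arithmetic: the 24 bit offset field of an
** ARM branch counts words relative to PC+8, i.e. target - ins - 2 in
** MCode units. For the branch at p[-1] this is (target-p)+1-2 =
** (target-p)-1; for the one at p[-2] it is (target-p)+2-2 = target-p.
*/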
/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->jit_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, jit_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    rset_clear(allow, ra_dest(as, ir, allow));
  } else {
    Reg r = irp->r;
    lua_assert(ra_hasreg(r));
    rset_clear(allow, r);
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
  }
  return allow;
}
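/*
** For a side trace BASE must end up exactly where the parent trace
** left it: if the parent spilled it, any register will do and it is
** merely removed from allow; otherwise the parent's register r is
** forced, which may require restoring whatever currently occupies it.
*/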
/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    as->mctop = --p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(ARMI_ADD, spadj);
    lua_assert(k);
    p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}
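/*
** The zero word acts as a sentinel: when fusing adjacent ldr/str
** instructions into ldrd/strd the emitter inspects the previously
** emitted word, and an all-zero word can never match, so the last
** real instruction of the trace is never merged with the
** yet-to-be-patched tail.
*/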
/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;

  /* Guarded assertions. */
  case IR_EQ: case IR_NE:
    if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
      as->curins--;
      asm_href(as, ir-1, (IROp)ir->o);
      break;
    }
    /* fallthrough */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
#endif
    asm_intcomp(as, ir);
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
  case IR_BSWAP: asm_bitswap(as, ir); break;

  case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
  case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
  case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;

  case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
  case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
  case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
  case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
  case IR_BROL: lua_assert(0); break;

  /* Arithmetic ops. */
  case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
  case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
  case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
  case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
  case IR_NEG: asm_neg(as, ir); break;

#if LJ_SOFTFP
  case IR_DIV: case IR_POW: case IR_ABS:
  case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
    lua_assert(0);  /* Unused for LJ_SOFTFP. */
    break;
#else
  case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
  case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
  case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
  case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
  case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
  case IR_FPMATH:
    if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
      break;
    if (ir->op2 <= IRFPM_TRUNC)
      asm_callround(as, ir, ir->op2);
    else if (ir->op2 == IRFPM_SQRT)
      asm_fpunary(as, ir, ARMI_VSQRT_D);
    else
      asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
    break;
  case IR_TOBIT: asm_tobit(as, ir); break;
#endif

  case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
  case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir, 0); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}
/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = (int)CCI_NARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(IR(args[i])->t)) {
          if (nfpr > 0) nfpr--;
          else fprodd = 0, nslots = (nslots + 3) & ~1;
        } else {
          if (fprodd) fprodd--;
          else if (nfpr > 0) fprodd = 1, nfpr--;
          else nslots++;
        }
      } else if (irt_isnum(IR(args[i])->t)) {
        ngpr &= ~1;
        if (ngpr > 0) ngpr -= 2; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots++;
      }
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(RID_RET);
}
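/*
** The fprodd counter models the EABI VFP back-filling rule: a double
** takes a full d register, but a float may be placed into a still
** unused odd half left behind by earlier allocations. For example,
** for f(float, double, float) the arguments land in s0, d1 and s1,
** with the third argument back-filling the hole after the first.
** Once a double spills to the stack, back-filling stops (fprodd = 0).
*/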
static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}
/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL, *cend = p;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_addr(J, exitno) - 2;
  for (; p < pe; p++) {
    /* Look for bl_cc exitstub, replace with b_cc target. */
    uint32_t ins = *p;
    if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
        ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
      *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
      cend = p+1;
      if (!cstart) cstart = p;
    }
  }
  lua_assert(cstart != NULL);
  lj_mcode_sync(cstart, cend);
  lj_mcode_patch(J, mcarea, 1);
}
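/*
** Decoding of the patch loop: (ins & 0x0f000000u) == 0x0b000000u
** matches a conditional BL, ins < 0xf0000000u excludes the NV
** condition field, and the XOR test checks that the 24 bit
** displacement points at this trace's exit stub (px is the stub
** address pre-adjusted for the PC+8 bias). Clearing bit 24 via the
** 0xfe000000u mask turns the BL into a plain B with the same
** condition, now targeting the new trace.
*/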