/*
** ARM IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */
/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}
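/*
** Note: LDRD/STRD require an even/odd register pair (an even base
** register with its odd successor implied), which is why the pair
** allocation below filters candidates through RSET_GPREVEN/RSET_GPRODD
** and asserts that the returned base register is even.
*/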
/* Allocate a scratch register pair. */
static Reg ra_scratchpair(ASMState *as, RegSet allow)
{
  RegSet pick1 = as->freeset & allow;
  RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
  Reg r;
  if (pick2) {
    r = rset_picktop(pick2);
  } else {
    RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
    if (pick) {
      r = rset_picktop(pick);
      ra_restore(as, regcost_ref(as->cost[r+1]));
    } else {
      pick = pick1 & (allow << 1) & RSET_GPRODD;
      if (pick) {
        r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
      } else {
        r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
        ra_restore(as, regcost_ref(as->cost[r+1]));
      }
    }
  }
  lj_assertA(rset_test(RSET_GPREVEN, r), "odd reg %d", r);
  ra_modified(as, r);
  ra_modified(as, r+1);
  RA_DBGX((as, "scratchpair $r $r", r, r+1));
  return r;
}
/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}
/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  MCode *mxp = as->mcbot;
  int i;
  if (mxp + 4*4 + 4*EXITSTUBS_PER_GROUP >= as->mctop)
    asm_mclimit(as);
  /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
  *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
  *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
  mxp++;
  *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch);  /* DISPATCH address */
  *mxp++ = group*EXITSTUBS_PER_GROUP;
  for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
    *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
  lj_mcode_sync(as->mcbot, mxp);
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxp - EXITSTUBS_PER_GROUP;
}
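/*
** Layout note for the stub group generated above: the header occupies
** four words (str lr, bl, DISPATCH address, group number) and stub i sits
** at word 4+i. An ARM branch offset counts words relative to PC+8, so
** each stub reaches back to the header with a displacement of (-6-i):
** -(4+i) for the intervening words, minus 2 for the pipeline bias.
*/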
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}
/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, ARMCC cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
    emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
    return;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
}
/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}
/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}
/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          int lim)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (ofs > -lim && ofs < lim) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (ofs < lim) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        *ofsp = (ofs & 255);  /* Mask out less bits to allow LDRD. */
        return ra_allock(as, (ofs & ~255), allow);
      }
    } else if (ir->o == IR_TMPREF) {
      *ofsp = 0;
      return RID_SP;
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}
/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return ARMF_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t k = emit_isk12(ai, ir->i);
    if (k)
      return k;
  } else if (mayfuse(as, ref)) {
    if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
                    ir->o == IR_BSHR ? ARMSH_LSR :
                    ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
      if (irref_isk(ir->op2)) {
        return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
      } else {
        Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
        return m | ARMF_RSH(sh, s);
      }
    } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return m | ARMF_SH(ARMSH_LSL, 1);
    }
  }
  return ra_allocref(as, ref, allow);
}
/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
      irref_isk(ir->op2) && IR(ir->op2)->i == 2)
    return ir->op1;
  return 0;  /* No fusion. */
}
/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
                         RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
                  (ai & 0x04000000) ? 4096 : 256;
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) &&
          (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
          (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
        ofs = ofs2;
        ref = ir->op1;
      } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
        IRRef lref = ir->op1, rref = ir->op2;
        Reg rn, rm;
        if ((ai & 0x04000000)) {
          IRRef sref = asm_fuselsl2(as, rref);
          if (sref) {
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
            lref = rref;
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          }
        }
        rn = ra_alloc1(as, lref, allow);
        rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
      lj_assertA(ofs == 0, "bad usage");
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs += IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs += IR(ir->op1)->i;
        ref = ir->op2;
      } else {
        /* NYI: Fuse ADD with constant. */
        Reg rn = ra_alloc1(as, ir->op1, allow);
        uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
        if ((ai & 0x04000000))
          emit_lso(as, ai, rd, rd, ofs);
        else
          emit_lsox(as, ai, rd, rd, ofs);
        emit_dn(as, ARMI_ADD^m, rd, rn);
        return;
      }
      if (ofs <= -lim || ofs >= lim) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
#if !LJ_SOFTFP
  if ((ai & 0x08000000))
    emit_vlso(as, ai, rd, base, ofs);
  else
#endif
  if ((ai & 0x04000000))
    emit_lso(as, ai, rd, base, ofs);
  else
    emit_lsox(as, ai, rd, base, ofs);
}
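/*
** Note on the opcode masks tested above: bit 0x08000000 appears to mark
** VFP loads/stores (VLDR/VSTR, 8 bit offset scaled by 4, hence lim 1024),
** bit 0x04000000 the word/byte forms (LDR/STR, 12 bit offset, lim 4096);
** everything else uses the halfword/dual encodings with an 8 bit offset
** (lim 256).
*/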
#if !LJ_SOFTFP
/*
** Fuse to multiply-add/sub instruction.
** VMLA rounds twice (UMA, not FMA) -- no need to check for JIT_F_OPT_FMA.
** VFMA needs VFPv4, which is uncommon on the remaining ARM32 targets.
*/
static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
  if (lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg right, left = ra_alloc2(as, irm,
                                rset_exclude(rset_exclude(RSET_FPR, dest), add));
    right = (left >> 8); left &= 255;
    emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
    if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
    return 1;
  }
  return 0;
}
#endif
/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = 0;
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
#if !LJ_SOFTFP
    if (ref && irt_isfp(ir->t)) {
      RegSet of = as->freeset;
      Reg src;
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(ir->t)) {
          if (fpr <= REGARG_LASTFPR) {
            ra_leftov(as, fpr, ref);
            fpr++;
            continue;
          }
        } else if (fprodd) {  /* Ick. */
          src = ra_alloc1(as, ref, RSET_FPR);
          emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
          fprodd = 0;
          continue;
        } else if (fpr <= REGARG_LASTFPR) {
          ra_leftov(as, fpr, ref);
          fprodd = fpr++;
          continue;
        }
        /* Workaround to protect argument GPRs from being used for remat. */
        as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
        src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
        as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
        fprodd = 0;
        goto stackfp;
      }
      /* Workaround to protect argument GPRs from being used for remat. */
      as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
      src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
      as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
      if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
      if (gpr <= REGARG_LASTGPR) {
        lj_assertA(rset_test(as->freeset, gpr),
                   "reg %d not free", gpr);  /* Must have been evicted. */
        if (irt_isnum(ir->t)) {
          lj_assertA(rset_test(as->freeset, gpr+1),
                     "reg %d not free", gpr+1);  /* Ditto. */
          emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
          gpr += 2;
        } else {
          emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
          gpr++;
        }
      } else {
      stackfp:
        if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
        emit_spstore(as, ir, src, ofs);
        ofs += irt_isnum(ir->t) ? 8 : 4;
      }
    } else
#endif
    {
      if (gpr <= REGARG_LASTGPR) {
        lj_assertA(rset_test(as->freeset, gpr),
                   "reg %d not free", gpr);  /* Must have been evicted. */
        if (ref) ra_leftov(as, gpr, ref);
        gpr++;
      } else {
        if (ref) {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs);
        }
        ofs += 4;
      }
    }
  }
}
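/*
** This mirrors the AAPCS conventions: integer arguments go in r0-r3 and
** then on the stack; with the hard-float ABI, FP arguments go in FPRs,
** with single-precision values back-filling odd s-registers (the fprodd
** case above). Varargs calls always use the soft-float GPR convention.
*/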
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lj_assertA(!irt_ispri(ir->t), "PRI dest");
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
        Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
        if (irt_isnum(ir->t))
          emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
        else
          emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}
static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
    emit_m(as, ARMI_BLXr, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}
/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, ARMI_CMP, RID_TMP,
          ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
}
/* -- Buffer operations --------------------------------------------------- */

#if LJ_HASBUFFER
static void asm_bufhdr_write(ASMState *as, Reg sb)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  IRIns irgc;
  int32_t addr = i32ptr((void *)&J2G(as->J)->cur_L);
  irgc.ot = IRT(0, IRT_PGC);  /* GC type. */
  emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
  if ((as->flags & JIT_F_ARMV6T2)) {
    emit_dnm(as, ARMI_BFI, RID_TMP, lj_fls(SBUF_MASK_FLAG), tmp);
  } else {
    emit_dnm(as, ARMI_ORR, RID_TMP, RID_TMP, tmp);
    emit_dn(as, ARMI_AND|ARMI_K12|SBUF_MASK_FLAG, tmp, tmp);
  }
  emit_lso(as, ARMI_LDR, RID_TMP,
           ra_allock(as, (addr & ~4095),
                     rset_exclude(rset_exclude(RSET_GPR, sb), tmp)),
           (addr & 4095));
  emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
}
#endif
/* -- Type conversions ---------------------------------------------------- */

#if !LJ_SOFTFP
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
  emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
}
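/*
** The checked conversion above works by a round trip: convert to int,
** convert back to double and compare (VCMP + VMRS). If the reconverted
** value differs, the number was not an exact 32 bit integer and the
** guard exits.
*/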
static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
}
#endif
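/*
** TOBIT adds the magic bias constant supplied as op2 (2^52+2^51) to the
** number, so after the VADD the truncated integer result ends up in the
** low 32 bits of the double's mantissa, where the VMOV picks it up.
*/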
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
  IRRef lref = ir->op1;
  /* 64 bit integer conversions are handled by SPLIT. */
  lj_assertA(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64),
             "IR %04d has unsplit 64 bit type",
             (int)(ir - as->ir) - REF_BIAS);
#if LJ_SOFTFP
  /* FP conversions are handled by SPLIT. */
  lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
             "IR %04d has FP type",
             (int)(ir - as->ir) - REF_BIAS);
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
              (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      ARMIns ai = irt_isfloat(ir->t) ?
        (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
        (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
      emit_dm(as, ai, (dest & 15), (dest & 15));
      emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ARMIns ai;
      emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
      ai = irt_isint(ir->t) ?
        (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
        (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
      emit_dm(as, ai, (tmp & 15), (left & 15));
    }
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
      if ((as->flags & JIT_F_ARMV6)) {
        ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
                    st == IRT_U8 ? ARMI_UXTB :
                    st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
        emit_dm(as, ai, dest, left);
      } else if (st == IRT_U8) {
        emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
      } else {
        uint32_t shift = st == IRT_I8 ? 24 : 16;
        ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
        emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
        emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
      }
    } else {  /* Handle 32/32 bit no-op (cast). */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}
static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg rlo = 0, rhi = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
#if LJ_SOFTFP
  if (destused) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s);
      destused = 0;
    } else {
      rhi = ra_dest(as, ir+1, RSET_GPR);
      rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused) {
    emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
    emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
  }
#else
  UNUSED(rhi);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      rlo = ra_dest(as, ir, RSET_FPR);
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused)
    emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
#endif
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  if (ofs == 0)
    emit_dm(as, ARMI_MOV, tmp, RID_SP);
  else
    emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
}
/* -- Memory references --------------------------------------------------- */

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
{
  if ((mode & IRTMPREF_IN1)) {
    IRIns *ir = IR(ref);
    if (irt_isnum(ir->t)) {
      if ((mode & IRTMPREF_OUT1)) {
#if LJ_SOFTFP
        lj_assertA(irref_isk(ref), "unsplit FP op");
        emit_dm(as, ARMI_MOV, dest, RID_SP);
        emit_lso(as, ARMI_STR,
                 ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, RSET_GPR),
                 RID_SP, 0);
        emit_lso(as, ARMI_STR,
                 ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, RSET_GPR),
                 RID_SP, 4);
#else
        Reg src = ra_alloc1(as, ref, RSET_FPR);
        emit_dm(as, ARMI_MOV, dest, RID_SP);
        emit_vlso(as, ARMI_VSTR_D, src, RID_SP, 0);
#endif
      } else if (irref_isk(ref)) {
        /* Use the number constant itself as a TValue. */
        ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
      } else {
#if LJ_SOFTFP
        lj_assertA(0, "unsplit FP op");
#else
        /* Otherwise force a spill and use the spill slot. */
        emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
#endif
      }
    } else {
      /* Otherwise use [sp] and [sp+4] to hold the TValue.
      ** This assumes the following call has max. 4 args.
      */
      Reg type;
      emit_dm(as, ARMI_MOV, dest, RID_SP);
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, RSET_GPR);
        emit_lso(as, ARMI_STR, src, RID_SP, 0);
      }
      if (LJ_SOFTFP && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t))
        type = ra_alloc1(as, ref+1, RSET_GPR);
      else
        type = ra_allock(as, irt_toitype(ir->t), RSET_GPR);
      emit_lso(as, ARMI_STR, type, RID_SP, 4);
    }
  } else {
    emit_dm(as, ARMI_MOV, dest, RID_SP);
  }
}
static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, ARMI_ADD^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
}
/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
  uint32_t khash;
  MCLabel l_end, l_loop;
  rset_clear(allow, tab);
  if (!irref_isk(refkey) || irt_isstr(kt)) {
#if LJ_SOFTFP
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        keynumhi = (irkey+1)->r;
        keyhi = RID_TMP;
        ra_noweak(as, keynumhi);
      } else {
        keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, keynumhi);
      khi = 0;
    }
#else
    if (irt_isnum(kt)) {
      key = ra_scratch(as, allow);
      rset_clear(allow, key);
      keyhi = keynumhi = ra_scratch(as, allow);
      rset_clear(allow, keyhi);
      khi = 0;
    } else {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
#endif
  } else if (irt_isnum(kt)) {
    int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
    k = emit_isk12(ARMI_CMP, val);
    if (!k) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
    val = (int32_t)ir_knum(irkey)->u32.hi;
    khi = emit_isk12(ARMI_CMP, val);
    if (!khi) {
      keyhi = ra_allock(as, val, allow);
      rset_clear(allow, keyhi);
    }
  } else if (!irt_ispri(kt)) {
    k = emit_isk12(ARMI_CMP, irkey->i);
    if (!k) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }
  if (!irt_ispri(kt))
    tmp = ra_scratchpair(as, allow);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
  emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  if (!irt_ispri(kt)) {
    emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
    emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
    emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
  } else {
    emit_n(as, ARMI_CMP^khi, tmp);
    emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
  }
  *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
    if (irt_isstr(kt)) {  /* Fetch of str->sid is cheaper than ra_allock. */
      emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, sid));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else if (irref_isk(refkey)) {
      emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
               rset_exclude(rset_exclude(RSET_GPR, tab), dest));
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      if (ra_hasreg(keynumhi)) {  /* Canonicalize +-0.0 to 0.0. */
        if (keyhi == RID_TMP)
          emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
        emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
      }
      emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
               tmp, tmp+1, tmp);
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
      if (ra_hasreg(keynumhi)) {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key);  /* Test for +-0.0. */
        emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
#if !LJ_SOFTFP
        emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
                 (ra_alloc1(as, refkey, RSET_FPR) & 15));
#endif
      } else {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
                 rset_exclude(rset_exclude(RSET_GPR, tab), key));
      }
    }
  }
}
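/*
** Reading note: the assembler emits machine code backwards, so asm_href
** creates the end-of-chain label first and patches the loop branch at
** l_loop last, even though the chain walk runs forward at execution time.
*/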
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (ofs > 4095) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    RegSet even = (as->freeset & allow);
    even = even & (even >> 1) & RSET_GPREVEN;
    if (even) {
      key = ra_scratch(as, even);
      if (rset_test(as->freeset, key+1)) {
        type = key+1;
        ra_modified(as, type);
      }
    } else {
      key = ra_scratch(as, allow);
    }
    rset_clear(allow, key);
  }
  rset_clear(allow, type);
  if (irt_isnum(irkey->t)) {
    emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
             (int32_t)ir_knum(irkey)->u32.hi, allow);
    emit_opk(as, ARMI_CMP, 0, key,
             (int32_t)ir_knum(irkey)->u32.lo, allow);
  } else {
    if (ra_hasreg(key))
      emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
  }
  emit_lso(as, ARMI_LDR, type, idx, kofs+4);
  if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
  if (ofs > 4095)
    emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
}
static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  int guarded = (irt_t(ir->t) & (IRT_GUARD|IRT_TYPE)) == (IRT_GUARD|IRT_PGC);
  if (irref_isk(ir->op1) && !guarded) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, ARMI_LDR, dest, v);
  } else {
    if (guarded) {
      asm_guardcc(as, ir->o == IR_UREFC ? CC_NE : CC_EQ);
      emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
    }
    if (ir->o == IR_UREFC)
      emit_opk(as, ARMI_ADD, dest, dest,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
    else
      emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(GCupval, v));
    if (guarded)
      emit_lso(as, ARMI_LDRB, RID_TMP, dest,
               (int32_t)offsetof(GCupval, closed));
    if (irref_isk(ir->op1)) {
      GCfunc *fn = ir_kfunc(IR(ir->op1));
      int32_t k = (int32_t)gcrefu(fn->l.uvptr[(ir->op2 >> 8)]);
      emit_loadi(as, dest, k);
    } else {
      emit_lso(as, ARMI_LDR, dest, ra_alloc1(as, ir->op1, RSET_GPR),
               (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
    }
  }
}
static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lj_assertA(!ra_used(ir), "unfused FREF");
}
static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    uint32_t k, m = ARMI_K12|sizeof(GCstr);
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               (k = emit_isk12(ARMI_ADD,
                               (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
      m = k;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_dn(as, ARMI_ADD^m, dest, dest);
    emit_dnm(as, ARMI_ADD, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  emit_opk(as, ARMI_ADD, dest, r,
           sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
}
/* -- Loads and stores ---------------------------------------------------- */

static ARMIns asm_fxloadins(ASMState *as, IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return ARMI_LDRSB;
  case IRT_U8: return ARMI_LDRB;
  case IRT_I16: return ARMI_LDRSH;
  case IRT_U16: return ARMI_LDRH;
  case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VLDR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;  /* fallthrough */
  default: return ARMI_LDR;
  }
}
static ARMIns asm_fxstoreins(ASMState *as, IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return ARMI_STRB;
  case IRT_I16: case IRT_U16: return ARMI_STRH;
  case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VSTR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;  /* fallthrough */
  default: return ARMI_STR;
  }
}
static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  ARMIns ai = asm_fxloadins(as, ir);
  Reg idx;
  int32_t ofs;
  if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
    idx = ra_allock(as, (int32_t)(ir->op2<<2) + (int32_t)J2GG(as->J), RSET_GPR);
    ofs = 0;
  } else {
    idx = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->op2 == IRFL_TAB_ARRAY) {
      ofs = asm_fuseabase(as, ir->op1);
      if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
        emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
        return;
      }
    }
    ofs = field_ofs[ir->op2];
  }
  if ((ai & 0x04000000))
    emit_lso(as, ai, dest, idx, ofs);
  else
    emit_lsox(as, ai, dest, idx, ofs);
}
static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    ARMIns ai = asm_fxstoreins(as, ir);
    if ((ai & 0x04000000))
      emit_lso(as, ai, src, idx, ofs);
    else
      emit_lsox(as, ai, src, idx, ofs);
  }
}
static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir,
                     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
  lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
  asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
}
static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2,
                        (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src), ofs);
  }
}

#define asm_xstore(as, ir)	asm_xstore_(as, ir, 0)
static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t),
               "bad load type %d", irt_type(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
                       (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
  if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
  if (!hiop || type == RID_NONE) {
    rset_clear(allow, idx);
    if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
        rset_test((as->freeset & allow), dest+1)) {
      type = dest+1;
      ra_modified(as, type);
    } else {
      type = RID_TMP;
    }
  }
  asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
  emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM)
      emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
    else
#endif
      emit_lso(as, ARMI_LDR, dest, idx, ofs);
  }
  emit_lso(as, ARMI_LDR, type, idx, ofs+4);
}
static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, type = RID_NONE;
    int32_t ofs = 0;
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
      emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
    } else
#endif
    {
      int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
      }
      if (hiop)
        type = ra_alloc1(as, (ir+1)->op2, allow);
      else
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
      if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
      emit_lso(as, ARMI_STR, type, idx, ofs+4);
    }
  }
}
static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
             "bad parent SLOAD");  /* Handled by asm_head_side(). */
  lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
             "inconsistent SLOAD variant");
#if LJ_SOFTFP
  lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
             "unsplit SLOAD convert");  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
    lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t),
               "bad SLOAD type %d", irt_type(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (t == IRT_INT) {
        emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
        emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
        t = IRT_NUM;  /* Check for original type. */
      } else {
        emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
        emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
        t = IRT_INT;  /* Check for original type. */
      }
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type)) {
      if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
          rset_test((as->freeset & allow), dest+1)) {
        type = dest+1;
        ra_modified(as, type);
      } else {
        type = RID_TMP;
      }
    }
    asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
    if ((ir->op2 & IRSLOAD_KEYINDEX)) {
      emit_n(as, ARMI_CMN|ARMI_K12|1, type);
      emit_dn(as, ARMI_EOR^emit_isk12(ARMI_EOR, ~LJ_KEYINDEX), type, type);
    } else {
      emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
    }
  }
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM) {
      if (ofs < 1024) {
        emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
      } else {
        if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
        emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
        emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
        return;
      }
    } else
#endif
      emit_lso(as, ARMI_LDR, dest, base, ofs);
  }
  if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
}
/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
             "bad CNEW/CNEWI operands");

  as->gcsteps++;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
    if (sz == 8) {
      ofs += 4; ir++;
      lj_assertA(ir->o == IR_HIOP, "expected HIOP for CNEWI");
    }
    for (;;) {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_lso(as, ARMI_STR, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    }
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    uint32_t k = emit_isk12(ARMI_MOV, id);
    Reg r = k ? RID_R1 : ra_allock(as, id, allow);
    emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
    if (k) emit_d(as, ARMI_MOV^k, RID_R1);
  }
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#endif
/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
                     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, ARMI_STR, tab, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
  emit_lso(as, ARMI_LDR, link, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
  emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}
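/*
** This is a backward barrier: if the table is black, it is re-linked
** onto g->gc.grayagain and its black bit is cleared, so the GC rescans
** the whole table instead of tracking the individual store.
*/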
static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  if ((l_end[-1] >> 28) == CC_AL)
    l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_lso(as, ARMI_LDRB, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}
/* -- Arithmetic and logic operations ------------------------------------- */

#if !LJ_SOFTFP
static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
}
static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_dm(as, ai, (dest & 15), (left & 15));
}
static void asm_callround(ASMState *as, IRIns *ir, int id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
                RID2RSET(RID_R3)|RID2RSET(RID_R12);
  RegSet of;
  Reg dest, src;
  ra_evictset(as, drop);
  dest = ra_dest(as, ir, RSET_FPR);
  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
                id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
                                   (void *)lj_vm_trunc_sf);
  /* Workaround to protect argument GPRs from being used for remat. */
  of = as->freeset;
  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
}
static void asm_fpmath(ASMState *as, IRIns *ir)
{
  if (ir->op2 <= IRFPM_TRUNC)
    asm_callround(as, ir, ir->op2);
  else if (ir->op2 == IRFPM_SQRT)
    asm_fpunary(as, ir, ARMI_VSQRT_D);
  else
    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
}
#endif
static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}
static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
      ai ^= (ARMI_SUB^ARMI_RSB);
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= ARMI_S;
  }
  emit_dn(as, ai^m, dest, left);
}
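/*
** ARM data-processing instructions only accept a flexible operand on the
** right-hand side, so asm_swapops moves constants and fusable expressions
** there; a swapped subtraction is compensated by flipping SUB to RSB
** (and SBC to RSC) above.
*/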
/* Try to drop cmp r, #0. */
static ARMIns asm_drop_cmp0(ASMState *as, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {
    uint32_t cc = (as->mcp[1] >> 28);
    as->flagmcp = NULL;
    if (cc <= CC_NE) {
      as->mcp++;
      ai |= ARMI_S;
    } else if (cc == CC_GE) {
      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
      ai |= ARMI_S;
    } else if (cc == CC_LT) {
      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
      ai |= ARMI_S;
    }  /* else: other conds don't work in general. */
  }
  return ai;
}
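/*
** Rationale: an S-suffixed ALU op already sets the Z and N flags. EQ/NE
** test Z and carry over directly; GE/LT normally also involve V, but
** against a zero result they can be replaced by PL/MI, which test N alone.
*/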
static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
{
  asm_intop(as, ir, asm_drop_cmp0(as, ai));
}
static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dn(as, ai|ARMI_K12|0, dest, left);
}
/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  Reg tmp = RID_NONE;
  /* ARMv5 restriction: dest != left and dest_hi != left. */
  if (dest == left && left != right) { left = right; right = dest; }
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    if (!(as->flags & JIT_F_ARMV6) && dest == left)
      tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
    asm_guardcc(as, CC_NE);
    emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
    emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
  } else {
    if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
    emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
  }
  /* Only need this for the dest == left == right case. */
  if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
}
static void asm_add(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
      asm_fparith(as, ir, ARMI_VADD_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_ADD);
}
static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
      asm_fparith(as, ir, ARMI_VSUB_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_SUB);
}
static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, ARMI_VMUL_D);
    return;
  }
#endif
  asm_intmul(as, ir);
}
#define asm_addov(as, ir)	asm_add(as, ir)
#define asm_subov(as, ir)	asm_sub(as, ir)
#define asm_mulov(as, ir)	asm_mul(as, ir)

#if !LJ_SOFTFP
#define asm_fpdiv(as, ir)	asm_fparith(as, ir, ARMI_VDIV_D)
#define asm_abs(as, ir)		asm_fpunary(as, ir, ARMI_VABS_D)
#endif
static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, ARMI_VNEG_D);
    return;
  }
#endif
  asm_intneg(as, ir, ARMI_RSB);
}
static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
{
  ai = asm_drop_cmp0(as, ai);
  if (ir->op2 == 0) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
    emit_d(as, ai^m, dest);
  } else {
    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
    asm_intop(as, ir, ai);
  }
}
#define asm_bnot(as, ir)	asm_bitop(as, ir, ARMI_MVN)
static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_ARMV6)) {
    emit_dm(as, ARMI_REV, dest, left);
  } else {
    Reg tmp2 = dest;
    if (tmp2 == left)
      tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
    emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
    emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
  }
}
#define asm_band(as, ir)	asm_bitop(as, ir, ARMI_AND)
#define asm_bor(as, ir)		asm_bitop(as, ir, ARMI_ORR)
#define asm_bxor(as, ir)	asm_bitop(as, ir, ARMI_EOR)
static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
{
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
    /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & 31);
    emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
  }
}
#define asm_bshl(as, ir)	asm_bitshift(as, ir, ARMSH_LSL)
#define asm_bshr(as, ir)	asm_bitshift(as, ir, ARMSH_LSR)
#define asm_bsar(as, ir)	asm_bitshift(as, ir, ARMSH_ASR)
#define asm_bror(as, ir)	asm_bitshift(as, ir, ARMSH_ROR)
#define asm_brol(as, ir)	lj_assertA(0, "unexpected BROL")
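/*
** ARM has no rotate-left instruction; BROL is expected to have been
** canonicalized to BROR with a complemented shift count before assembly,
** hence the assertion above.
*/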
static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  uint32_t kcmp = 0, kmov = 0;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = 0;
  if (irref_isk(ir->op2)) {
    kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
    if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
  }
  if (!kmov)
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (kmov || dest != right) {
    emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
    cc ^= 1;  /* Must use opposite conditions for paired moves. */
  } else {
    cc ^= (CC_LT^CC_GT);  /* Otherwise may swap CC_LT <-> CC_GT. */
  }
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
  emit_nm(as, ARMI_CMP^kcmp, left, right);
}
#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0] = ir->op1; args[1] = (ir+1)->op1;
  args[2] = ir->op2; args[3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. */
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
  if (!rset_test(as->freeset, RID_R2) &&
      regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
  if (!rset_test(as->freeset, RID_R3) &&
      regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
  ra_evictset(as, drop);
  ra_destpair(as, ir);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#endif
#if !LJ_SOFTFP
static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 15); left &= 15;
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
  if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, left, right);
}
#endif
static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
{
#if LJ_SOFTFP
  UNUSED(fcc);
#else
  if (irt_isnum(ir->t))
    asm_fpmin_max(as, ir, fcc);
  else
#endif
    asm_intmin_max(as, ir, cc);
}
#define asm_min(as, ir)		asm_min_max(as, ir, CC_GT, CC_PL)
#define asm_max(as, ir)		asm_min_max(as, ir, CC_LT, CC_LE)

/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};
#if LJ_SOFTFP
/* FP comparisons. */
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
  args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
  args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
  for (r = RID_R0; r <= RID_R3; r++)
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
  ra_evictset(as, drop);
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  ARMIns ai;
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
    right = 0;
    ai = ARMI_VCMPZ_D;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 15); left = ((left >> 8) & 15);
    } else {
      right = ((left >> 8) & 15); left &= 15;
    }
    ai = ARMI_VCMP_D;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ai, left, right);
}
#endif

/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  ARMCC cc = (asm_compmap[ir->o] & 15);
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
             "bad comparison data type %d", irt_type(ir->t));
  if (asm_swapops(as, lref, rref)) {
    Reg tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  if (irref_isk(rref) && IR(rref)->i == 0) {
    IRIns *irl = IR(lref);
    cmpprev0 = (irl+1 == ir);
    /* Combine comp(BAND(left, right), 0) into tst left, right. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        m2 = emit_isk12(ARMI_AND, IR(brref)->i);
        if ((m2 & (ARMI_AND^ARMI_BIC)))
          goto notst;  /* Not beneficial if we miss a constant operand. */
      }
      if (cc == CC_GE) cc = CC_PL;
      else if (cc == CC_LT) cc = CC_MI;
      else if (cc > CC_NE) goto notst;  /* Other conds don't work with tst. */
      bleft = ra_alloc1(as, blref, RSET_GPR);
      if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ARMI_TST^m2, bleft);
      return;
    }
  }
notst:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ARMI_CMP^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}
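/* Worked example for the two XOR tricks above, using the ARM condition
** encoding (HS=2, LO=3, HI=8, LS=9, GE=10, LT=11, GT=12, LE=13):
**
**   CC_GE(10)^7 = CC_LE(13)   CC_LT(11)^7 = CC_GT(12)   signed mirror
**   CC_LO(3)^11 = CC_HI(8)    CC_LS(9)^11 = CC_HS(2)    unsigned mirror
**
** Exchanging lref and rref mirrors the comparison, so the guard condition
** must be mirrored the same way.
*/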

static void asm_comp(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t))
    asm_fpcomp(as, ir);
  else
#endif
    asm_intcomp(as, ir);
}

#define asm_equal(as, ir)	asm_comp(as, ir)

#if LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_int64comp(ASMState *as, IRIns *ir)
{
  int signedcomp = (ir->o <= IR_GT);
  ARMCC cclo, cchi;
  Reg leftlo, lefthi;
  uint32_t mlo, mhi;
  RegSet allow = RSET_GPR, oldfree;

  /* Always use unsigned comparison for loword. */
  cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
  leftlo = ra_alloc1(as, ir->op1, allow);
  oldfree = as->freeset;
  mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
  allow &= ~(oldfree & ~as->freeset);  /* Update for allocs of asm_fuseopm. */

  /* Use signed or unsigned comparison for hiword. */
  cchi = asm_compmap[ir->o] & 15;
  lefthi = ra_alloc1(as, (ir+1)->op1, allow);
  mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));

  /* All register allocations must be performed _before_ this point. */
  if (signedcomp) {
    MCLabel l_around = emit_label(as);
    asm_guardcc(as, cclo);
    emit_n(as, ARMI_CMP^mlo, leftlo);
    emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
    if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6;  /* GE -> GT, LE -> LT */
    asm_guardcc(as, cchi);
  } else {
    asm_guardcc(as, cclo);
    emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
  }
  emit_n(as, ARMI_CMP^mhi, lefthi);
}
#endif
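/* Emitted backwards, the signed path reads forward as, e.g. for a 64 bit
** signed "a < b" guard (illustrative; the second operands may be fused
** K12 constants or registers):
**
**   cmp    a.hi, b.hi       ; signed compare of hiwords
**   blgt   ->exitstub       ; a.hi > b.hi  -> "a < b" false, exit
**   bne    l_around         ; a.hi < b.hi  -> "a < b" true, skip loword
**   cmp    a.lo, b.lo       ; hiwords equal: unsigned compare of lowords
**   blhs   ->exitstub       ; a.lo >= b.lo -> "a < b" false, exit
** l_around:
*/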

/* -- Split register ops -------------------------------------------------- */

/* Hiword op of a split 32/32 bit op. Previous op is the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
#if LJ_HASFFI || LJ_SOFTFP
  if ((ir-1)->o <= IR_NE) {  /* 64 bit integer or FP comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_int64comp(as, ir-1);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_PL : CC_LE);
    return;
#elif LJ_HASFFI
  } else if ((ir-1)->o == IR_CONV) {
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_xstore_(as, ir, 4);
    return;
  }
#endif
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
  case IR_ADD:
    as->curins--;
    asm_intop(as, ir, ARMI_ADC);
    asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
    break;
  case IR_SUB:
    as->curins--;
    asm_intop(as, ir, ARMI_SBC);
    asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
    break;
  case IR_NEG:
    as->curins--;
    asm_intneg(as, ir, ARMI_RSC);
    asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
    break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: case IR_TMPREF:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
  case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
  }
}
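/* Example of the fused emission for 64 bit arithmetic (illustrative):
** a 64 bit IR_ADD is split into a loword ADD plus this hiword HIOP. Both
** halves are emitted here (as->curins-- makes the main loop skip the
** loword op), and since the assembler works backwards the forward order
** comes out as:
**
**   adds   d.lo, a.lo, b.lo   ; ARMI_ADD|ARMI_S sets the carry
**   adc    d.hi, a.hi, b.hi   ; ARMI_ADC consumes it
*/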

/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_n(as, ARMI_TST|ARMI_K12|HOOK_PROFILE, RID_TMP);
  emit_lsptr(as, ARMI_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  int savereg = 0;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lj_assertA(ra_hasreg(pbase), "base reg lost");
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      savereg = 1;
    }
  } else {
    pbase = RID_BASE;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
  if (savereg)
    emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0);  /* Restore temp. register. */
  k = emit_isk12(0, (int32_t)(8*topslot));
  lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
  emit_n(as, ARMI_CMP^k, RID_TMP);
  emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
  emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
           (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    int32_t i = i32ptr(&J2G(as->J)->cur_L);
    if (ra_hasspill(irp->s))
      emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
    if (savereg)
      emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0);  /* Save temp. register. */
    emit_loadi(as, RID_TMP, (i & ~4095));
  } else {
    emit_getgl(as, RID_TMP, cur_L);
  }
}
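/* Forward-order sketch of the emitted check on a root trace (illustrative;
** the address formation for g->cur_L is elided):
**
**   ldr    rtmp, [->&g->cur_L]                  ; current lua_State *L
**   ldr    rtmp, [rtmp, #offsetof(lua_State, maxstack)]
**   sub    rtmp, rtmp, BASE                     ; remaining stack space
**   cmp    rtmp, #8*topslot                     ; K12-encoded immediate
**   blls   ->exitstub                           ; overflow -> exit handler
**
** 8*topslot must encode as an ARM rotated 8 bit immediate, e.g.
** topslot = 30 gives 240 = 0xf0, which fits directly.
*/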

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  int32_t bias = 0;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1) - bias;
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg tmp;
      /* LJ_SOFTFP: must be a number constant. */
      lj_assertA(irref_isk(ref), "unsplit FP op");
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
                      rset_exclude(RSET_GPREVEN, RID_BASE));
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
      if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      if (LJ_UNLIKELY(ofs < -1020 || ofs > 1020)) {
        int32_t adj = ofs & 0xffffff00;  /* K12-friendly. */
        bias += adj;
        ofs -= adj;
        emit_addptr(as, RID_BASE, -adj);
      }
      emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
#endif
    } else {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg type;
      lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
                 "restore of IR type %d", irt_type(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
        emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
        if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), odd);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
#endif
      } else if ((sn & SNAP_KEYINDEX)) {
        type = ra_allock(as, (int32_t)LJ_KEYINDEX, odd);
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
      }
      emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
    }
    checkmclim(as);
  }
  emit_addptr(as, RID_BASE, bias);
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define ARM_NOPATCH_GC_CHECK	(ARMI_BIC|ARMI_K12)

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp1, tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  *--as->mcp = ARM_NOPATCH_GC_CHECK;
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp1 = ra_releasetmp(as, ASMREF_TMP1);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
  emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
  emit_lso(as, ARMI_LDR, tmp2, tmp1,
           (int32_t)offsetof(global_State, gc.threshold));
  emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
           (int32_t)offsetof(global_State, gc.total));
  ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the bcc and patched the final bl. */
    p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
  } else {
    p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
  }
}

/* Fixup the tail of the loop. */
static void asm_loop_tail_fixup(ASMState *as)
{
  UNUSED(as);  /* Nothing to do. */
}
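/* Offset arithmetic, spelled out (illustrative): an ARM branch encodes a
** signed 24 bit *word* offset relative to PC+8, i.e. two words past the
** branch itself. For the branch written at p[-1] this gives
**
**   offset = target - ((p-1) + 2) = (target - p) - 1   (in words)
**
** while the guard at p[-2] already had its -2 baked in when it was
** emitted, so only the raw (target - p) delta is OR-ed in.
*/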

/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->cur_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, cur_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static Reg asm_head_side_base(ASMState *as, IRIns *irp)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    return ra_dest(as, ir, RSET_GPR);
  } else {
    Reg r = irp->r;
    lj_assertA(ra_hasreg(r), "base reg lost");
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
    return r;
  }
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    as->mctop = --p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(ARMI_ADD, spadj);
    lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
    p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}
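/* Forward-order tail of a trace, as prepared and patched above
** (illustrative):
**
**   add    sp, sp, #spadj    ; dropped entirely if spadj == 0
**   b      <link target>     ; linked trace or lj_vm_exit_interp
*/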

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(IR(args[i])->t)) {
          if (nfpr > 0) nfpr--;
          else fprodd = 0, nslots = (nslots + 3) & ~1;
        } else {
          if (fprodd) fprodd--;
          else if (nfpr > 0) fprodd = 1, nfpr--;
          else nslots++;
        }
      } else if (irt_isnum(IR(args[i])->t)) {
        ngpr &= ~1;
        if (ngpr > 0) ngpr -= 2; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots++;
      }
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(irt_isfp(ir->t) ? RID_FPRET : RID_RET);
}
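/* Worked example of the hard-float register assignment modeled above
** (AAPCS VFP variant, illustrative): for a call (float a, double b,
** float c) the counters evolve as
**
**   a: nfpr 8->7, fprodd=1   -> a in s0 (d0 half-used, s1 still free)
**   b: nfpr 7->6             -> b in d1
**   c: fprodd 1->0           -> c back-filled into s1
**
** Only when both the d-registers and the back-fill slots are exhausted do
** arguments go to stack slots (nslots), which in turn grows as->evenspill.
*/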

static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL, *cend = p;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_addr(J, exitno) - 2;
  for (; p < pe; p++) {
    /* Look for bl_cc exitstub, replace with b_cc target. */
    uint32_t ins = *p;
    if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
        ((ins ^ (px-p)) & 0x00ffffffu) == 0 &&
        p[-1] != ARM_NOPATCH_GC_CHECK) {
      *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
      cend = p+1;
      if (!cstart) cstart = p;
    }
  }
  lj_assertJ(cstart != NULL, "exit stub %d not found", exitno);
  lj_mcode_sync(cstart, cend);
  lj_mcode_patch(J, mcarea, 1);
}
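/* Instruction matching, decoded (illustrative): a conditional BL has bits
** 27-24 = 1011 and cond != 1111, hence
**
**   (ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u
**
** The replacement keeps the condition and clears the L bit (the
** 0xfe000000u mask preserves cond plus the 101 opcode bits), turning
** "bl<cc> exitstub" into "b<cc> target" with a rebased 24 bit word offset
** ((target-p)-2 accounts for PC+8).
*/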