/*
** ARM IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate a scratch register pair. */
static Reg ra_scratchpair(ASMState *as, RegSet allow)
{
  RegSet pick1 = as->freeset & allow;
  RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
  Reg r;
  if (pick2) {
    r = rset_picktop(pick2);
  } else {
    RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
    if (pick) {
      r = rset_picktop(pick);
      ra_restore(as, regcost_ref(as->cost[r+1]));
    } else {
      pick = pick1 & (allow << 1) & RSET_GPRODD;
      if (pick) {
        r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
      } else {
        r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
        ra_restore(as, regcost_ref(as->cost[r+1]));
      }
    }
  }
  lua_assert(rset_test(RSET_GPREVEN, r));
  ra_modified(as, r);
  ra_modified(as, r+1);
  RA_DBGX((as, "scratchpair $r $r", r, r+1));
  return r;
}

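/* Note on the pair shape: LDRD/STRD on ARMv5TE require an even-numbered
** first register and always transfer the pair r, r+1. That is why the
** search above only considers candidates from RSET_GPREVEN whose odd
** neighbor is free, or restores/evicts a neighbor to free up such a pair.
*/
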
/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  MCode *mxp = as->mcbot;
  int i;
  if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
    asm_mclimit(as);
  /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
  *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
  *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
  mxp++;
  *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch);  /* DISPATCH address */
  *mxp++ = group*EXITSTUBS_PER_GROUP;
  for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
    *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxp - EXITSTUBS_PER_GROUP;
}

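/* A sketch of the stub group layout generated above (illustrative):
**
**   common:  str lr, [sp]
**            bl ->vm_exit_handler     <- lr then points at the data words
**            .long DISPATCH_address
**            .long group*EXITSTUBS_PER_GROUP
**   exit i:  b common                 <- one word per exit, i = 0..EXITSTUBS_PER_GROUP-1
**
** The returned pointer is the first per-exit branch; each branch encodes
** the constant word offset (-6-i) back to the common code, and the exit
** handler locates the two data words through the lr set by the bl.
*/
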
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, ARMCC cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
    emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
    return;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
}

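/* Note on the -2 bias in branch offsets here and elsewhere: the PC seen
** by an ARM branch is the instruction address plus 8 bytes (two words),
** so a branch at p targeting t encodes ((t-p)-2) words in its 24 bit
** offset field.
*/
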
/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

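/* Example: asm_fuseabase below only treats a table's array part as
** colocated if no IR_NEWREF intervenes, since inserting a new key may
** resize the table and reallocate the array part away from the GCtab
** itself. The bounded backwards scan above checks exactly that.
*/
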
/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (ofs > -4096 && ofs < 4096) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (ofs < 4096) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        *ofsp = (ofs & 255);  /* Mask out less bits to allow LDRD. */
        return ra_allock(as, (ofs & ~255), allow);
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return ARMF_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t k = emit_isk12(ai, ir->i);
    if (k)
      return k;
  } else if (mayfuse(as, ref)) {
    if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
                    ir->o == IR_BSHR ? ARMSH_LSR :
                    ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
      if (irref_isk(ir->op2)) {
        return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
      } else {
        Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
        return m | ARMF_RSH(sh, s);
      }
    } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return m | ARMF_SH(ARMSH_LSL, 1);
    }
  }
  return ra_allocref(as, ref, allow);
}

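/* The returned operand m covers the usual ARM data-processing operand-2
** forms, e.g. (illustrative):
**   add rd, rn, #k12          (rotated 8 bit immediate)
**   add rd, rn, rm            (plain register)
**   add rd, rn, rm, lsl #3    (constant shift, fused BSHL/BSHR/BSAR/BROR)
**   add rd, rn, rm, lsl rs    (register-shifted register)
*/
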
/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
      irref_isk(ir->op2) && IR(ir->op2)->i == 2)
    return ir->op1;
  return 0;  /* No fusion. */
}

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
                         RegSet allow)
{
  IRIns *ir = IR(ref);
  Reg base;
  int32_t ofs = 0;
  if (ra_noreg(ir->r) && mayfuse(as, ref)) {
    int32_t lim = (ai & 0x04000000) ? 4096 : 256;
    if (ir->o == IR_ADD) {
      if (irref_isk(ir->op2) && (ofs = IR(ir->op2)->i) > -lim && ofs < lim) {
        ref = ir->op1;
      } else {
        IRRef lref = ir->op1, rref = ir->op2;
        Reg rn, rm;
        if ((ai & 0x04000000)) {
          IRRef sref = asm_fuselsl2(as, rref);
          if (sref) {
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
            lref = rref;
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          }
        }
        rn = ra_alloc1(as, lref, allow);
        rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF) {
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs += IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs += IR(ir->op1)->i;
        ref = ir->op2;
      } else {
        /* NYI: Fuse ADD with constant. */
        Reg rn = ra_alloc1(as, ir->op1, allow);
        uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
        if ((ai & 0x04000000))
          emit_lso(as, ai, rd, rd, ofs);
        else
          emit_lsox(as, ai, rd, rd, ofs);
        emit_dn(as, ARMI_ADD^m, rd, rn);
        return;
      }
    }
    if (ofs <= -lim || ofs >= lim) {
      Reg rn = ra_alloc1(as, ref, allow);
      Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
      if ((ai & 0x04000000)) ai |= ARMI_LS_R;
      emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
      return;
    }
  }
  base = ra_alloc1(as, ref, allow);
  if ((ai & 0x04000000))
    emit_lso(as, ai, rd, base, ofs);
  else
    emit_lsox(as, ai, rd, base, ofs);
}

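/* Note: the 0x04000000 bit tested above distinguishes the word/byte
** load/store encodings (12 bit offset, optional shifted register offset)
** from the halfword/dword encodings (8 bit offset only), which is why
** the fusion limit is 4096 vs. 256 and why ARMI_LS_R is only set for
** the former.
*/
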
/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = 0;
  Reg gpr = REGARG_FIRSTGPR;
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    if (gpr <= REGARG_LASTGPR) {
      lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
      if (ref) ra_leftov(as, gpr, ref);
      gpr++;
    } else {
      if (ref) {
        Reg r = ra_alloc1(as, ref, RSET_GPR);
        emit_spstore(as, ir, r, ofs);
      }
      ofs += 4;
    }
  }
}

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP);
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lua_assert(!irt_ispri(ir->t));
    if (hiop)
      ra_destpair(as, ir);
    else
      ra_destreg(as, ir, RID_RET);
  }
  UNUSED(ci);
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  CCallInfo ci;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  if (irref_isk(ir->op2)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(IR(ir->op2)->i);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, ir->op2, RSET_RANGE(RID_R4, RID_R12+1));
    emit_m(as, ARMI_BLXr, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, ARMI_CMP, RID_TMP,
          ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
}

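/* Reading hint: the assembler generates machine code backwards (as->mcp
** moves down), so the emit_* calls in asm_retf above are listed in
** reverse execution order. At runtime the code first loads the frame
** link, compares it against the constant PC and exits on a mismatch,
** then adjusts BASE, stores it to g->jit_base and spills it to its
** stack slot.
*/
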
/* -- Type conversions ---------------------------------------------------- */

static void asm_conv(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  /* FP conversions and 64 bit integer conversions are handled by SPLIT. */
  lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
  lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
  if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
    if ((as->flags & JIT_F_ARMV6)) {
      ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
                  st == IRT_U8 ? ARMI_UXTB :
                  st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
      emit_dm(as, ai, dest, left);
    } else if (st == IRT_U8) {
      emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
    } else {
      uint32_t shift = st == IRT_I8 ? 24 : 16;
      ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
      emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
      emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
    }
  } else {  /* Handle 32/32 bit no-op (cast). */
    ra_leftov(as, dest, ir->op1);  /* Do nothing, but may need to move regs. */
  }
}

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum];
  IRRef args[2];
  Reg rlo = 0, rhi = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
  if (destused) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s);
      destused = 0;
    } else {
      rhi = ra_dest(as, ir+1, RSET_GPR);
      rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused) {
    emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
    emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
  }
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  if (ofs == 0)
    emit_dm(as, ARMI_MOV, tmp, RID_SP);
  else
    emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
}

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {  /* Use the number constant itself as a TValue. */
    lua_assert(irref_isk(ref));
    ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
  } else {
    /* Otherwise use [sp] and [sp+4] to hold the TValue. */
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_dm(as, ARMI_MOV, dest, RID_SP);
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_lso(as, ARMI_STR, src, RID_SP, 0);
    }
    if ((ir+1)->o == IR_HIOP)
      type = ra_alloc1(as, ref+1, allow);
    else
      type = ra_allock(as, irt_toitype(ir->t), allow);
    emit_lso(as, ARMI_STR, type, RID_SP, 4);
  }
}

static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}

/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, ARMI_ADD^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
}

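/* Array slots are 8 byte TValues, so the variable-index case above
** computes dest = base + idx*8 with a single add plus lsl #3.
*/
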
/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
  uint32_t khash;
  MCLabel l_end, l_loop;
  rset_clear(allow, tab);
  if (!irref_isk(refkey) || irt_isstr(kt)) {
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        keynumhi = (irkey+1)->r;
        keyhi = RID_TMP;
        ra_noweak(as, keynumhi);
      } else {
        keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, keynumhi);
      khi = 0;
    }
  } else if (irt_isnum(kt)) {
    int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
    k = emit_isk12(ARMI_CMP, val);
    if (!k) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
    val = (int32_t)ir_knum(irkey)->u32.hi;
    khi = emit_isk12(ARMI_CMP, val);
    if (!khi) {
      keyhi = ra_allock(as, val, allow);
      rset_clear(allow, keyhi);
    }
  } else if (!irt_ispri(kt)) {
    k = emit_isk12(ARMI_CMP, irkey->i);
    if (!k) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }
  if (!irt_ispri(kt))
    tmp = ra_scratchpair(as, allow);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
  emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  if (!irt_ispri(kt)) {
    emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^khi, tmp+1, keyhi);
    emit_nm(as, ARMI_CMP^k, tmp, key);
    emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
  } else {
    emit_n(as, ARMI_CMP^khi, tmp);
    emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
  }
  *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
    if (irt_isstr(kt)) {  /* Fetch of str->hash is cheaper than ra_allock. */
      emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else if (irref_isk(refkey)) {
      emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
               rset_exclude(rset_exclude(RSET_GPR, tab), dest));
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      if (ra_hasreg(keynumhi)) {  /* Canonicalize +-0.0 to 0.0. */
        if (keyhi == RID_TMP)
          emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
        emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
      }
      emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
               tmp, tmp+1, tmp);
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
      if (ra_hasreg(keynumhi)) {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key);  /* Test for +-0.0. */
        emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
      } else {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
                 rset_exclude(rset_exclude(RSET_GPR, tab), key));
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  lua_assert(ofs % sizeof(Node) == 0);
  if (ofs > 4095) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    RegSet even = (as->freeset & (as->freeset >> 1) & allow & RSET_GPREVEN);
    if (even) {
      key = ra_scratch(as, even);
      if (rset_test(as->freeset, key+1)) {
        type = key+1;
        ra_modified(as, type);
      }
    } else {
      key = ra_scratch(as, allow);
    }
    rset_clear(allow, key);
  }
  rset_clear(allow, type);
  if (irt_isnum(irkey->t)) {
    emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
             (int32_t)ir_knum(irkey)->u32.hi, allow);
    emit_opk(as, ARMI_CMP, 0, key,
             (int32_t)ir_knum(irkey)->u32.lo, allow);
  } else if (ra_hasreg(key)) {
    emit_n(as, ARMF_CC(ARMI_CMN, CC_EQ)|ARMI_K12|-irt_toitype(irkey->t), type);
    emit_opk(as, ARMI_CMP, 0, key, irkey->i, allow);
  } else {
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
  }
  emit_lso(as, ARMI_LDR, type, idx, kofs+4);
  if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
  if (ofs > 4095)
    emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
}

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t     */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, ARMI_LDR, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
      emit_opk(as, ARMI_ADD, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, ARMI_LDR, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lua_assert(!ra_used(ir));
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    uint32_t k, m = ARMI_K12|sizeof(GCstr);
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               (k = emit_isk12(ARMI_ADD,
                               (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
      m = k;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_dn(as, ARMI_ADD^m, dest, dest);
    emit_dnm(as, ARMI_ADD, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  emit_opk(as, ARMI_ADD, dest, r,
           sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
}

/* -- Loads and stores ---------------------------------------------------- */

static ARMIns asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return ARMI_LDRSB;
  case IRT_U8: return ARMI_LDRB;
  case IRT_I16: return ARMI_LDRSH;
  case IRT_U16: return ARMI_LDRH;
  case IRT_NUM: lua_assert(0);
  case IRT_FLOAT:
  default: return ARMI_LDR;
  }
}

static ARMIns asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return ARMI_STRB;
  case IRT_I16: case IRT_U16: return ARMI_STRH;
  case IRT_NUM: lua_assert(0);
  case IRT_FLOAT:
  default: return ARMI_STR;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
  ARMIns ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op2 == IRFL_TAB_ARRAY) {
    ofs = asm_fuseabase(as, ir->op1);
    if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
      emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
      return;
    }
  }
  ofs = field_ofs[ir->op2];
  if ((ai & 0x04000000))
    emit_lso(as, ai, dest, idx, ofs);
  else
    emit_lsox(as, ai, dest, idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
  IRIns *irf = IR(ir->op1);
  Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
  int32_t ofs = field_ofs[irf->op2];
  ARMIns ai = asm_fxstoreins(ir);
  if ((ai & 0x04000000))
    emit_lso(as, ai, src, idx, ofs);
  else
    emit_lsox(as, ai, src, idx, ofs);
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
}

static void asm_xstore(ASMState *as, IRIns *ir)
{
  Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
  asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
               rset_exclude(RSET_GPR, src));
}

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = ((ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, allow);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
  if (ra_noreg(type)) {
    rset_clear(allow, idx);
    if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
        rset_test((as->freeset & allow), dest+1)) {
      type = dest+1;
      ra_modified(as, type);
    } else {
      type = RID_TMP;
    }
  }
  asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
  emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  if (ra_hasreg(dest)) emit_lso(as, ARMI_LDR, dest, idx, ofs);
  emit_lso(as, ARMI_LDR, type, idx, ofs+4);
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg idx, src = RID_NONE, type = RID_NONE;
  int32_t ofs = 0;
  int hiop = ((ir+1)->o == IR_HIOP);
  if (!irt_ispri(ir->t)) {
    src = ra_alloc1(as, ir->op2, allow);
    rset_clear(allow, src);
  }
  if (hiop)
    type = ra_alloc1(as, (ir+1)->op2, allow);
  else
    type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
  idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type));
  if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
  emit_lso(as, ARMI_STR, type, idx, ofs+4);
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = ((ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
  lua_assert(!(ir->op2 & IRSLOAD_CONVERT));  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, allow);
    rset_clear(allow, dest);
  }
  base = ra_alloc1(as, REF_BASE, allow);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type)) {
      rset_clear(allow, base);
      if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
          rset_test((as->freeset & allow), dest+1)) {
        type = dest+1;
        ra_modified(as, type);
      } else {
        type = RID_TMP;
      }
    }
    asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  }
  if (ra_hasreg(dest)) emit_lso(as, ARMI_LDR, dest, base, ofs);
  if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID typeid = (CTypeID)IR(ir->op1)->i;
  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
              lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i;
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[2];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lua_assert(sz != CTSIZE_INVALID);

  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  as->gcsteps++;

  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lua_assert(sz == 4 || sz == 8);
    if (sz == 8) {
      ofs += 4; ir++;
      lua_assert(ir->o == IR_HIOP);
    }
    for (;;) {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_lso(as, ARMI_STR, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    }
  }
  /* Initialize gct and typeid. lj_mem_newgco() already sets marked. */
  {
    uint32_t k = emit_isk12(ARMI_MOV, typeid);
    Reg r = k ? RID_R1 : ra_allock(as, typeid, allow);
    emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, typeid));
    emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
    if (k) emit_d(as, ARMI_MOV^k, RID_R1);
  }
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
                     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, ARMI_STR, tab, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
  emit_lso(as, ARMI_LDR, link, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
  emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}

*as
, IRIns
*ir
)
1005 const CCallInfo
*ci
= &lj_ir_callinfo
[IRCALL_lj_gc_barrieruv
];
1009 /* No need for other object barriers (yet). */
1010 lua_assert(IR(ir
->op1
)->o
== IR_UREFC
);
1011 ra_evictset(as
, RSET_SCRATCH
);
1012 l_end
= emit_label(as
);
1013 args
[0] = ASMREF_TMP1
; /* global_State *g */
1014 args
[1] = ir
->op1
; /* TValue *tv */
1015 asm_gencall(as
, ci
, args
);
1016 if ((*as
->mcp
>> 28) == CC_AL
)
1017 *as
->mcp
= ARMF_CC(*as
->mcp
, CC_NE
);
1019 emit_branch(as
, ARMF_CC(ARMI_B
, CC_EQ
), l_end
);
1020 ra_allockreg(as
, i32ptr(J2G(as
->J
)), ra_releasetmp(as
, ASMREF_TMP1
));
1021 obj
= IR(ir
->op1
)->r
;
1022 tmp
= ra_scratch(as
, rset_exclude(RSET_GPR
, obj
));
1023 emit_n(as
, ARMF_CC(ARMI_TST
, CC_NE
)|ARMI_K12
|LJ_GC_BLACK
, tmp
);
1024 emit_n(as
, ARMI_TST
|ARMI_K12
|LJ_GC_WHITES
, RID_TMP
);
1025 val
= ra_alloc1(as
, ir
->op2
, rset_exclude(RSET_GPR
, obj
));
1026 emit_lso(as
, ARMI_LDRB
, tmp
, obj
,
1027 (int32_t)offsetof(GCupval
, marked
)-(int32_t)offsetof(GCupval
, tv
));
1028 emit_lso(as
, ARMI_LDRB
, RID_TMP
, val
, (int32_t)offsetof(GChead
, marked
));
/* -- Arithmetic and logic operations ------------------------------------- */

static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
      ai ^= (ARMI_SUB^ARMI_RSB);
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= ARMI_S;
  }
  emit_dn(as, ai^m, dest, left);
}

static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
    uint32_t cc = (as->mcp[1] >> 28);
    as->flagmcp = NULL;
    if (cc <= CC_NE) {
      as->mcp++;
      ai |= ARMI_S;
    } else if (cc == CC_GE) {
      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
      ai |= ARMI_S;
    } else if (cc == CC_LT) {
      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
      ai |= ARMI_S;
    }  /* else: other conds don't work with bit ops. */
  }
  if (ir->op2 == 0) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
    emit_d(as, ai^m, dest);
  } else {
    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
    asm_intop(as, ir, ai);
  }
}

static void asm_arithop(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai |= ARMI_S;
  }
  asm_intop(as, ir, ai);
}

static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dn(as, ai|ARMI_K12|0, dest, left);
}

/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  Reg tmp = RID_NONE;
  /* ARMv5 restriction: dest != left and dest_hi != left. */
  if (dest == left && left != right) { left = right; right = dest; }
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    if (!(as->flags & JIT_F_ARMV6) && dest == left)
      tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
    asm_guardcc(as, CC_NE);
    emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
    emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
  } else {
    if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
    emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
  }
  /* Only need this for the dest == left == right case. */
  if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
}

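/* The MULOV overflow check above runs as (execution order):
**   smull dest, tmp, left, right   ; 64 bit signed product
**   teq   tmp, dest, asr #31       ; hiword == sign extension of loword?
** The guard exits on NE, i.e. when the product doesn't fit in 32 bits.
** (Here tmp stands for RID_TMP; register names are illustrative.)
*/
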
static void asm_intmod(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_bitswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_ARMV6)) {
    emit_dm(as, ARMI_REV, dest, left);
  } else {
    Reg tmp2 = dest;
    if (tmp2 == left)
      tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
    emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
    emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
  }
}

static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
{
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
    /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & 31);
    emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
  }
}

static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  uint32_t kcmp = 0, kmov = 0;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = 0;
  if (irref_isk(ir->op2)) {
    kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
    if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
  }
  if (!kmov) {
    kcmp = 0;
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  }
  if (dest != right) {
    emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
    cc ^= 1;  /* Must use opposite conditions for paired moves. */
  } else {
    cc ^= (CC_LT^CC_GT);  /* Otherwise may swap CC_LT <-> CC_GT. */
  }
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, left);
  emit_nm(as, ARMI_CMP^kcmp, left, right);
}

static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0] = ir->op1; args[1] = (ir+1)->op1;
  args[2] = ir->op2; args[3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. */
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
  if (!rset_test(as->freeset, RID_R2) &&
      regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
  if (!rset_test(as->freeset, RID_R3) &&
      regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
  ra_evictset(as, drop);
  ra_destpair(as, ir);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}

/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};

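/* Example decoding: the IR_LT entry is CC_GE + (CC_HS << 4). The low
** nibble is the exit condition for the integer compare (guards branch
** out on the negated condition, here GE), the high nibble the exit
** condition after the softfp comparison call (here HS, an unsigned
** condition, which presumably also exits on unordered NaN results).
*/
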
/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
  args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
  args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
  for (r = RID_R0; r <= RID_R3; r++)
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
  ra_evictset(as, drop);
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}

/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  ARMCC cc = (asm_compmap[ir->o] & 15);
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
  if (asm_swapops(as, lref, rref)) {
    Reg tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  if (irref_isk(rref) && IR(rref)->i == 0) {
    IRIns *irl = IR(lref);
    cmpprev0 = (irl+1 == ir);
    /* Combine comp(BAND(left, right), 0) into tst left, right. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        m2 = emit_isk12(ARMI_AND, IR(brref)->i);
        if ((m2 & (ARMI_AND^ARMI_BIC)))
          goto notst;  /* Not beneficial if we miss a constant operand. */
      }
      if (cc == CC_GE) cc = CC_PL;
      else if (cc == CC_LT) cc = CC_MI;
      else if (cc > CC_NE) goto notst;  /* Other conds don't work with tst. */
      bleft = ra_alloc1(as, blref, RSET_GPR);
      if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ARMI_TST^m2, bleft);
      return;
    }
  }
notst:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ARMI_CMP^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}

/* 64 bit integer comparisons. */
static void asm_int64comp(ASMState *as, IRIns *ir)
{
  int signedcomp = (ir->o <= IR_GT);
  ARMCC cclo, cchi;
  Reg leftlo, lefthi;
  uint32_t mlo, mhi;
  RegSet allow = RSET_GPR, oldfree;

  /* Always use unsigned comparison for loword. */
  cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
  leftlo = ra_alloc1(as, ir->op1, allow);
  oldfree = as->freeset;
  mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
  allow &= ~(oldfree & ~as->freeset);  /* Update for allocs of asm_fuseopm. */

  /* Use signed or unsigned comparison for hiword. */
  cchi = asm_compmap[ir->o] & 15;
  lefthi = ra_alloc1(as, (ir+1)->op1, allow);
  mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));

  /* All register allocations must be performed _before_ this point. */
  if (signedcomp) {
    MCLabel l_around = emit_label(as);
    asm_guardcc(as, cclo);
    emit_n(as, ARMI_CMP^mlo, leftlo);
    emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
    if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6;  /* GE -> GT, LE -> LT */
    asm_guardcc(as, cchi);
  } else {
    asm_guardcc(as, cclo);
    emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
  }
  emit_n(as, ARMI_CMP^mhi, lefthi);
}

/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o <= IR_NE) {  /* 64 bit integer or FP comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
    if (irt_isint(ir->t))
      asm_int64comp(as, ir-1);
    else
      asm_fpcomp(as, ir-1);
    return;
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_fpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_ADD:
    as->curins--;
    asm_intop(as, ir, ARMI_ADC);
    asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
    break;
  case IR_SUB:
    as->curins--;
    asm_intop(as, ir, ARMI_SBC);
    asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
    break;
  case IR_NEG:
    as->curins--;
    asm_intneg(as, ir, ARMI_RSC);
    asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
    break;
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
  case IR_CALLN: case IR_CALLS: case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE:
  case IR_TOSTR: case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lua_assert(0); break;
  }
}

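/* Example: a 64 bit add arrives as a loword IR_ADD plus an IR_HIOP and
** is assembled above into (execution order):
**   adds lo_dest, lo_left, lo_right   ; ARMI_ADD|ARMI_S sets the carry
**   adc  hi_dest, hi_left, hi_right   ; ARMI_ADC consumes it
** SUB and NEG follow the same pattern with sbc and rsc.
*/
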
/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (ra_hasreg(irp->r)) {
      pbase = irp->r;
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0);  /* Restore temp. register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
  k = emit_isk12(0, (int32_t)(8*topslot));
  lua_assert(k);
  emit_n(as, ARMI_CMP^k, RID_TMP);
  emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
  emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
           (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    int32_t i = i32ptr(&J2G(as->J)->jit_L);
    if (ra_noreg(irp->r)) {
      lua_assert(ra_hasspill(irp->s));
      emit_lso(as, ARMI_LDR, RID_RET, RID_SP, sps_scale(irp->s));
    }
    emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
    if (ra_noreg(irp->r)) {
      emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0);  /* Save temp. register. */
    }
    emit_loadi(as, RID_TMP, (i & ~4095));
  } else {
    emit_getgl(as, RID_TMP, jit_L);
  }
}

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  SnapEntry *flinks = map + nent + snap->depth;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg tmp;
      lua_assert(irref_isk(ref));  /* LJ_SOFTFP: must be a number constant. */
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
                      rset_exclude(RSET_GPREVEN, RID_BASE));
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
      if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
    } else {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg type;
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
        emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
        if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), odd);
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
      }
      emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}

1506 /* Check GC threshold and do one or more GC steps. */
1507 static void asm_gc_check(ASMState
*as
)
1509 const CCallInfo
*ci
= &lj_ir_callinfo
[IRCALL_lj_gc_step_jit
];
1513 ra_evictset(as
, RSET_SCRATCH
);
1514 l_end
= emit_label(as
);
1515 /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
1516 asm_guardcc(as
, CC_NE
); /* Assumes asm_snap_prep() already done. */
1517 emit_n(as
, ARMI_CMP
|ARMI_K12
|0, RID_RET
);
1518 args
[0] = ASMREF_TMP1
; /* global_State *g */
1519 args
[1] = ASMREF_TMP2
; /* MSize steps */
1520 asm_gencall(as
, ci
, args
);
1521 tmp1
= ra_releasetmp(as
, ASMREF_TMP1
);
1522 tmp2
= ra_releasetmp(as
, ASMREF_TMP2
);
1523 emit_loadi(as
, tmp2
, (int32_t)as
->gcsteps
);
1524 /* Jump around GC step if GC total < GC threshold. */
1525 emit_branch(as
, ARMF_CC(ARMI_B
, CC_LS
), l_end
);
1526 emit_nm(as
, ARMI_CMP
, RID_TMP
, tmp2
);
1527 emit_lso(as
, ARMI_LDR
, tmp2
, tmp1
,
1528 (int32_t)offsetof(global_State
, gc
.threshold
));
1529 emit_lso(as
, ARMI_LDR
, RID_TMP
, tmp1
,
1530 (int32_t)offsetof(global_State
, gc
.total
));
1531 ra_allockreg(as
, i32ptr(J2G(as
->J
)), tmp1
);
/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the bcc and patched the final bl. */
    p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
  } else {
    p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
  }
}

1553 /* Reload L register from g->jit_L. */
1554 static void asm_head_lreg(ASMState
*as
)
1556 IRIns
*ir
= IR(ASMREF_L
);
1558 Reg r
= ra_dest(as
, ir
, RSET_GPR
);
1559 emit_getgl(as
, r
, jit_L
);
1564 /* Coalesce BASE register for a root trace. */
1565 static void asm_head_root_base(ASMState
*as
)
1570 if (ra_hasreg(ir
->r
) && rset_test(as
->modset
, ir
->r
)) ra_spill(as
, ir
);
1571 ra_destreg(as
, ir
, RID_BASE
);
1574 /* Coalesce BASE register for a side trace. */
1575 static RegSet
asm_head_side_base(ASMState
*as
, IRIns
*irp
, RegSet allow
)
1580 if (ra_hasreg(ir
->r
) && rset_test(as
->modset
, ir
->r
)) ra_spill(as
, ir
);
1581 if (ra_hasspill(irp
->s
)) {
1582 rset_clear(allow
, ra_dest(as
, ir
, allow
));
1584 lua_assert(ra_hasreg(irp
->r
));
1585 rset_clear(allow
, irp
->r
);
1586 ra_destreg(as
, ir
, irp
->r
);
1591 /* -- Tail of trace ------------------------------------------------------- */
1593 /* Fixup the tail code. */
1594 static void asm_tail_fixup(ASMState
*as
, TraceNo lnk
)
1596 MCode
*p
= as
->mctop
;
1598 int32_t spadj
= as
->T
->spadjust
;
1602 /* Patch stack adjustment. */
1603 uint32_t k
= emit_isk12(ARMI_ADD
, spadj
);
1605 p
[-2] = (ARMI_ADD
^k
) | ARMF_D(RID_SP
) | ARMF_N(RID_SP
);
1607 /* Patch exit branch. */
1608 target
= lnk
? traceref(as
->J
, lnk
)->mcode
: (MCode
*)lj_vm_exit_interp
;
1609 p
[-1] = ARMI_B
|(((target
-p
)-1)&0x00ffffffu
);
/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}

/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  case IR_USE: ra_alloc1(as, ir->op1, RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;

  /* Guarded assertions. */
  case IR_EQ: case IR_NE:
    if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
      as->curins--;
      asm_href(as, ir-1, (IROp)ir->o);
      break;
    }
    /* fallthrough */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
    asm_intcomp(as, ir);
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
  case IR_BSWAP: asm_bitswap(as, ir); break;

  case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
  case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
  case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;

  case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
  case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
  case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
  case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
  case IR_BROL: lua_assert(0); break;

  /* Arithmetic ops. */
  case IR_ADD: case IR_ADDOV: asm_arithop(as, ir, ARMI_ADD); break;
  case IR_SUB: case IR_SUBOV: asm_arithop(as, ir, ARMI_SUB); break;
  case IR_MUL: case IR_MULOV: asm_intmul(as, ir); break;
  case IR_MOD: asm_intmod(as, ir); break;

  case IR_NEG: asm_intneg(as, ir, ARMI_RSB); break;

  case IR_MIN: asm_intmin_max(as, ir, CC_GT); break;
  case IR_MAX: asm_intmin_max(as, ir, CC_LT); break;

  case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
  case IR_DIV: case IR_POW: case IR_ABS: case IR_TOBIT:
    lua_assert(0);  /* Unused for LJ_SOFTFP. */
    break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}

1734 /* Ensure there are enough stack slots for call arguments. */
1735 static Reg
asm_setup_call_slots(ASMState
*as
, IRIns
*ir
, const CCallInfo
*ci
)
1737 IRRef args
[CCI_NARGS_MAX
];
1738 uint32_t i
, nargs
= (int)CCI_NARGS(ci
);
1739 int nslots
= 0, ngpr
= REGARG_NUMGPR
;
1740 asm_collectargs(as
, ir
, ci
, args
);
1741 for (i
= 0; i
< nargs
; i
++)
1742 if (!LJ_SOFTFP
&& args
[i
] && irt_isnum(IR(args
[i
])->t
)) {
1744 if (ngpr
> 0) ngpr
-= 2; else nslots
+= 2;
1746 if (ngpr
> 0) ngpr
--; else nslots
++;
1748 if (nslots
> as
->evenspill
) /* Leave room for args in stack slots. */
1749 as
->evenspill
= nslots
;
1750 return REGSP_HINT(RID_RET
);
static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL, *cend = p;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_addr(J, exitno) - 2;
  for (; p < pe; p++) {
    /* Look for bl_cc exitstub, replace with b_cc target. */
    uint32_t ins = *p;
    if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
        ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
      *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
      cend = p+1;
      if (!cstart) cstart = p;
    }
  }
  lua_assert(cstart != NULL);
  asm_cache_flush(cstart, cend);
  lj_mcode_patch(J, mcarea, 1);
}
, 1);