/*
** ARM IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate a scratch register pair. */
static Reg ra_scratchpair(ASMState *as, RegSet allow)
{
  RegSet pick1 = as->freeset & allow;
  RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
  Reg r;
  if (pick2) {
    r = rset_picktop(pick2);
  } else {
    RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
    if (pick) {
      r = rset_picktop(pick);
      ra_restore(as, regcost_ref(as->cost[r+1]));
    } else {
      pick = pick1 & (allow << 1) & RSET_GPRODD;
      if (pick) {
        r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
      } else {
        r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
        ra_restore(as, regcost_ref(as->cost[r+1]));
      }
    }
  }
  lua_assert(rset_test(RSET_GPREVEN, r));
  ra_modified(as, r);
  ra_modified(as, r+1);
  RA_DBGX((as, "scratchpair $r $r", r, r+1));
  return r;
}

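/*
** Note: the pair is used for LDRD/STRD, which require an even/odd
** register pair <r, r+1>. The allocator above therefore prefers a fully
** free even/odd pair, then restores the occupied half of an otherwise
** free pair, and only falls back to eviction as a last resort.
*/
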
#if !LJ_SOFTFP
/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}
#endif

/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  MCode *mxp = as->mcbot;
  int i;
  if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
    asm_mclimit(as);
  /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
  *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
  *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
  mxp++;
  *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch);  /* DISPATCH address */
  *mxp++ = group*EXITSTUBS_PER_GROUP;
  for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
    *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
  lj_mcode_sync(as->mcbot, mxp);
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxp - EXITSTUBS_PER_GROUP;
}

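/*
** Layout sketch of one group: a 4-word head (str lr; bl; DISPATCH
** address; group base) followed by EXITSTUBS_PER_GROUP single `b`
** instructions that all branch back to the head. The exit handler can
** then reconstruct the exit number from the guard's return address and
** the group base word, so each exit costs just one instruction here.
*/
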
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, ARMCC cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
    emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
    return;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
}

/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

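/*
** Note: a 0 return merely disables fusion for this reference, which is
** always safe. The search limit thus trades a little fusion opportunity
** on long traces for guaranteed linear-time assembly.
*/
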
/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          int lim)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (ofs > -lim && ofs < lim) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (ofs < lim) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        *ofsp = (ofs & 255);  /* Mask out fewer bits to allow LDRD. */
        return ra_allock(as, (ofs & ~255), allow);
      }
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return ARMF_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t k = emit_isk12(ai, ir->i);
    if (k)
      return k;
  } else if (mayfuse(as, ref)) {
    if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
                    ir->o == IR_BSHR ? ARMSH_LSR :
                    ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
      if (irref_isk(ir->op2)) {
        return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
      } else {
        Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
        return m | ARMF_RSH(sh, s);
      }
    } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return m | ARMF_SH(ARMSH_LSL, 1);
    }
  }
  return ra_allocref(as, ref, allow);
}

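/*
** Example (a sketch): for y = x + (z << 4) the BSHL gets no register of
** its own; the returned operand encodes the shift, so a single
** add rd, rx, rz, lsl #4 is emitted instead of a separate shift into a
** scratch register. x + x likewise becomes the operand rx, lsl #1.
*/
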
/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
      irref_isk(ir->op2) && IR(ir->op2)->i == 2)
    return ir->op1;
  return 0;  /* No fusion. */
}

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
                         RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
                  (ai & 0x04000000) ? 4096 : 256;
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) &&
          (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
          (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
        ofs = ofs2;
        ref = ir->op1;
      } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
        IRRef lref = ir->op1, rref = ir->op2;
        Reg rn, rm;
        if ((ai & 0x04000000)) {
          IRRef sref = asm_fuselsl2(as, rref);
          if (sref) {
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
            lref = rref;
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          }
        }
        rn = ra_alloc1(as, lref, allow);
        rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
      lua_assert(ofs == 0);
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs += IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs += IR(ir->op1)->i;
        ref = ir->op2;
      } else {
        /* NYI: Fuse ADD with constant. */
        Reg rn = ra_alloc1(as, ir->op1, allow);
        uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
        if ((ai & 0x04000000))
          emit_lso(as, ai, rd, rd, ofs);
        else
          emit_lsox(as, ai, rd, rd, ofs);
        emit_dn(as, ARMI_ADD^m, rd, rn);
        return;
      }
      if (ofs <= -lim || ofs >= lim) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
#if !LJ_SOFTFP
  if ((ai & 0x08000000))
    emit_vlso(as, ai, rd, base, ofs);
  else
#endif
  if ((ai & 0x04000000))
    emit_lso(as, ai, rd, base, ofs);
  else
    emit_lsox(as, ai, rd, base, ofs);
}

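/*
** The tested bits correspond to the ARM encoding of the load/store
** families: 0x08000000 is set for VFP VLDR/VSTR (8 bit offset scaled by
** 4, hence lim 1024), 0x04000000 for word/byte LDR/STR (12 bit offset,
** lim 4096); everything else is a halfword/doubleword access with an
** 8 bit offset (lim 256). ARMI_LS_R selects register-offset addressing.
*/
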
#if !LJ_SOFTFP
/* Fuse to multiply-add/sub instruction. */
static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
  if (lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg right, left = ra_alloc2(as, irm,
                        rset_exclude(rset_exclude(RSET_FPR, dest), add));
    right = (left >> 8); left &= 255;
    emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
    if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
    return 1;
  }
  return 0;
}
#endif

/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = 0;
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
#if !LJ_SOFTFP
    if (ref && irt_isfp(ir->t)) {
      RegSet of = as->freeset;
      Reg src;
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(ir->t)) {
          if (fpr <= REGARG_LASTFPR) {
            ra_leftov(as, fpr, ref);
            fpr++;
            continue;
          }
        } else if (fprodd) {  /* Ick. */
          src = ra_alloc1(as, ref, RSET_FPR);
          emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
          fprodd = 0;
          continue;
        } else if (fpr <= REGARG_LASTFPR) {
          ra_leftov(as, fpr, ref);
          fprodd = fpr++;
          continue;
        }
        /* Workaround to protect argument GPRs from being used for remat. */
        as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
        src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
        as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
        fprodd = 0;
        goto stackfp;
      }
      /* Workaround to protect argument GPRs from being used for remat. */
      as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
      src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
      as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
      if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (irt_isnum(ir->t)) {
          lua_assert(rset_test(as->freeset, gpr+1));  /* Ditto. */
          emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
          gpr += 2;
        } else {
          emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
          gpr++;
        }
      } else {
      stackfp:
        if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
        emit_spstore(as, ir, src, ofs);
        ofs += irt_isnum(ir->t) ? 8 : 4;
      }
    } else
#endif
    {
      if (gpr <= REGARG_LASTGPR) {
        lua_assert(rset_test(as->freeset, gpr));  /* Must have been evicted. */
        if (ref) ra_leftov(as, gpr, ref);
        gpr++;
      } else {
        if (ref) {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs);
        }
        ofs += 4;
      }
    }
  }
}

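/*
** Note on the argument marshaling above: with the hard-float EABI
** (!LJ_ABI_SOFTFP and not a vararg call) doubles go into VFP registers
** and singles may back-fill the odd half of a pair (fprodd). For the
** soft-float ABI or vararg calls, a double instead occupies an aligned
** GPR pair (gpr = (gpr+1) & ~1u) or an 8-byte-aligned stack slot once
** r0-r3 are exhausted.
*/
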
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lua_assert(!irt_ispri(ir->t));
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
        Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
        if (irt_isnum(ir->t))
          emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
        else
          emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
  UNUSED(ci);
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
    emit_m(as, ARMI_BLXr, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, ARMI_CMP, RID_TMP,
          ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
}

/* -- Type conversions ---------------------------------------------------- */

#if !LJ_SOFTFP
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
  emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
}

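/*
** The checked number-to-int conversion above converts to int, converts
** back and compares with the original value: any inexact conversion
** makes the VCMP unequal and the guard leaves the trace. Remember that
** code is emitted backwards, so the bottom VCVT executes first.
*/
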
static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
}
#endif

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
  IRRef lref = ir->op1;
  /* 64 bit integer conversions are handled by SPLIT. */
  lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
#if LJ_SOFTFP
  /* FP conversions are handled by SPLIT. */
  lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lua_assert(irt_type(ir->t) != st);
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
              (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      ARMIns ai = irt_isfloat(ir->t) ?
        (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
        (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
      emit_dm(as, ai, (dest & 15), (dest & 15));
      emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ARMIns ai;
      emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
      ai = irt_isint(ir->t) ?
        (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
        (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
      emit_dm(as, ai, (tmp & 15), (left & 15));
    }
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
      if ((as->flags & JIT_F_ARMV6)) {
        ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
                    st == IRT_U8 ? ARMI_UXTB :
                    st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
        emit_dm(as, ai, dest, left);
      } else if (st == IRT_U8) {
        emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
      } else {
        uint32_t shift = st == IRT_I8 ? 24 : 16;
        ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
        emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
        emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
      }
    } else {  /* Handle 32/32 bit no-op (cast). */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}

#if !LJ_SOFTFP && LJ_HASFFI
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  CCallInfo ci;
  IRRef args[2];
  args[0] = (ir-1)->op1;
  args[1] = ir->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  ci = lj_ir_callinfo[id];
#if !LJ_ABI_SOFTFP
  ci.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
#endif
  asm_setupresult(as, ir, &ci);
  asm_gencall(as, &ci, args);
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg rlo = 0, rhi = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
#if LJ_SOFTFP
  if (destused) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s);
      destused = 0;
    } else {
      rhi = ra_dest(as, ir+1, RSET_GPR);
      rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused) {
    emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
    emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
  }
#else
  UNUSED(rhi);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      rlo = ra_dest(as, ir, RSET_FPR);
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused)
    emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
#endif
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  if (ofs == 0)
    emit_dm(as, ARMI_MOV, tmp, RID_SP);
  else
    emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
}

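/*
** This calls lj_strscan_num(str, &tv) with the TValue placed on the
** C stack (or directly in the result's spill slot, if it has one) and
** the CMP/guard pair exits the trace on a failed scan. As elsewhere,
** the emits read bottom-up in execution order.
*/
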
/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    if (irref_isk(ref)) {
      /* Use the number constant itself as a TValue. */
      ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
    } else {
#if LJ_SOFTFP
      lua_assert(0);
#else
      /* Otherwise force a spill and use the spill slot. */
      emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
#endif
    }
  } else {
    /* Otherwise use [sp] and [sp+4] to hold the TValue. */
    RegSet allow = rset_exclude(RSET_GPR, dest);
    Reg type;
    emit_dm(as, ARMI_MOV, dest, RID_SP);
    if (!irt_ispri(ir->t)) {
      Reg src = ra_alloc1(as, ref, allow);
      emit_lso(as, ARMI_STR, src, RID_SP, 0);
    }
    if ((ir+1)->o == IR_HIOP)
      type = ra_alloc1(as, ref+1, allow);
    else
      type = ra_allock(as, irt_toitype(ir->t), allow);
    emit_lso(as, ARMI_STR, type, RID_SP, 4);
  }
}

static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}

/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, ARMI_ADD^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
  uint32_t khash;
  MCLabel l_end, l_loop;
  rset_clear(allow, tab);
  if (!irref_isk(refkey) || irt_isstr(kt)) {
#if LJ_SOFTFP
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        keynumhi = (irkey+1)->r;
        keyhi = RID_TMP;
        ra_noweak(as, keynumhi);
      } else {
        keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, keynumhi);
      khi = 0;
    }
#else
    if (irt_isnum(kt)) {
      key = ra_scratch(as, allow);
      rset_clear(allow, key);
      keyhi = keynumhi = ra_scratch(as, allow);
      rset_clear(allow, keyhi);
      khi = 0;
    } else {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
#endif
  } else if (irt_isnum(kt)) {
    int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
    k = emit_isk12(ARMI_CMP, val);
    if (!k) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
    val = (int32_t)ir_knum(irkey)->u32.hi;
    khi = emit_isk12(ARMI_CMP, val);
    if (!khi) {
      keyhi = ra_allock(as, val, allow);
      rset_clear(allow, keyhi);
    }
  } else if (!irt_ispri(kt)) {
    k = emit_isk12(ARMI_CMP, irkey->i);
    if (!k) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }
  if (!irt_ispri(kt))
    tmp = ra_scratchpair(as, allow);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
  emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  if (!irt_ispri(kt)) {
    emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
    emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
    emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
  } else {
    emit_n(as, ARMI_CMP^khi, tmp);
    emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
  }
  *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
    if (irt_isstr(kt)) {  /* Fetch of str->hash is cheaper than ra_allock. */
      emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else if (irref_isk(refkey)) {
      emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
               rset_exclude(rset_exclude(RSET_GPR, tab), dest));
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      if (ra_hasreg(keynumhi)) {  /* Canonicalize +-0.0 to 0.0. */
        if (keyhi == RID_TMP)
          emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
        emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
      }
      emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
               tmp, tmp+1, tmp);
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
      if (ra_hasreg(keynumhi)) {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key);  /* Test for +-0.0. */
        emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
#if !LJ_SOFTFP
        emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
                 (ra_alloc1(as, refkey, RSET_FPR) & 15));
#endif
      } else {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
                 rset_exclude(rset_exclude(RSET_GPR, tab), key));
      }
    }
  }
}

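/*
** Execution-order sketch of the main-position computation for a string
** key (register names illustrative; the code above is emitted backwards):
**   ldr rmask, [rtab, #offsetof(GCtab, hmask)]
**   ldr rhash, [rkey, #offsetof(GCstr, hash)]
**   ldr rnode, [rtab, #offsetof(GCtab, node)]
**   and ridx, rhash, rmask           ; idx = hash & hmask
**   add ridx, ridx, ridx, lsl #1     ; idx *= 3
**   add rnode, rnode, ridx, lsl #3   ; node + idx*24 (24 == sizeof(Node))
** followed by the compare-and-follow-chain loop.
*/
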
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  lua_assert(ofs % sizeof(Node) == 0);
  if (ofs > 4095) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    RegSet even = (as->freeset & allow);
    even = even & (even >> 1) & RSET_GPREVEN;
    if (even) {
      key = ra_scratch(as, even);
      if (rset_test(as->freeset, key+1)) {
        type = key+1;
        ra_modified(as, type);
      }
    } else {
      key = ra_scratch(as, allow);
    }
    rset_clear(allow, key);
  }
  rset_clear(allow, type);
  if (irt_isnum(irkey->t)) {
    emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
             (int32_t)ir_knum(irkey)->u32.hi, allow);
    emit_opk(as, ARMI_CMP, 0, key,
             (int32_t)ir_knum(irkey)->u32.lo, allow);
  } else {
    if (ra_hasreg(key))
      emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
  }
  emit_lso(as, ARMI_LDR, type, idx, kofs+4);
  if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
  if (ofs > 4095)
    emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
}

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  if (ir->r == RID_SINK)
    return;
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t     */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, ARMI_LDR, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
      emit_opk(as, ARMI_ADD, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, ARMI_LDR, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lua_assert(!ra_used(ir));
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    uint32_t k, m = ARMI_K12|sizeof(GCstr);
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               (k = emit_isk12(ARMI_ADD,
                               (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
      m = k;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_dn(as, ARMI_ADD^m, dest, dest);
    emit_dnm(as, ARMI_ADD, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  emit_opk(as, ARMI_ADD, dest, r,
           sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
}

/* -- Loads and stores ---------------------------------------------------- */

static ARMIns asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return ARMI_LDRSB;
  case IRT_U8: return ARMI_LDRB;
  case IRT_I16: return ARMI_LDRSH;
  case IRT_U16: return ARMI_LDRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;
  default: return ARMI_LDR;
  }
}

static ARMIns asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return ARMI_STRB;
  case IRT_I16: case IRT_U16: return ARMI_STRH;
  case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;
  default: return ARMI_STR;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
  ARMIns ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op2 == IRFL_TAB_ARRAY) {
    ofs = asm_fuseabase(as, ir->op1);
    if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
      emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
      return;
    }
  }
  ofs = field_ofs[ir->op2];
  if ((ai & 0x04000000))
    emit_lso(as, ai, dest, idx, ofs);
  else
    emit_lsox(as, ai, dest, idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    ARMIns ai = asm_fxstoreins(ir);
    if ((ai & 0x04000000))
      emit_lso(as, ai, src, idx, ofs);
    else
      emit_lsox(as, ai, src, idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir,
                     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
  lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
}

static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2,
                        (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src), ofs);
  }
}

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
                       (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
  if (!hiop || type == RID_NONE) {
    rset_clear(allow, idx);
    if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
        rset_test((as->freeset & allow), dest+1)) {
      type = dest+1;
      ra_modified(as, type);
    } else {
      type = RID_TMP;
    }
  }
  asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
  emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM)
      emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
    else
#endif
    emit_lso(as, ARMI_LDR, dest, idx, ofs);
  }
  emit_lso(as, ARMI_LDR, type, idx, ofs+4);
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, type = RID_NONE;
    int32_t ofs = 0;
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
      emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
    } else
#endif
    {
      int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
      }
      if (hiop)
        type = ra_alloc1(as, (ir+1)->op2, allow);
      else
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
      if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
      emit_lso(as, ARMI_STR, type, idx, ofs+4);
    }
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
#if LJ_SOFTFP
  lua_assert(!(ir->op2 & IRSLOAD_CONVERT));  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
    lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (t == IRT_INT) {
        emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
        emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
        t = IRT_NUM;  /* Check for original type. */
      } else {
        emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
        emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
        t = IRT_INT;  /* Check for original type. */
      }
      dest = tmp;
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type)) {
      if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
          rset_test((as->freeset & allow), dest+1)) {
        type = dest+1;
        ra_modified(as, type);
      } else {
        type = RID_TMP;
      }
    }
    asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  }
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM) {
      if (ofs < 1024) {
        emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
      } else {
        if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
        emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
        emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
        return;
      }
    } else
#endif
    emit_lso(as, ARMI_LDR, dest, base, ofs);
  }
  if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
              lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[2];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lua_assert(sz != CTSIZE_INVALID);

  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  as->gcsteps++;

  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lua_assert(sz == 4 || sz == 8);
    if (sz == 8) {
      ofs += 4; ir++;
      lua_assert(ir->o == IR_HIOP);
    }
    for (;;) {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_lso(as, ARMI_STR, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    }
  }
  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    uint32_t k = emit_isk12(ARMI_MOV, ctypeid);
    Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow);
    emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
    if (k) emit_d(as, ARMI_MOV^k, RID_R1);
  }
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
                     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, ARMI_STR, tab, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
  emit_lso(as, ARMI_LDR, link, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
  emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  if ((l_end[-1] >> 28) == CC_AL)
    l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_lso(as, ARMI_LDRB, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

#if !LJ_SOFTFP
static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
}

static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_dm(as, ai, (dest & 15), (left & 15));
}

static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
{
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
        irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
      IRRef args[2];
      args[0] = irpp->op1;
      args[1] = irp->op2;
      asm_setupresult(as, ir, ci);
      asm_gencall(as, ci, args);
      return 1;
    }
  }
  return 0;
}
#endif

static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
      ai ^= (ARMI_SUB^ARMI_RSB);
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= ARMI_S;
  }
  emit_dn(as, ai^m, dest, left);
}

static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai |= ARMI_S;
  }
  asm_intop(as, ir, ai);
}

static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {  /* Try to drop cmp r, #0. */
    uint32_t cc = (as->mcp[1] >> 28);
    as->flagmcp = NULL;
    if (cc <= CC_NE) {
      as->mcp++;
      ai |= ARMI_S;
    } else if (cc == CC_GE) {
      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
      ai |= ARMI_S;
    } else if (cc == CC_LT) {
      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
      ai |= ARMI_S;
    }  /* else: other conds don't work with bit ops. */
  }
  if (ir->op2 == 0) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
    emit_d(as, ai^m, dest);
  } else {
    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
    asm_intop(as, ir, ai);
  }
}

static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dn(as, ai|ARMI_K12|0, dest, left);
}

/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  Reg tmp = RID_NONE;
  /* ARMv5 restriction: dest != left and dest_hi != left. */
  if (dest == left && left != right) { left = right; right = dest; }
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    if (!(as->flags & JIT_F_ARMV6) && dest == left)
      tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
    asm_guardcc(as, CC_NE);
    emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
    emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
  } else {
    if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
    emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
  }
  /* Only need this for the dest == left == right case. */
  if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
}

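/*
** Note: the IR_MULOV overflow check exploits SMULL's full 64 bit
** product. The result fits in 32 bits iff the high word equals the sign
** extension of the low word, which is exactly what the emitted
** teq RID_TMP, dest, asr #31 tests before the CC_NE guard.
*/
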
static void asm_add(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
      asm_fparith(as, ir, ARMI_VADD_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_ADD);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
      asm_fparith(as, ir, ARMI_VSUB_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_SUB);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, ARMI_VMUL_D);
    return;
  }
#endif
  asm_intmul(as, ir);
}

static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, ARMI_VNEG_D);
    return;
  }
#endif
  asm_intneg(as, ir, ARMI_RSB);
}

static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

#if !LJ_SOFTFP
static void asm_callround(ASMState *as, IRIns *ir, int id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
                RID2RSET(RID_R3)|RID2RSET(RID_R12);
  RegSet of;
  Reg dest, src;
  ra_evictset(as, drop);
  dest = ra_dest(as, ir, RSET_FPR);
  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
                id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
                                   (void *)lj_vm_trunc_sf);
  /* Workaround to protect argument GPRs from being used for remat. */
  of = as->freeset;
  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
}
#endif

static void asm_bitswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_ARMV6)) {
    emit_dm(as, ARMI_REV, dest, left);
  } else {
    Reg tmp2 = dest;
    if (tmp2 == left)
      tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
    emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
    emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
    emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
  }
}

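/*
** The pre-ARMv6 fallback is the classic 4-instruction byte swap; in
** execution order (i.e. reading the emits above bottom-up):
**   eor tmp, x, x, ror #16    ; tmp = x ^ rot16(x)
**   bic tmp, tmp, #0x00ff0000 ; clear the byte that must not swap
**   mov y, x, ror #8          ; y = rot8(x)
**   eor y, y, tmp, lsr #8     ; combine into the full byte reversal
** On ARMv6+ a single REV does the same.
*/
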
static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
{
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
    /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & 31);
    emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
  }
}

static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  uint32_t kcmp = 0, kmov = 0;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = 0;
  if (irref_isk(ir->op2)) {
    kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
    if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
  }
  if (!kmov) {
    kcmp = 0;
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  }
  if (kmov || dest != right) {
    emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
    cc ^= 1;  /* Must use opposite conditions for paired moves. */
  } else {
    cc ^= (CC_LT^CC_GT);  /* Otherwise may swap CC_LT <-> CC_GT. */
  }
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
  emit_nm(as, ARMI_CMP^kcmp, left, right);
}

#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0] = ir->op1; args[1] = (ir+1)->op1;
  args[2] = ir->op2; args[3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. */
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
  if (!rset_test(as->freeset, RID_R2) &&
      regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
  if (!rset_test(as->freeset, RID_R3) &&
      regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
  ra_evictset(as, drop);
  ra_destpair(as, ir);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 15); left &= 15;
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
  if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, left, right);
}
#endif

static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
{
#if LJ_SOFTFP
  UNUSED(fcc);
#else
  if (irt_isnum(ir->t))
    asm_fpmin_max(as, ir, fcc);
  else
#endif
  asm_intmin_max(as, ir, cc);
}

/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};

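/*
** The conditions stored here are *inverted*: a guard branches to the
** exit stub when the comparison fails, so e.g. IR_LT assembles to a CMP
** followed by a B.GE to the exit. The low nibble holds the condition for
** integer compares, the high nibble the one used for FP compares after
** a VCMP/VMRS sequence (unsigned-style conditions, which send the
** unordered NaN case to the exit as well).
*/
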
1723 #if LJ_SOFTFP
1724 /* FP comparisons. */
1725 static void asm_sfpcomp(ASMState *as, IRIns *ir)
1727 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
1728 RegSet drop = RSET_SCRATCH;
1729 Reg r;
1730 IRRef args[4];
1731 int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
1732 args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
1733 args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
1734 /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
1735 for (r = RID_R0; r <= RID_R3; r++)
1736 if (!rset_test(as->freeset, r) &&
1737 regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
1738 ra_evictset(as, drop);
1739 asm_guardcc(as, (asm_compmap[ir->o] >> 4));
1740 emit_call(as, (void *)ci->func);
1741 for (r = RID_R0; r <= RID_R3; r++)
1742 ra_leftov(as, r, args[r-RID_R0]);
#else
/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  ARMIns ai;
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
    right = 0;
    ai = ARMI_VCMPZ_D;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 15); left = ((left >> 8) & 15);
    } else {
      right = ((left >> 8) & 15); left &= 15;
    }
    ai = ARMI_VCMP_D;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ai, left, right);
}
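
/* Illustrative forward code for a hard-float IR_LT guard (a sketch):
**   vcmp.f64 d0, d1
**   vmrs     APSR_nzcv, FPSCR  ; copy FP status flags to integer flags
**   bhs      ->exit            ; CC_HS from asm_compmap: >= or unordered
** Comparing against a +0 constant uses the vcmp-with-zero form
** (ARMI_VCMPZ_D) and saves one FP register.
*/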
#endif

/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  ARMCC cc = (asm_compmap[ir->o] & 15);
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
  if (asm_swapops(as, lref, rref)) {
    Reg tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  if (irref_isk(rref) && IR(rref)->i == 0) {
    IRIns *irl = IR(lref);
    cmpprev0 = (irl+1 == ir);
    /* Combine comp(BAND(left, right), 0) into tst left, right. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        m2 = emit_isk12(ARMI_AND, IR(brref)->i);
        if ((m2 & (ARMI_AND^ARMI_BIC)))
          goto notst;  /* Not beneficial if we miss a constant operand. */
      }
      if (cc == CC_GE) cc = CC_PL;
      else if (cc == CC_LT) cc = CC_MI;
      else if (cc > CC_NE) goto notst;  /* Other conds don't work with tst. */
      bleft = ra_alloc1(as, blref, RSET_GPR);
      if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ARMI_TST^m2, bleft);
      return;
    }
  }
notst:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ARMI_CMP^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}
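
/* Example of the tst fusion (illustrative): for the IR sequence
**   x1 BAND a, +8
**   x2 EQ   x1, +0
** the compare folds into the flag-setting test itself:
**   tst a, #8
**   bne ->exit
** CC_GE/CC_LT become CC_PL/CC_MI since only the N flag is meaningful
** when comparing a masked value against zero.
*/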

#if LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_int64comp(ASMState *as, IRIns *ir)
{
  int signedcomp = (ir->o <= IR_GT);
  ARMCC cclo, cchi;
  Reg leftlo, lefthi;
  uint32_t mlo, mhi;
  RegSet allow = RSET_GPR, oldfree;

  /* Always use unsigned comparison for loword. */
  cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
  leftlo = ra_alloc1(as, ir->op1, allow);
  oldfree = as->freeset;
  mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
  allow &= ~(oldfree & ~as->freeset);  /* Update for allocs of asm_fuseopm. */

  /* Use signed or unsigned comparison for hiword. */
  cchi = asm_compmap[ir->o] & 15;
  lefthi = ra_alloc1(as, (ir+1)->op1, allow);
  mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));

  /* All register allocations must be performed _before_ this point. */
  if (signedcomp) {
    MCLabel l_around = emit_label(as);
    asm_guardcc(as, cclo);
    emit_n(as, ARMI_CMP^mlo, leftlo);
    emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
    if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6;  /* GE -> GT, LE -> LT */
    asm_guardcc(as, cchi);
  } else {
    asm_guardcc(as, cclo);
    emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
  }
  emit_n(as, ARMI_CMP^mhi, lefthi);
}
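
/* Illustrative forward code for a signed 64 bit IR_LT guard (a sketch):
**   cmp lefthi, righthi
**   bgt ->exit      ; cchi CC_GE sharpened to CC_GT: hi greater, not less
**   bne l_around    ; hiwords differ (thus hi less): result is decided
**   cmp leftlo, rightlo
**   bhs ->exit      ; cclo CC_HS: lowords always compare unsigned
** l_around:
** The unsigned variants instead predicate the loword cmp on EQ.
*/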
#endif

/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_HASFFI || LJ_SOFTFP
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o <= IR_NE) {  /* 64 bit integer or FP comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_int64comp(as, ir-1);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
    return;
#elif LJ_HASFFI
  } else if ((ir-1)->o == IR_CONV) {
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_xstore(as, ir, 4);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
  case IR_ADD:
    as->curins--;
    asm_intop(as, ir, ARMI_ADC);
    asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
    break;
  case IR_SUB:
    as->curins--;
    asm_intop(as, ir, ARMI_SBC);
    asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
    break;
  case IR_NEG:
    as->curins--;
    asm_intneg(as, ir, ARMI_RSC);
    asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
    break;
#endif
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
#endif
  case IR_CALLN:
  case IR_CALLS:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
#if LJ_SOFTFP
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
#endif
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);
#endif
}
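
/* Example (illustrative): a 64 bit FFI add is split into an IR_ADD for
** the loword and an IR_HIOP for the hiword, assembled into a carry chain:
**   adds lo_dest, lo_left, lo_right   ; ARMI_ADD|ARMI_S sets the carry
**   adc  hi_dest, hi_left, hi_right   ; ARMI_ADC consumes it
*/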

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lua_assert(ra_hasreg(pbase));
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0);  /* Restore temp. register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
  k = emit_isk12(0, (int32_t)(8*topslot));
  lua_assert(k);
  emit_n(as, ARMI_CMP^k, RID_TMP);
  emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
  emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
           (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    int32_t i = i32ptr(&J2G(as->J)->jit_L);
    if (ra_hasspill(irp->s))
      emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0);  /* Save temp. register. */
    emit_loadi(as, RID_TMP, (i & ~4095));
  } else {
    emit_getgl(as, RID_TMP, jit_L);
  }
}
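
/* Illustrative forward code for a root trace (a sketch):
**   ldr tmp, [&g->jit_L]              ; current lua_State *L
**   ldr tmp, [tmp, #offsetof(lua_State, maxstack)]
**   sub tmp, tmp, BASE
**   cmp tmp, #8*topslot               ; 8 bytes per stack slot
**   bls ->exit                        ; not enough headroom
*/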

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg tmp;
      lua_assert(irref_isk(ref));  /* LJ_SOFTFP: must be a number constant. */
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
                      rset_exclude(RSET_GPREVEN, RID_BASE));
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
      if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
#endif
    } else {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg type;
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
        emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
        if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), odd);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
#endif
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
      }
      emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}
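
/* Layout reminder (illustrative): each slot is an 8 byte TValue at
** BASE + 8*(slot-1), value word at ofs and type tag at ofs+4.  The
** even/odd GPR juggling above lets adjacent str instructions be
** merged into a single strd by the load/store optimizer.
*/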

/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp1, tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps */
  asm_gencall(as, ci, args);
  tmp1 = ra_releasetmp(as, ASMREF_TMP1);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
  emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
  emit_lso(as, ARMI_LDR, tmp2, tmp1,
           (int32_t)offsetof(global_State, gc.threshold));
  emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
           (int32_t)offsetof(global_State, gc.total));
  ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
  as->gcsteps = 0;
  checkmclim(as);
}
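
/* Illustrative forward code (a sketch):
**   tmp1 = &g                         ; constant address of global_State
**   ldr  tmp, [tmp1, #gc.total]
**   ldr  tmp2, [tmp1, #gc.threshold]
**   cmp  tmp, tmp2
**   bls  l_end                        ; below threshold: skip the step
**   mov  tmp2, #gcsteps
**   bl   lj_gc_step_jit
**   cmp  r0, #0
**   bne  ->exit                       ; GCSatomic/GCSfinalize: leave trace
** l_end:
*/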

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the bcc and patched the final bl. */
    p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
  } else {
    p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
  }
}
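
/* Offset math (illustrative): ARM B/BL encode a signed 24 bit word
** offset relative to PC+8, i.e. two words past the instruction.  For the
** branch at p[-1] the encoded offset is target - ((p-1)+2) = (target-p)-1;
** for p[-2] it is (target-p), matching the two cases above.
*/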

/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->jit_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, jit_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    rset_clear(allow, ra_dest(as, ir, allow));
  } else {
    Reg r = irp->r;
    lua_assert(ra_hasreg(r));
    rset_clear(allow, r);
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    as->mctop = --p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(ARMI_ADD, spadj);
    lua_assert(k);
    p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}

/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;

  /* Guarded assertions. */
  case IR_EQ: case IR_NE:
    if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
      as->curins--;
      asm_href(as, ir-1, (IROp)ir->o);
      break;
    }
    /* fallthrough */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
#endif
    asm_intcomp(as, ir);
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
  case IR_BSWAP: asm_bitswap(as, ir); break;

  case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
  case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
  case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;

  case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
  case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
  case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
  case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
  case IR_BROL: lua_assert(0); break;

  /* Arithmetic ops. */
  case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
  case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
  case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
  case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
  case IR_NEG: asm_neg(as, ir); break;

#if LJ_SOFTFP
  case IR_DIV: case IR_POW: case IR_ABS:
  case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
    lua_assert(0);  /* Unused for LJ_SOFTFP. */
    break;
#else
  case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
  case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
  case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
  case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
  case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
  case IR_FPMATH:
    if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
      break;
    if (ir->op2 <= IRFPM_TRUNC)
      asm_callround(as, ir, ir->op2);
    else if (ir->op2 == IRFPM_SQRT)
      asm_fpunary(as, ir, ARMI_VSQRT_D);
    else
      asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
    break;
  case IR_TOBIT: asm_tobit(as, ir); break;
#endif

  case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
  case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir, 0); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = (int)CCI_NARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(IR(args[i])->t)) {
          if (nfpr > 0) nfpr--;
          else fprodd = 0, nslots = (nslots + 3) & ~1;
        } else {
          if (fprodd) fprodd--;
          else if (nfpr > 0) fprodd = 1, nfpr--;
          else nslots++;
        }
      } else if (irt_isnum(IR(args[i])->t)) {
        ngpr &= ~1;
        if (ngpr > 0) ngpr -= 2; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots++;
      }
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(RID_RET);
}
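
/* Worked example (illustrative), hard-float AAPCS with r0-r3 and d0-d7
** available: for a call taking (double, float, double, float) the doubles
** land in d0 and d2, the first float in s2 and the second back-fills s3
** (tracked by fprodd), so nslots stays 0.  Soft-float ABI or vararg calls
** pass doubles in even/odd GPR pairs instead (ngpr &= ~1).
*/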

static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL, *cend = p;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_addr(J, exitno) - 2;
  for (; p < pe; p++) {
    /* Look for bl_cc exitstub, replace with b_cc target. */
    uint32_t ins = *p;
    if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
        ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
      *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
      cend = p+1;
      if (!cstart) cstart = p;
    }
  }
  lua_assert(cstart != NULL);
  lj_mcode_sync(cstart, cend);
  lj_mcode_patch(J, mcarea, 1);
}
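
/* Patch matching (illustrative): a conditional 'bl ->exitstub' has opcode
** bits 0x0b000000 with cond != NV; px is biased by -2 words so its low 24
** bits equal the stored branch offset.  The rewrite keeps the condition,
** clears the link bit (the 0xfe000000 mask) and inserts the new offset,
** turning the instruction into 'b_cc target' in place.
*/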