[luajit-2.0.git] / src / lj_asm_x86.h
/*
** x86/x64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2016 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Guard handling ------------------------------------------------------ */
/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
  MCode *mxp = as->mcbot;
  MCode *mxpstart = mxp;
  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
    asm_mclimit(as);
  /* Push low byte of exitno for each exit stub. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
  }
  /* Push the high byte of the exitno for each exit stub group. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
  *mxp++ = XI_MOVmi;
  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  *mxp++ = 2*sizeof(void *);
  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
  /* Jump to exit handler which fills in the ExitState. */
  *mxp++ = XI_JMP; mxp += 4;
  *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
  /* Commit the code for this group (even if assembly fails later on). */
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxpstart;
}
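
/* A sketch of the code this generates, assuming EXITSTUBS_PER_GROUP == 32
** (each stub is 2+2 bytes, which is what the size check above accounts for):
**
**   push groupofs+0   <- exit stub 0
**   jmp common
**   push groupofs+1   <- exit stub 1
**   jmp common
**   ...
**   push groupofs+31  <- exit stub 31 (falls through)
** common:
**   push (group*EXITSTUBS_PER_GROUP)>>8
**   mov dword [esp+2*sizeof(void *)], DISPATCH
**   jmp lj_vm_exit_handler
**
** Each stub thus leaves its full exit number as two pushed bytes on the
** stack before the common tail transfers control to the exit handler.
*/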

/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard.
** It's important to emit this *after* all registers have been allocated,
** because rematerializations may invalidate the flags.
*/
static void asm_guardcc(ASMState *as, int cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *(int32_t *)(p+1) = jmprel(p+5, target);
    target = p;
    cc ^= 1;
    if (as->realign) {
      emit_sjcc(as, cc, target);
      return;
    }
  }
  emit_jcc(as, cc, target);
}
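
/* Note: machine code is emitted backwards (as->mcp grows downwards), so
** the compare or test that sets the flags is emitted *after* this branch
** and ends up before it in execution order. The invmcp case merges the
** guard with the loop-closing branch: the condition is inverted and the
** 5-byte slot at the loop end is retargeted to the exit stub.
*/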

/* -- Memory operand fusion ----------------------------------------------- */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if a reference is a signed 32 bit constant. */
static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
    if (ir->o != IR_KINT64) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
      *k = (int32_t)ir_kint64(ir)->u64;
      return 1;
    }
  }
  return 0;
}

/* Check if there's no conflicting instruction between curins and ref.
** Also avoid fusing loads if there are multiple references.
*/
static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref) {
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
      return 0;
  }
  return 1;  /* Ok, no conflict. */
}
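
/* Example: when fusing an HLOAD the caller passes the matching store
** opcode as the conflict, so for "x = t[k]; t[k2] = v; y = x + 1" the
** load is not fused past the possibly aliasing store. With noload clear,
** a second use of the load also blocks fusion, since the memory operand
** would be read twice and could observe different values.
*/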

/* Fuse array base into memory operand. */
static IRRef asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *irb = IR(ref);
  as->mrm.ofs = 0;
  if (irb->o == IR_FLOAD) {
    IRIns *ira = IR(irb->op1);
    lua_assert(irb->op2 == IRFL_TAB_ARRAY);
    /* We can avoid the FLOAD of t->array for colocated arrays. */
    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
        !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
      return irb->op1;  /* Table obj. */
    }
  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
    /* Fuse base offset (vararg load). */
    as->mrm.ofs = IR(irb->op2)->i;
    return irb->op1;
  }
  return ref;  /* Otherwise use the given array base. */
}
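
/* Example: for "local t = {1,2,3}" the array part is colocated directly
** behind the GCtab, so t->array == (char *)t + sizeof(GCtab). The FLOAD
** of t->array can then be skipped, unless an intervening NEWREF might
** have resized (and thus reallocated) the array part.
*/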

/* Fuse array reference into memory operand. */
static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irx;
  lua_assert(ir->o == IR_AREF);
  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
  irx = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += 8*irx->i;
    as->mrm.idx = RID_NONE;
  } else {
    rset_clear(allow, as->mrm.base);
    as->mrm.scale = XM_SCALE8;
    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
    ** Doesn't help much without ABCelim, but reduces register pressure.
    */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
        irx->o == IR_ADD && irref_isk(irx->op2)) {
      as->mrm.ofs += 8*IR(irx->op2)->i;
      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
    } else {
      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
    }
  }
}
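
/* The resulting operand for t[i+1] is roughly [base + idx*8 + ofs + 8]:
** the constant +1 moves into the displacement (TValue slots are 8 bytes,
** hence XM_SCALE8), so only the array base and the variable index part
** still occupy registers.
*/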

/* Fuse array/hash/upvalue reference into memory operand.
** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
** pass the final allow mask, excluding any GPRs used for other inputs.
** In particular: 2-operand GPR instructions need to call ra_dest() first!
*/
static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    switch ((IROp)ir->o) {
    case IR_AREF:
      if (mayfuse(as, ref)) {
        asm_fusearef(as, ir, allow);
        return;
      }
      break;
    case IR_HREFK:
      if (mayfuse(as, ref)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
        as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        as->mrm.idx = RID_NONE;
        return;
      }
      break;
    case IR_UREFC:
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
        as->mrm.ofs = ptr2addr(&uv->tv);
        as->mrm.base = as->mrm.idx = RID_NONE;
        return;
      }
      break;
    default:
      lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
                 ir->o == IR_KKPTR);
      break;
    }
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  as->mrm.ofs = 0;
  as->mrm.idx = RID_NONE;
}

/* Fuse FLOAD/FREF reference into memory operand. */
static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
{
  lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
  as->mrm.ofs = field_ofs[ir->op2];
  as->mrm.idx = RID_NONE;
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
    as->mrm.base = RID_NONE;
  } else {
    as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
  }
}

/* Fuse string reference into memory operand. */
static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irr;
  lua_assert(ir->o == IR_STRREF);
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = sizeof(GCstr);
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
  } else {
    Reg r = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, r);
    as->mrm.base = (uint8_t)r;
  }
  irr = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += irr->i;
  } else {
    Reg r;
    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
      as->mrm.ofs += IR(irr->op2)->i;
      r = ra_alloc1(as, irr->op1, allow);
    } else {
      r = ra_alloc1(as, ir->op2, allow);
    }
    if (as->mrm.base == RID_NONE)
      as->mrm.base = (uint8_t)r;
    else
      as->mrm.idx = (uint8_t)r;
  }
}

static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  as->mrm.idx = RID_NONE;
  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    as->mrm.ofs = ir->i;
    as->mrm.base = RID_NONE;
  } else if (ir->o == IR_STRREF) {
    asm_fusestrref(as, ir, allow);
  } else {
    as->mrm.ofs = 0;
    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
      IRIns *irx;
      IRRef idx;
      Reg r;
      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
        ref = ir->op1;
        ir = IR(ref);
        if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
          goto noadd;
      }
      as->mrm.scale = XM_SCALE1;
      idx = ir->op1;
      ref = ir->op2;
      irx = IR(idx);
      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
        idx = ir->op2;
        ref = ir->op1;
        irx = IR(idx);
      }
      if (canfuse(as, irx) && ra_noreg(irx->r)) {
        if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
          idx = irx->op1;
          as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
        } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
          /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
          idx = irx->op1;
          as->mrm.scale = XM_SCALE2;
        }
        r = ra_alloc1(as, idx, allow);
        rset_clear(allow, r);
        as->mrm.idx = (uint8_t)r;
      }
    }
  noadd:
    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  }
}
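
/* Example: cdata indexing like "p[i]" with 4 byte elements typically
** reaches here as ADD(ADD(base, BSHL(i, 2)), ofs). This collapses into a
** single x86 memory operand [base + i*4 + ofs]: the outer constant goes
** into mrm.ofs, the shift count becomes the SIB scale (i << 2 means
** scale 4) and the remaining operand is allocated as the base register.
*/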

/* Fuse load into memory operand. */
static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    if (allow != RSET_EMPTY) {  /* Fast path. */
      ra_noweak(as, ir->r);
      return ir->r;
    }
  fusespill:
    /* Force a spill if only memory operands are allowed (asm_x87load). */
    as->mrm.base = RID_ESP;
    as->mrm.ofs = ra_spill(as, ir);
    as->mrm.idx = RID_NONE;
    return RID_MRM;
  }
  if (ir->o == IR_KNUM) {
    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
    lua_assert(allow != RSET_EMPTY);
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      as->mrm.ofs = ptr2addr(ir_knum(ir));
      as->mrm.base = as->mrm.idx = RID_NONE;
      return RID_MRM;
    }
  } else if (ir->o == IR_KINT64) {
    RegSet avail = as->freeset & ~as->modset & RSET_GPR;
    lua_assert(allow != RSET_EMPTY);
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      as->mrm.ofs = ptr2addr(ir_kint64(ir));
      as->mrm.base = as->mrm.idx = RID_NONE;
      return RID_MRM;
    }
  } else if (mayfuse(as, ref)) {
    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
          noconflict(as, ref, IR_RETF, 0)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
        as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      }
    } else if (ir->o == IR_FLOAD) {
      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
      if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
          noconflict(as, ref, IR_FSTORE, 0)) {
        asm_fusefref(as, ir, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
        asm_fuseahuref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_XLOAD) {
      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
      */
      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
          noconflict(as, ref, IR_XSTORE, 0)) {
        asm_fusexref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_VLOAD) {
      asm_fuseahuref(as, ir->op1, xallow);
      return RID_MRM;
    }
  }
  if (!(as->freeset & allow) && !irref_isk(ref) &&
      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
    goto fusespill;
  return ra_allocref(as, ref, allow);
}
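
/* The "less than two regs available" test uses the usual bit trick:
** avail & (avail-1) clears the lowest set bit, so the expression is zero
** iff at most one register is free. Fusing the constant as a memory
** operand in that case avoids evicting a live register just to hold it.
*/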

#if LJ_64
/* Don't fuse a 32 bit load into a 64 bit operation. */
static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
{
  if (is64 && !irt_is64(IR(ref)->t))
    return ra_alloc1(as, ref, allow);
  return asm_fuseload(as, ref, allow);
}
#else
#define asm_fuseloadm(as, ref, allow, is64)  asm_fuseload(as, (ref), (allow))
#endif

/* -- Calls --------------------------------------------------------------- */

/* Count the required number of stack slots for a call. */
static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t i, nargs = CCI_NARGS(ci);
  int nslots = 0;
#if LJ_64
  if (LJ_ABI_WIN) {
    nslots = (int)(nargs*2);  /* Only matters for more than four args. */
  } else {
    int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
    for (i = 0; i < nargs; i++)
      if (args[i] && irt_isfp(IR(args[i])->t)) {
        if (nfpr > 0) nfpr--; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots += 2;
      }
  }
#else
  int ngpr = 0;
  if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
    ngpr = 2;
  else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
    ngpr = 1;
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
#endif
  return nslots;
}
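
/* Worked example for the POSIX/x64 path, assuming REGARG_NUMGPR == 6 and
** REGARG_NUMFPR == 8: a call with eight integer/pointer args uses all six
** GPR argument registers and spills two args, i.e. nslots = 2*2 = 4
** (slots are 4 bytes wide here). The Windows/x64 estimate nargs*2 is a
** safe upper bound, since that ABI reserves home slots for register args.
*/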

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = STACKARG_OFS;
#if LJ_64
  uint32_t gprs = REGARG_GPRS;
  Reg fpr = REGARG_FIRSTFPR;
#if !LJ_ABI_WIN
  MCode *patchnfpr = NULL;
#endif
#else
  uint32_t gprs = 0;
  if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
    if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
      gprs = (REGARG_GPRS & 31);
    else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
      gprs = REGARG_GPRS;
  }
#endif
  if ((void *)ci->func)
    emit_call(as, ci->func);
#if LJ_64
  if ((ci->flags & CCI_VARARG)) {  /* Special handling for vararg calls. */
#if LJ_ABI_WIN
    for (n = 0; n < 4 && n < nargs; n++) {
      IRIns *ir = IR(args[n]);
      if (irt_isfp(ir->t))  /* Duplicate FPRs in GPRs. */
        emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
                ((gprs >> (n*5)) & 31));  /* Either MOVD or MOVQ. */
    }
#else
    patchnfpr = --as->mcp;  /* Indicate number of used FPRs in register al. */
    *--as->mcp = XI_MOVrib | RID_EAX;
#endif
  }
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    Reg r;
#if LJ_64 && LJ_ABI_WIN
    /* Windows/x64 argument registers are strictly positional. */
    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
    fpr++; gprs >>= 5;
#elif LJ_64
    /* POSIX/x64 argument registers are used in order of appearance. */
    if (irt_isfp(ir->t)) {
      r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
    } else {
      r = gprs & 31; gprs >>= 5;
    }
#else
    if (ref && irt_isfp(ir->t)) {
      r = 0;
    } else {
      r = gprs & 31; gprs >>= 5;
      if (!ref) continue;
    }
#endif
    if (r) {  /* Argument is in a register. */
      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
#if LJ_64
        if (ir->o == IR_KINT64)
          emit_loadu64(as, r, ir_kint64(ir)->u64);
        else
#endif
          emit_loadi(as, r, ir->i);
      } else {
        lua_assert(rset_test(as->freeset, r));  /* Must have been evicted. */
        if (ra_hasreg(ir->r)) {
          ra_noweak(as, ir->r);
          emit_movrr(as, ir, r, ir->r);
        } else {
          ra_allocref(as, ref, RID2RSET(r));
        }
      }
    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
      lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref)));  /* No float k. */
      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
        /* Split stores for unaligned FP consts. */
        emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
        emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
      } else {
        r = ra_alloc1(as, ref, RSET_FPR);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
                  r, RID_ESP, ofs);
      }
      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
    } else {  /* Non-FP argument is on stack. */
      if (LJ_32 && ref < ASMREF_TMP1) {
        emit_movmroi(as, RID_ESP, ofs, ir->i);
      } else {
        r = ra_alloc1(as, ref, RSET_GPR);
        emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
      }
      ofs += sizeof(intptr_t);
    }
    checkmclim(as);
  }
#if LJ_64 && !LJ_ABI_WIN
  if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
#endif
}
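
/* Since code is emitted backwards, the emit_call() at the top of this
** function produces the instruction that executes *last*: all argument
** moves, constant loads and stack stores generated by the loop end up in
** front of the call in the final code.
*/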

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = (LJ_32 && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    if (irt_isfp(ir->t)) {
      int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
#if LJ_64
      if ((ci->flags & CCI_CASTU64)) {
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
          emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
        }
        if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
#else
      /* Number result is in x87 st0 for x86 calling convention. */
      Reg dest = ir->r;
      if (ra_hasreg(dest)) {
        ra_free(as, dest);
        ra_modified(as, dest);
        emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
                  dest, RID_ESP, ofs);
      }
      if ((ci->flags & CCI_CASTU64)) {
        emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
        emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
      } else {
        emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
                  irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
      }
#endif
#if LJ_32
    } else if (hiop) {
      ra_destpair(as, ir);
#endif
    } else {
      lua_assert(!irt_ispri(ir->t));
      ra_destreg(as, ir, RID_RET);
    }
  } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
  }
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

/* Return a constant function pointer or NULL for indirect calls. */
static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
{
#if LJ_32
  UNUSED(as);
  if (irref_isk(func))
    return (void *)irf->i;
#else
  if (irref_isk(func)) {
    MCode *p;
    if (irf->o == IR_KINT64)
      p = (MCode *)(void *)ir_k64(irf)->u64;
    else
      p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
    if (p - as->mcp == (int32_t)(p - as->mcp))
      return p;  /* Call target is still in +-2GB range. */
    /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
  }
#endif
  return NULL;
}
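
/* The range test relies on truncation: the displacement of a direct call
** is a rel32, so the target is only reachable if casting the pointer
** difference to int32_t is lossless. A minimal equivalent sketch:
**
**   ptrdiff_t d = p - as->mcp;
**   int direct = (d == (int32_t)d);   (+-2GB: use call rel32)
*/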

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  int32_t spadj = 0;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
#if LJ_32
  /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
  if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
    spadj = 4 * asm_count_call_slots(as, &ci, args);
#endif
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  ci.func = (ASMFunction)asm_callx_func(as, irf, func);
  if (!(void *)ci.func) {
    /* Use a (hoistable) non-scratch register for indirect calls. */
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r = ra_alloc1(as, func, allow);
    if (LJ_32) emit_spsub(as, spadj);  /* Above code may cause restores! */
    emit_rr(as, XO_GROUP5, XOg_CALL, r);
  } else if (LJ_32) {
    emit_spsub(as, spadj);
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_P);
  asm_guardcc(as, CC_NE);
  emit_rr(as, XO_UCOMISD, left, tmp);
  emit_rr(as, XO_CVTSI2SD, tmp, dest);
  if (!(as->flags & JIT_F_SPLIT_XMM))
    emit_rr(as, XO_XORPS, tmp, tmp);  /* Avoid partial register stall. */
  emit_rr(as, XO_CVTTSD2SI, dest, left);
  /* Can't fuse since left is needed twice. */
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp = ra_noreg(IR(ir->op1)->r) ?
              ra_alloc1(as, ir->op1, RSET_FPR) :
              ra_scratch(as, RSET_FPR);
  Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
  emit_rr(as, XO_MOVDto, tmp, dest);
  emit_mrm(as, XO_ADDSD, tmp, right);
  ra_left(as, tmp, ir->op1);
}

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lua_assert(irt_type(ir->t) != st);
  lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64)));  /* Handled by SPLIT. */
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      Reg left = asm_fuseload(as, lref, RSET_FPR);
      emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
      if (left == dest) return;  /* Avoid the XO_XORPS. */
    } else if (LJ_32 && st == IRT_U32) {  /* U32 to FP conversion on x86. */
      /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
      cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
      Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_rr(as, XO_CVTSD2SS, dest, dest);
      emit_rr(as, XO_SUBSD, dest, bias);  /* Subtract 2^52+2^51 bias. */
      emit_rr(as, XO_XORPS, dest, bias);  /* Merge bias and integer. */
      emit_loadn(as, bias, k);
      emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
      return;
    } else {  /* Integer to FP conversion. */
      Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
                 ra_alloc1(as, lref, RSET_GPR) :
                 asm_fuseloadm(as, lref, RSET_GPR, st64);
      if (LJ_64 && st == IRT_U64) {
        MCLabel l_end = emit_label(as);
        const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
        emit_rma(as, XO_ADDSD, dest, k);  /* Add 2^64 to compensate. */
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, left|REX_64, left);  /* Check if u64 >= 2^63. */
      }
      emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
               dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
    }
    if (!(as->flags & JIT_F_SPLIT_XMM))
      emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      x86Op op = st == IRT_NUM ?
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
      if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
        /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
        /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
        Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
                                          ra_scratch(as, RSET_FPR);
        MCLabel l_end = emit_label(as);
        if (LJ_32)
          emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
        emit_rr(as, op, dest|REX_64, tmp);
        if (st == IRT_NUM)
          emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
                   LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
        else
          emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
                   LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, dest|REX_64, dest);  /* Check if dest negative. */
        emit_rr(as, op, dest|REX_64, tmp);
        ra_left(as, tmp, lref);
      } else {
        Reg left = asm_fuseload(as, lref, RSET_FPR);
        if (LJ_64 && irt_isu32(ir->t))
          emit_rr(as, XO_MOV, dest, dest);  /* Zero hiword. */
        emit_mrm(as, op,
                 dest|((LJ_64 &&
                        (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
                 left);
      }
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    RegSet allow = RSET_GPR;
    x86Op op;
    lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
    if (st == IRT_I8) {
      op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_U8) {
      op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_I16) {
      op = XO_MOVSXw;
    } else {
      op = XO_MOVZXw;
    }
    left = asm_fuseload(as, lref, allow);
    /* Add extra MOV if source is already in wrong register. */
    if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
      Reg tmp = ra_scratch(as, allow);
      emit_rr(as, op, dest, tmp);
      emit_rr(as, XO_MOV, tmp, left);
    } else {
      emit_mrm(as, op, dest, left);
    }
  } else {  /* 32/64 bit integer conversions. */
    if (LJ_32) {  /* Only need to handle 32/32 bit no-op (cast) on x86. */
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
    } else if (irt_is64(ir->t)) {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
      }
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64) {
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        emit_mrm(as, XO_MOV, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}
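
/* The U32 to FP bias trick above, worked through: 0x4338000000000000 is
** the double 2^52+2^51, whose mantissa LSB has weight 1. XORPS merges the
** u32 into the low 32 bits of that bit pattern, which yields exactly the
** value 2^52+2^51+u32, and the following SUBSD removes the bias again,
** leaving the exact unsigned 32 bit value as a double.
*/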

#if LJ_32 && LJ_HASFFI
/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */

/* 64 bit integer to FP conversion in 32 bit mode. */
static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
{
  Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
    emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
              dest, RID_ESP, ofs);
  }
  emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
            irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
  if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
    MCLabel l_end = emit_label(as);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
    emit_sjcc(as, CC_NS, l_end);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if u64 >= 2^63. */
  } else {
    lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
  }
  emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
  /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
  emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
  emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
}

/* FP to 64 bit integer conversion in 32 bit mode. */
static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  Reg lo, hi;
  lua_assert(st == IRT_NUM || st == IRT_FLOAT);
  lua_assert(dt == IRT_I64 || dt == IRT_U64);
  lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
  hi = ra_dest(as, ir, RSET_GPR);
  lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
  if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
  /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
  if (!(as->flags & JIT_F_SSE3)) {  /* Set FPU rounding mode to default. */
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
    emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
    emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
  }
  if (dt == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
    MCLabel l_pop, l_end = emit_label(as);
    emit_x87op(as, XI_FPOP);
    l_pop = emit_label(as);
    emit_sjmp(as, l_end);
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3))
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
    else
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
    emit_sjcc(as, CC_NS, l_pop);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if out-of-range (2^63). */
  }
  emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
  if ((as->flags & JIT_F_SSE3)) {  /* Truncation is easy with SSE3. */
    emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
  } else {  /* Otherwise set FPU rounding mode to truncate before the store. */
    emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
    emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
    emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
    emit_loadi(as, lo, 0xc00);
    emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
  }
  if (dt == IRT_U64)
    emit_x87op(as, XI_FDUP);
  emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
           st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
           asm_fuseload(as, ir->op1, RSET_EMPTY));
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  /* Force a spill slot for the destination register (if any). */
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
    rset_set(drop, ir->r);  /* WIN64 doesn't spill all FPRs. */
  ra_evictset(as, drop);
  asm_guardcc(as, CC_E);
  emit_rr(as, XO_TEST, RID_RET, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
            RID_ESP, sps_scale(ir->s));
}

static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(irl->t)) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
              RID_ESP, ra_spill(as, irl));
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}

/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusearef(as, ir, RSET_GPR);
  if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
    emit_mrm(as, XO_LEA, dest, RID_MRM);
  else if (as->mrm.base != dest)
    emit_rr(as, XO_MOV, dest, as->mrm.base);
}

/* Merge NE(HREF, niltv) check. */
static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
{
  /* Assumes nothing else generates NE of HREF. */
  if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
      ra_hasreg(ir->r)) {
    MCode *p = as->mcp;
    p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
    /* Ensure no loop branch inversion happened. */
    if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
      as->mcp = p;  /* Kill cmp reg, imm32 + jz exit. */
      return p + *(int32_t *)(p-4);  /* Return exit address. */
    }
  }
  return NULL;
}
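
/* Example: "if t[k] ~= nil then ... end" produces NE(HREF, niltv). When
** merged, the chain-miss branch of the inlined lookup below jumps
** straight to the snapshot exit, and the cmp reg, imm32 + jz pair that
** was already emitted for the NE is killed here (code is emitted
** backwards, so that pair sits just above as->mcp at this point).
*/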

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir)
{
  MCode *nilexit = merge_href_niltv(as, ir);  /* Do this before any restores. */
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, tmp = RID_NONE;
  IRIns *irkey = IR(ir->op2);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  if (!isk) {
    rset_clear(allow, tab);
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    if (!irt_isstr(kt))
      tmp = ra_scratch(as, rset_exclude(allow, key));
  }

  /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
  l_end = emit_label(as);
  if (nilexit && ir[1].o == IR_NE) {
    emit_jcc(as, CC_E, nilexit);  /* XI_JMP is not found by lj_asm_patchexit. */
    nilexit = NULL;
  } else {
    emit_loada(as, dest, niltvg(J2G(as->J)));
  }

  /* Follow hash chain until the end. */
  l_loop = emit_sjcc_label(as, CC_NZ);
  emit_rr(as, XO_TEST, dest, dest);
  emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (nilexit)
    emit_jcc(as, CC_E, nilexit);
  else
    emit_sjcc(as, CC_E, l_end);
  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 (int32_t)ir_knum(irkey)->u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 (int32_t)ir_knum(irkey)->u32.hi);
    } else {
      emit_sjcc(as, CC_P, l_next);
      emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
      emit_sjcc(as, CC_AE, l_next);
      /* The type check avoids NaN penalties and complaints from Valgrind. */
#if LJ_64
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
      emit_i8(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
    }
#if LJ_64
  } else if (irt_islightud(kt)) {
    emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
#endif
  } else {
    if (!irt_ispri(kt)) {
      lua_assert(irt_isaddr(kt));
      if (isk)
        emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
                   ptr2addr(ir_kgc(irkey)));
      else
        emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
      emit_sjcc(as, CC_NE, l_next);
    }
    lua_assert(!irt_isnil(kt));
    emit_i8(as, irt_toitype(kt));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
  }
  emit_sfixup(as, l_loop);
  checkmclim(as);

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
  } else {
    emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
    if ((as->flags & JIT_F_PREFER_IMUL)) {
      emit_i8(as, sizeof(Node));
      emit_rr(as, XO_IMULi8, dest, dest);
    } else {
      emit_shifti(as, XOg_SHL, dest, 3);
      emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
    }
    if (isk) {
      emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hashrot() in lj_tab.c. */
      emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
      emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
      emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
      emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
      emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
      emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
      if (irt_isnum(kt)) {
        emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
#if LJ_64
        emit_shifti(as, XOg_SHR|REX_64, dest, 32);
        emit_rr(as, XO_MOV, tmp, dest);
        emit_rr(as, XO_MOVDto, key|REX_64, dest);
#else
        emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
        emit_rr(as, XO_MOVDto, key, tmp);
#endif
      } else {
        emit_rr(as, XO_MOV, tmp, key);
        emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
#if !LJ_64
  MCLabel l_exit;
#endif
  lua_assert(ofs % sizeof(Node) == 0);
  if (ra_hasreg(dest)) {
    if (ofs != 0) {
      if (dest == node && !(as->flags & JIT_F_LEA_AGU))
        emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
      else
        emit_rmro(as, XO_LEA, dest, node, ofs);
    } else if (dest != node) {
      emit_rr(as, XO_MOV, dest, node);
    }
  }
  asm_guardcc(as, CC_NE);
#if LJ_64
  if (!irt_ispri(irkey->t)) {
    Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
    emit_rmro(as, XO_CMP, key|REX_64, node,
              ofs + (int32_t)offsetof(Node, key.u64));
    lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
                          ((uint64_t)irt_toitype(irkey->t) << 32) |
                          (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
  } else {
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#else
  l_exit = emit_label(as);
  if (irt_isnum(irkey->t)) {
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.lo),
               (int32_t)ir_knum(irkey)->u32.lo);
    emit_sjcc(as, CC_NE, l_exit);
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.hi),
               (int32_t)ir_knum(irkey)->u32.hi);
  } else {
    if (!irt_ispri(irkey->t)) {
      lua_assert(irt_isgcv(irkey->t));
      emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
                 ofs + (int32_t)offsetof(Node, key.gcr),
                 ptr2addr(ir_kgc(irkey)));
      emit_sjcc(as, CC_NE, l_exit);
    }
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#endif
}

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  IRIns *irkey;
  Reg tmp;
  if (ir->r == RID_SINK)
    return;
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t     */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  irkey = IR(ir->op2);
  if (irt_isnum(irkey->t)) {
    /* For numbers use the constant itself or a spill slot as a TValue. */
    if (irref_isk(ir->op2))
      emit_loada(as, tmp, ir_knum(irkey));
    else
      emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
    if (!irref_isk(ir->op2)) {
      Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
      emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
    } else if (!irt_ispri(irkey->t)) {
      emit_movmroi(as, tmp, 0, irkey->i);
    }
    if (!(LJ_64 && irt_islightud(irkey->t)))
      emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
    emit_loada(as, tmp, &J2G(as->J)->tmptv);
  }
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_rma(as, XO_MOV, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
      asm_guardcc(as, CC_NE);
      emit_i8(as, 1);
      emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
    } else {
      emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
    }
    emit_rmro(as, XO_MOV, uv, func,
              (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusefref(as, ir, RSET_GPR);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusestrref(as, ir, RSET_GPR);
  if (as->mrm.base == RID_NONE)
    emit_loadi(as, dest, as->mrm.ofs);
  else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
    emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
  else
    emit_mrm(as, XO_LEA, dest, RID_MRM);
}

/* -- Loads and stores ---------------------------------------------------- */

static void asm_fxload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  x86Op xo;
  if (ir->o == IR_FLOAD)
    asm_fusefref(as, ir, RSET_GPR);
  else
    asm_fusexref(as, ir->op1, RSET_GPR);
  /* ir->op2 is ignored -- unaligned loads are ok on x86. */
  switch (irt_type(ir->t)) {
  case IRT_I8: xo = XO_MOVSXb; break;
  case IRT_U8: xo = XO_MOVZXb; break;
  case IRT_I16: xo = XO_MOVSXw; break;
  case IRT_U16: xo = XO_MOVZXw; break;
  case IRT_NUM: xo = XMM_MOVRM(as); break;
  case IRT_FLOAT: xo = XO_MOVSS; break;
  default:
    if (LJ_64 && irt_is64(ir->t))
      dest |= REX_64;
    else
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
    xo = XO_MOV;
    break;
  }
  emit_mrm(as, xo, dest, RID_MRM);
}

static void asm_fxstore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg src = RID_NONE, osrc = RID_NONE;
  int32_t k = 0;
  if (ir->r == RID_SINK)
    return;
  /* The IRT_I16/IRT_U16 stores should never be simplified for constant
  ** values since mov word [mem], imm16 has a length-changing prefix.
  */
  if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
      !asm_isk32(as, ir->op2, &k)) {
    RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
                    (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
    src = osrc = ra_alloc1(as, ir->op2, allow8);
    if (!LJ_64 && !rset_test(allow8, src)) {  /* Already in wrong register. */
      rset_clear(allow, osrc);
      src = ra_scratch(as, allow8);
    }
    rset_clear(allow, src);
  }
  if (ir->o == IR_FSTORE) {
    asm_fusefref(as, IR(ir->op1), allow);
  } else {
    asm_fusexref(as, ir->op1, allow);
    if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
  }
  if (ra_hasreg(src)) {
    x86Op xo;
    switch (irt_type(ir->t)) {
    case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
    case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
    case IRT_NUM: xo = XO_MOVSDto; break;
    case IRT_FLOAT: xo = XO_MOVSSto; break;
#if LJ_64
    case IRT_LIGHTUD: lua_assert(0);  /* NYI: mask 64 bit lightuserdata. */
#endif
    default:
      if (LJ_64 && irt_is64(ir->t))
        src |= REX_64;
      else
        lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
      xo = XO_MOVto;
      break;
    }
    emit_mrm(as, xo, src, RID_MRM);
    if (!LJ_64 && src != osrc) {
      ra_noweak(as, osrc);
      emit_rr(as, XO_MOV, src, osrc);
    }
  } else {
    if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
      emit_i8(as, k);
      emit_mrm(as, XO_MOVmib, 0, RID_MRM);
    } else {
      lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
                 irt_isaddr(ir->t));
      emit_i32(as, k);
      emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
    }
  }
}

#if LJ_64
static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
{
  if (ra_used(ir) || typecheck) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (typecheck) {
      Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
      asm_guardcc(as, CC_NE);
      emit_i8(as, -2);
      emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
      emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
      emit_rr(as, XO_MOV, tmp|REX_64, dest);
    }
    return dest;
  } else {
    return RID_NONE;
  }
}
#endif
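
/* Read the emits bottom-up for execution order: copy the value to tmp,
** shift it arithmetically right by 47 and compare against -2. This guards
** that the top 17 bits contain the lightuserdata tag pattern of the
** NaN-tagged 64 bit value; anything else takes the exit.
*/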

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
             (LJ_DUALNUM && irt_isint(ir->t)));
#if LJ_64
  if (irt_islightud(ir->t)) {
    Reg dest = asm_load_lightud64(as, ir, 1);
    if (ra_hasreg(dest)) {
      asm_fuseahuref(as, ir->op1, RSET_GPR);
      emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
    }
    return;
  } else
#endif
  if (ra_used(ir)) {
    RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
    Reg dest = ra_dest(as, ir, allow);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM);
  } else {
    asm_fuseahuref(as, ir->op1, RSET_GPR);
  }
  /* Always do the type check, even if the load result is unused. */
  as->mrm.ofs += 4;
  asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
  if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
    lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
    emit_u32(as, LJ_TISNUM);
    emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
  } else {
    emit_i8(as, irt_toitype(ir->t));
    emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
  }
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r == RID_SINK)
    return;
  if (irt_isnum(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    emit_mrm(as, XO_MOVSDto, src, RID_MRM);
#if LJ_64
  } else if (irt_islightud(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
    emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
#endif
  } else {
    IRIns *irr = IR(ir->op2);
    RegSet allow = RSET_GPR;
    Reg src = RID_NONE;
    if (!irref_isk(ir->op2)) {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
    }
    asm_fuseahuref(as, ir->op1, allow);
    if (ra_hasreg(src)) {
      emit_mrm(as, XO_MOVto, src, RID_MRM);
    } else if (!irt_ispri(irr->t)) {
      lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
      emit_i32(as, irr->i);
      emit_mrm(as, XO_MOVmi, 0, RID_MRM);
    }
    as->mrm.ofs += 4;
    emit_i32(as, (int32_t)irt_toitype(ir->t));
    emit_mrm(as, XO_MOVmi, 0, RID_MRM);
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  IRType1 t = ir->t;
  Reg base;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
  lua_assert(LJ_DUALNUM ||
             !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    Reg left = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, left);  /* Frees dest reg. Do this before base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    emit_rmro(as, XMM_MOVRM(as), left, base, ofs);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
#if LJ_64
  } else if (irt_islightud(t)) {
    Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
    if (ra_hasreg(dest)) {
      base = ra_alloc1(as, REF_BASE, RSET_GPR);
      emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
    }
    return;
#endif
  } else if (ra_used(ir)) {
    RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
    Reg dest = ra_dest(as, ir, allow);
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      t.irt = irt_isint(t) ? IRT_NUM : IRT_INT;  /* Check for original type. */
      emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs);
    } else if (irt_isnum(t)) {
      emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
    } else {
      emit_rmro(as, XO_MOV, dest, base, ofs);
    }
  } else {
    if (!(ir->op2 & IRSLOAD_TYPECHECK))
      return;  /* No type check: avoid base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
  }
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    /* Need type check, even if the load result is unused. */
    asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
    if (LJ_64 && irt_type(t) >= IRT_NUM) {
      lua_assert(irt_isinteger(t) || irt_isnum(t));
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
    } else {
      emit_i8(as, irt_toitype(t));
      emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
    }
  }
}
1500 #if LJ_HASFFI
1501 static void asm_cnew(ASMState *as, IRIns *ir)
1503 CTState *cts = ctype_ctsG(J2G(as->J));
1504 CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
1505 CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
1506 lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
1507 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1508 IRRef args[2];
1509 lua_assert(sz != CTSIZE_INVALID);
1511 args[0] = ASMREF_L; /* lua_State *L */
1512 args[1] = ASMREF_TMP1; /* MSize size */
1513 as->gcsteps++;
1514 asm_setupresult(as, ir, ci); /* GCcdata * */
1516 /* Initialize immutable cdata object. */
1517 if (ir->o == IR_CNEWI) {
1518 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1519 #if LJ_64
1520 Reg r64 = sz == 8 ? REX_64 : 0;
1521 if (irref_isk(ir->op2)) {
1522 IRIns *irk = IR(ir->op2);
1523 uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 :
1524 (uint64_t)(uint32_t)irk->i;
1525 if (sz == 4 || checki32((int64_t)k)) {
1526 emit_i32(as, (int32_t)k);
1527 emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
1528 } else {
1529 emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
1530 emit_loadu64(as, RID_ECX, k);
1532 } else {
1533 Reg r = ra_alloc1(as, ir->op2, allow);
1534 emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
1536 #else
1537 int32_t ofs = sizeof(GCcdata);
1538 if (sz == 8) {
1539 ofs += 4; ir++;
1540 lua_assert(ir->o == IR_HIOP);
1542 do {
1543 if (irref_isk(ir->op2)) {
1544 emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
1545 } else {
1546 Reg r = ra_alloc1(as, ir->op2, allow);
1547 emit_movtomro(as, r, RID_RET, ofs);
1548 rset_clear(allow, r);
1550 if (ofs == sizeof(GCcdata)) break;
1551 ofs -= 4; ir--;
1552 } while (1);
1553 #endif
1554 lua_assert(sz == 4 || sz == 8);
1557 /* Combine initialization of marked, gct and ctypeid. */
1558 emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
1559 emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
1560 (int32_t)((~LJ_TCDATA<<8)+(ctypeid<<16)));
1561 emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
1562 emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
1564 asm_gencall(as, ci, args);
1565 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
1567 #else
1568 #define asm_cnew(as, ir) ((void)0)
1569 #endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  MCLabel l_end = emit_label(as);
  emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, tmp, gc.grayagain);
  emit_i8(as, ~LJ_GC_BLACK);
  emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
  obj = IR(ir->op1)->r;
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_WHITES);
  if (irref_isk(ir->op2)) {
    GCobj *vp = ir_kgc(IR(ir->op2));
    emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
  } else {
    Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
    emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
  }
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
            (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
}

/* -- FP/int arithmetic and logic operations ------------------------------ */

/* Load reference onto x87 stack. Force a spill to memory if needed. */
static void asm_x87load(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_KNUM) {
    cTValue *tv = ir_knum(ir);
    if (tvispzero(tv))  /* Use fldz only for +0. */
      emit_x87op(as, XI_FLDZ);
    else if (tvispone(tv))
      emit_x87op(as, XI_FLD1);
    else
      emit_rma(as, XO_FLDq, XOg_FLDq, tv);
  } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
             !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
    IRIns *iri = IR(ir->op1);
    emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
  } else {
    emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
  }
}

/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
static int fpmjoin_pow(ASMState *as, IRIns *ir)
{
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
        irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      /* The modified regs must match with the *.dasc implementation. */
      RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
      IRIns *irx;
      if (ra_hasreg(ir->r))
        rset_clear(drop, ir->r);  /* Dest reg handled below. */
      ra_evictset(as, drop);
      ra_destreg(as, ir, RID_XMM0);
      emit_call(as, lj_vm_pow_sse);
      irx = IR(irpp->op1);
      if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
        irx->r = RID_INIT;  /* Avoid allocating xmm1 for x. */
      ra_left(as, RID_XMM0, irpp->op1);
      ra_left(as, RID_XMM1, irp->op2);
      return 1;
    }
  }
  return 0;
}
1668 static void asm_fpmath(ASMState *as, IRIns *ir)
1670 IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER;
1671 if (fpm == IRFPM_SQRT) {
1672 Reg dest = ra_dest(as, ir, RSET_FPR);
1673 Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
1674 emit_mrm(as, XO_SQRTSD, dest, left);
1675 } else if (fpm <= IRFPM_TRUNC) {
1676 if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */
1677 Reg dest = ra_dest(as, ir, RSET_FPR);
1678 Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
1679 /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
1680 ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
1681 ** This is atrocious, but the alternatives are much worse.
1682 */
1683 /* Round down/up/trunc == 1001/1010/1011. */
1684 emit_i8(as, 0x09 + fpm);
1685 emit_mrm(as, XO_ROUNDSD, dest, left);
1686 if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
1687 as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */
1689 *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */
1690 } else { /* Call helper functions for SSE2 variant. */
1691 /* The modified regs must match the *.dasc implementation. */
1692 RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
1693 if (ra_hasreg(ir->r))
1694 rset_clear(drop, ir->r); /* Dest reg handled below. */
1695 ra_evictset(as, drop);
1696 ra_destreg(as, ir, RID_XMM0);
1697 emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
1698 fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
1699 ra_left(as, RID_XMM0, ir->op1);
1701 } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) {
1702 /* Rejoined to pow(). */
1703 } else { /* Handle x87 ops. */
1704 int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
1705 Reg dest = ir->r;
1706 if (ra_hasreg(dest)) {
1707 ra_free(as, dest);
1708 ra_modified(as, dest);
1709 emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs);
1711 emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
1712 switch (fpm) { /* st0 = lj_vm_*(st0) */
1713 case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break;
1714 case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break;
1715 case IRFPM_SIN: emit_x87op(as, XI_FSIN); break;
1716 case IRFPM_COS: emit_x87op(as, XI_FCOS); break;
1717 case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break;
1718 case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10:
1719 /* Note: the use of fyl2xp1 would be pointless here. When computing
1720 ** log(1.0+eps) the precision is already lost after 1.0 is added.
1721 ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
1722 */
1723 emit_x87op(as, XI_FYL2X); break;
1724 case IRFPM_OTHER:
1725 switch (ir->o) {
1726 case IR_ATAN2:
1727 emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break;
1728 case IR_LDEXP:
1729 emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break;
1730 default: lua_assert(0); break;
1732 break;
1733 default: lua_assert(0); break;
1735 asm_x87load(as, ir->op1);
1736 switch (fpm) {
1737 case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break;
1738 case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break;
1739 case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break;
1740 case IRFPM_OTHER:
1741 if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2);
1742 break;
1743 default: break;
1748 static void asm_fppowi(ASMState *as, IRIns *ir)
1750 /* The modified regs must match the *.dasc implementation. */
1751 RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
1752 if (ra_hasreg(ir->r))
1753 rset_clear(drop, ir->r); /* Dest reg handled below. */
1754 ra_evictset(as, drop);
1755 ra_destreg(as, ir, RID_XMM0);
1756 emit_call(as, lj_vm_powi_sse);
1757 ra_left(as, RID_XMM0, ir->op1);
1758 ra_left(as, RID_EAX, ir->op2);
1761 #if LJ_64 && LJ_HASFFI
1762 static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
1764 const CCallInfo *ci = &lj_ir_callinfo[id];
1765 IRRef args[2];
1766 args[0] = ir->op1;
1767 args[1] = ir->op2;
1768 asm_setupresult(as, ir, ci);
1769 asm_gencall(as, ci, args);
1771 #endif
1773 static void asm_intmod(ASMState *as, IRIns *ir)
1775 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
1776 IRRef args[2];
1777 args[0] = ir->op1;
1778 args[1] = ir->op2;
1779 asm_setupresult(as, ir, ci);
1780 asm_gencall(as, ci, args);
1783 static int asm_swapops(ASMState *as, IRIns *ir)
1785 IRIns *irl = IR(ir->op1);
1786 IRIns *irr = IR(ir->op2);
1787 lua_assert(ra_noreg(irr->r));
1788 if (!irm_iscomm(lj_ir_mode[ir->o]))
1789 return 0; /* Can't swap non-commutative operations. */
1790 if (irref_isk(ir->op2))
1791 return 0; /* Don't swap constants to the left. */
1792 if (ra_hasreg(irl->r))
1793 return 1; /* Swap if left already has a register. */
1794 if (ra_samehint(ir->r, irr->r))
1795 return 1; /* Swap if dest and right have matching hints. */
1796 if (as->curins > as->loopref) { /* In variant part? */
1797 if (ir->op2 < as->loopref && !irt_isphi(irr->t))
1798 return 0; /* Keep invariants on the right. */
1799 if (ir->op1 < as->loopref && !irt_isphi(irl->t))
1800 return 1; /* Swap invariants to the right. */
1802 if (opisfusableload(irl->o))
1803 return 1; /* Swap fusable loads to the right. */
1804 return 0; /* Otherwise don't swap. */
1807 static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
1809 IRRef lref = ir->op1;
1810 IRRef rref = ir->op2;
1811 RegSet allow = RSET_FPR;
1812 Reg dest;
1813 Reg right = IR(rref)->r;
1814 if (ra_hasreg(right)) {
1815 rset_clear(allow, right);
1816 ra_noweak(as, right);
1818 dest = ra_dest(as, ir, allow);
1819 if (lref == rref) {
1820 right = dest;
1821 } else if (ra_noreg(right)) {
1822 if (asm_swapops(as, ir)) {
1823 IRRef tmp = lref; lref = rref; rref = tmp;
1825 right = asm_fuseload(as, rref, rset_clear(allow, dest));
1827 emit_mrm(as, xo, dest, right);
1828 ra_left(as, dest, lref);
1831 static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
1833 IRRef lref = ir->op1;
1834 IRRef rref = ir->op2;
1835 RegSet allow = RSET_GPR;
1836 Reg dest, right;
1837 int32_t k = 0;
1838 if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */
1839 MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2);
1840 if ((p[1] & 15) < 14) {
1841 if ((p[1] & 15) >= 12) p[1] -= 4; /* L <-> S, NL <-> NS */
1842 as->flagmcp = NULL;
1843 as->mcp = p;
1844 } /* else: cannot transform LE/NLE to cc without use of OF. */
1846 right = IR(rref)->r;
1847 if (ra_hasreg(right)) {
1848 rset_clear(allow, right);
1849 ra_noweak(as, right);
1851 dest = ra_dest(as, ir, allow);
1852 if (lref == rref) {
1853 right = dest;
1854 } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
1855 if (asm_swapops(as, ir)) {
1856 IRRef tmp = lref; lref = rref; rref = tmp;
1858 right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
1860 if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */
1861 asm_guardcc(as, CC_O);
1862 if (xa != XOg_X_IMUL) {
1863 if (ra_hasreg(right))
1864 emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
1865 else
1866 emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
1867 } else if (ra_hasreg(right)) { /* IMUL r, mrm. */
1868 emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
1869 } else { /* IMUL r, r, k. */
1870 /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
1871 Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
1872 x86Op xo;
1873 if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
1874 } else { emit_i32(as, k); xo = XO_IMULi; }
1875 emit_mrm(as, xo, REX_64IR(ir, dest), left);
1876 return;
1878 ra_left(as, dest, lref);
1881 /* LEA is really a 4-operand ADD with an independent destination register,
1882 ** up to two source registers and an immediate. One register can be scaled
1883 ** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
1884 ** instructions.
1885 **
1886 ** Currently only a few common cases are supported:
1887 ** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated
1888 ** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
1889 ** - Right ADD fusion: y = a+(b+k)
1890 ** The omitted variants have already been reduced by FOLD.
1891 **
1892 ** There are more fusion opportunities, like gathering shifts or joining
1893 ** common references. But these are probably not worth the trouble, since
1894 ** array indexing is not decomposed and already makes use of all fields
1895 ** of the ModRM operand.
1896 */
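/* For illustration, the supported shapes map onto x86 addressing modes as
** follows (register names hypothetical):
**
**   y = a+b          ->  lea y, [ra+rb]
**   y = a+k          ->  lea y, [ra+k]
**   y = (a+b)+k etc. ->  lea y, [ra+rb+k]
*/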
1897 static int asm_lea(ASMState *as, IRIns *ir)
1899 IRIns *irl = IR(ir->op1);
1900 IRIns *irr = IR(ir->op2);
1901 RegSet allow = RSET_GPR;
1902 Reg dest;
1903 as->mrm.base = as->mrm.idx = RID_NONE;
1904 as->mrm.scale = XM_SCALE1;
1905 as->mrm.ofs = 0;
1906 if (ra_hasreg(irl->r)) {
1907 rset_clear(allow, irl->r);
1908 ra_noweak(as, irl->r);
1909 as->mrm.base = irl->r;
1910 if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
1911 /* The PHI renaming logic does a better job in some cases. */
1912 if (ra_hasreg(ir->r) &&
1913 ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
1914 (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
1915 return 0;
1916 if (irref_isk(ir->op2)) {
1917 as->mrm.ofs = irr->i;
1918 } else {
1919 rset_clear(allow, irr->r);
1920 ra_noweak(as, irr->r);
1921 as->mrm.idx = irr->r;
1923 } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
1924 irref_isk(irr->op2)) {
1925 Reg idx = ra_alloc1(as, irr->op1, allow);
1926 rset_clear(allow, idx);
1927 as->mrm.idx = (uint8_t)idx;
1928 as->mrm.ofs = IR(irr->op2)->i;
1929 } else {
1930 return 0;
1932 } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
1933 (irref_isk(ir->op2) || irref_isk(irl->op2))) {
1934 Reg idx, base = ra_alloc1(as, irl->op1, allow);
1935 rset_clear(allow, base);
1936 as->mrm.base = (uint8_t)base;
1937 if (irref_isk(ir->op2)) {
1938 as->mrm.ofs = irr->i;
1939 idx = ra_alloc1(as, irl->op2, allow);
1940 } else {
1941 as->mrm.ofs = IR(irl->op2)->i;
1942 idx = ra_alloc1(as, ir->op2, allow);
1944 rset_clear(allow, idx);
1945 as->mrm.idx = (uint8_t)idx;
1946 } else {
1947 return 0;
1949 dest = ra_dest(as, ir, allow);
1950 emit_mrm(as, XO_LEA, dest, RID_MRM);
1951 return 1; /* Success. */
1954 static void asm_add(ASMState *as, IRIns *ir)
1956 if (irt_isnum(ir->t))
1957 asm_fparith(as, ir, XO_ADDSD);
1958 else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
1959 irt_is64(ir->t) || !asm_lea(as, ir))
1960 asm_intarith(as, ir, XOg_ADD);
1963 static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
1965 Reg dest = ra_dest(as, ir, RSET_GPR);
1966 emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
1967 ra_left(as, dest, ir->op1);
1970 static void asm_min_max(ASMState *as, IRIns *ir, int cc)
1972 Reg right, dest = ra_dest(as, ir, RSET_GPR);
1973 IRRef lref = ir->op1, rref = ir->op2;
1974 if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
1975 right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
1976 emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
1977 emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
1978 ra_left(as, dest, lref);
1981 static void asm_bitswap(ASMState *as, IRIns *ir)
1983 Reg dest = ra_dest(as, ir, RSET_GPR);
1984 as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
1985 REX_64IR(ir, 0), dest, 0, as->mcp, 1);
1986 ra_left(as, dest, ir->op1);
1989 static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
1991 IRRef rref = ir->op2;
1992 IRIns *irr = IR(rref);
1993 Reg dest;
1994 if (irref_isk(rref)) { /* Constant shifts. */
1995 int shift;
1996 dest = ra_dest(as, ir, RSET_GPR);
1997 shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
1998 switch (shift) {
1999 case 0: break;
2000 case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
2001 default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
2003 } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
2004 Reg right;
2005 dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
2006 if (dest == RID_ECX) {
2007 dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
2008 emit_rr(as, XO_MOV, RID_ECX, dest);
2010 right = irr->r;
2011 if (ra_noreg(right))
2012 right = ra_allocref(as, rref, RID2RSET(RID_ECX));
2013 else if (right != RID_ECX)
2014 ra_scratch(as, RID2RSET(RID_ECX));
2015 emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
2016 ra_noweak(as, right);
2017 if (right != RID_ECX)
2018 emit_rr(as, XO_MOV, RID_ECX, right);
2020 ra_left(as, dest, ir->op1);
2021 /*
2022 ** Note: avoid using the flags resulting from a shift or rotate!
2023 ** All of them cause a partial flag stall, except for r,1 shifts
2024 ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
2025 */
2028 /* -- Comparisons --------------------------------------------------------- */
2030 /* Virtual flags for unordered FP comparisons. */
2031 #define VCC_U 0x1000 /* Unordered. */
2032 #define VCC_P 0x2000 /* Needs extra CC_P branch. */
2033 #define VCC_S 0x4000 /* Swap avoids CC_P branch. */
2034 #define VCC_PS (VCC_P|VCC_S)
2036 /* Map of comparisons to flags. ORDER IR. */
2037 #define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf))
2038 static const uint16_t asm_compmap[IR_ABC+1] = {
2039 /* signed non-eq unsigned flags */
2040 /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS),
2041 /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0),
2042 /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS),
2043 /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0),
2044 /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U),
2045 /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS),
2046 /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U),
2047 /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS),
2048 /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
2049 /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P),
2050 /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */
2051 };
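/* Reading one entry: for IR_LT the guard branch must be taken when the
** comparison fails, so the entry holds the inverted conditions: CC_GE for
** signed integers, CC_AE (cc>>4) for unsigned and FP compares, CC_G (cc>>8)
** for a hiword compare without equality check, plus VCC_PS to signal that
** swapping the FP operands avoids the extra parity branch.
*/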
2053 /* FP and integer comparisons. */
2054 static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
2056 if (irt_isnum(ir->t)) {
2057 IRRef lref = ir->op1;
2058 IRRef rref = ir->op2;
2059 Reg left, right;
2060 MCLabel l_around;
2061 /*
2062 ** An extra CC_P branch is required to preserve ordered/unordered
2063 ** semantics for FP comparisons. This can be avoided by swapping
2064 ** the operands and inverting the condition (except for EQ and UNE).
2065 ** So always try to swap if possible.
2066 **
2067 ** Another option would be to swap operands to achieve better memory
2068 ** operand fusion. But it's unlikely that this outweighs the cost
2069 ** of the extra branches.
2070 */
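/* Concrete case: for a guarded a < b, swapping gives ucomisd b, a with a
** CC_BE exit guard. Unordered operands set ZF=PF=CF=1, so a NaN operand
** already satisfies CC_BE and takes the exit without any extra CC_P branch.
*/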
2071 if (cc & VCC_S) { /* Swap? */
2072 IRRef tmp = lref; lref = rref; rref = tmp;
2073 cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
2075 left = ra_alloc1(as, lref, RSET_FPR);
2076 right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
2077 l_around = emit_label(as);
2078 asm_guardcc(as, cc >> 4);
2079 if (cc & VCC_P) { /* Extra CC_P branch required? */
2080 if (!(cc & VCC_U)) {
2081 asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */
2082 } else if (l_around != as->invmcp) {
2083 emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */
2084 } else {
2085 /* Patched to mcloop by asm_loop_fixup. */
2086 as->loopinv = 2;
2087 if (as->realign)
2088 emit_sjcc(as, CC_P, as->mcp);
2089 else
2090 emit_jcc(as, CC_P, as->mcp);
2093 emit_mrm(as, XO_UCOMISD, left, right);
2094 } else {
2095 IRRef lref = ir->op1, rref = ir->op2;
2096 IROp leftop = (IROp)(IR(lref)->o);
2097 Reg r64 = REX_64IR(ir, 0);
2098 int32_t imm = 0;
2099 lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
2100 irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t));
2101 /* Swap constants (only for ABC) and fusable loads to the right. */
2102 if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
2103 if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */
2104 else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */
2105 lref = ir->op2; rref = ir->op1;
2107 if (asm_isk32(as, rref, &imm)) {
2108 IRIns *irl = IR(lref);
2109 /* Check whether we can use test ins. Not for unsigned, since CF=0. */
2110 int usetest = (imm == 0 && (cc & 0xa) != 0x2);
2111 if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
2112 /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
2113 Reg right, left = RID_NONE;
2114 RegSet allow = RSET_GPR;
2115 if (!asm_isk32(as, irl->op2, &imm)) {
2116 left = ra_alloc1(as, irl->op2, allow);
2117 rset_clear(allow, left);
2118 } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */
2119 IRIns *irll = IR(irl->op1);
2120 if (opisfusableload((IROp)irll->o) &&
2121 (irt_isi8(irll->t) || irt_isu8(irll->t))) {
2122 IRType1 origt = irll->t; /* Temporarily flip types. */
2123 irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
2124 as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
2125 right = asm_fuseload(as, irl->op1, RSET_GPR);
2126 as->curins++;
2127 irll->t = origt;
2128 if (right != RID_MRM) goto test_nofuse;
2129 /* Fusion succeeded, emit test byte mrm, imm8. */
2130 asm_guardcc(as, cc);
2131 emit_i8(as, (imm & 0xff));
2132 emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
2133 return;
2136 as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
2137 right = asm_fuseloadm(as, irl->op1, allow, r64);
2138 as->curins++; /* Undo the above. */
2139 test_nofuse:
2140 asm_guardcc(as, cc);
2141 if (ra_noreg(left)) {
2142 emit_i32(as, imm);
2143 emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
2144 } else {
2145 emit_mrm(as, XO_TEST, r64 + left, right);
2147 } else {
2148 Reg left;
2149 if (opisfusableload((IROp)irl->o) &&
2150 ((irt_isu8(irl->t) && checku8(imm)) ||
2151 ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
2152 (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
2153 /* Only the IRT_INT case is fused by asm_fuseload.
2154 ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
2155 ** are handled here.
2156 ** Note that cmp word [mem], imm16 should not be generated,
2157 ** since it has a length-changing prefix. Compares of a word
2158 ** against a sign-extended imm8 are ok, however.
2159 */
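/* Byte-level illustration: cmp word [mem], imm16 encodes as 66 81 /7 iw,
** where the 66 prefix changes the size of the trailing immediate; that is
** the length-changing prefix case that stalls instruction pre-decoding.
** cmp word [mem], imm8 (66 83 /7 ib) keeps a 1-byte immediate and is fine.
*/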
2160 IRType1 origt = irl->t; /* Temporarily flip types. */
2161 irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
2162 left = asm_fuseload(as, lref, RSET_GPR);
2163 irl->t = origt;
2164 if (left == RID_MRM) { /* Fusion succeeded? */
2165 if (irt_isu8(irl->t) || irt_isu16(irl->t))
2166 cc >>= 4; /* Need unsigned compare. */
2167 asm_guardcc(as, cc);
2168 emit_i8(as, imm);
2169 emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
2170 XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
2171 return;
2172 } /* Otherwise handle register case as usual. */
2173 } else {
2174 left = asm_fuseloadm(as, lref,
2175 irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
2177 asm_guardcc(as, cc);
2178 if (usetest && left != RID_MRM) {
2179 /* Use test r,r instead of cmp r,0. */
2180 x86Op xo = XO_TEST;
2181 if (irt_isu8(ir->t)) {
2182 lua_assert(ir->o == IR_EQ || ir->o == IR_NE);
2183 xo = XO_TESTb;
2184 if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
2185 if (LJ_64) {
2186 left |= FORCE_REX;
2187 } else {
2188 emit_i32(as, 0xff);
2189 emit_mrm(as, XO_GROUP3, XOg_TEST, left);
2190 return;
2194 emit_rr(as, xo, r64 + left, left);
2195 if (irl+1 == ir) /* Referencing previous ins? */
2196 as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */
2197 } else {
2198 emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
2201 } else {
2202 Reg left = ra_alloc1(as, lref, RSET_GPR);
2203 Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
2204 asm_guardcc(as, cc);
2205 emit_mrm(as, XO_CMP, r64 + left, right);
2210 #if LJ_32 && LJ_HASFFI
2211 /* 64 bit integer comparisons in 32 bit mode. */
2212 static void asm_comp_int64(ASMState *as, IRIns *ir)
2214 uint32_t cc = asm_compmap[(ir-1)->o];
2215 RegSet allow = RSET_GPR;
2216 Reg lefthi = RID_NONE, leftlo = RID_NONE;
2217 Reg righthi = RID_NONE, rightlo = RID_NONE;
2218 MCLabel l_around;
2219 x86ModRM mrm;
2221 as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */
2223 /* Allocate/fuse hiword operands. */
2224 if (irref_isk(ir->op2)) {
2225 lefthi = asm_fuseload(as, ir->op1, allow);
2226 } else {
2227 lefthi = ra_alloc1(as, ir->op1, allow);
2228 rset_clear(allow, lefthi);
2229 righthi = asm_fuseload(as, ir->op2, allow);
2230 if (righthi == RID_MRM) {
2231 if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
2232 if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
2233 } else {
2234 rset_clear(allow, righthi);
2237 mrm = as->mrm; /* Save state for hiword instruction. */
2239 /* Allocate/fuse loword operands. */
2240 if (irref_isk((ir-1)->op2)) {
2241 leftlo = asm_fuseload(as, (ir-1)->op1, allow);
2242 } else {
2243 leftlo = ra_alloc1(as, (ir-1)->op1, allow);
2244 rset_clear(allow, leftlo);
2245 rightlo = asm_fuseload(as, (ir-1)->op2, allow);
2248 /* All register allocations must be performed _before_ this point. */
2249 l_around = emit_label(as);
2250 as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */
2252 /* Loword comparison and branch. */
2253 asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */
2254 if (ra_noreg(rightlo)) {
2255 int32_t imm = IR((ir-1)->op2)->i;
2256 if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
2257 emit_rr(as, XO_TEST, leftlo, leftlo);
2258 else
2259 emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
2260 } else {
2261 emit_mrm(as, XO_CMP, leftlo, rightlo);
2264 /* Hiword comparison and branches. */
2265 if ((cc & 15) != CC_NE)
2266 emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */
2267 if ((cc & 15) != CC_E)
2268 asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */
2269 as->mrm = mrm; /* Restore state. */
2270 if (ra_noreg(righthi)) {
2271 int32_t imm = IR(ir->op2)->i;
2272 if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
2273 emit_rr(as, XO_TEST, lefthi, lefthi);
2274 else
2275 emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
2276 } else {
2277 emit_mrm(as, XO_CMP, lefthi, righthi);
2280 #endif
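/* Run-time sketch of the emitted code for, e.g., a guarded signed a < b
** (registers and operands illustrative):
**
**   cmp  a.hi, b.hi
**   jg   ->exit          ; hiword alone decides (cc>>8, no equality check)
**   jne  l_around        ; hiword smaller: guard holds, skip loword
**   cmp  a.lo, b.lo
**   jae  ->exit          ; hiwords equal: unsigned loword compare (cc>>4)
** l_around:
*/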
2282 /* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
2284 /* Hiword op of a split 64 bit op. Previous op must be the loword op. */
2285 static void asm_hiop(ASMState *as, IRIns *ir)
2287 #if LJ_32 && LJ_HASFFI
2288 /* HIOP is marked as a store because it needs its own DCE logic. */
2289 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
2290 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
2291 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
2292 if (usehi || uselo) {
2293 if (irt_isfp(ir->t))
2294 asm_conv_fp_int64(as, ir);
2295 else
2296 asm_conv_int64_fp(as, ir);
2298 as->curins--; /* Always skip the CONV. */
2299 return;
2300 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
2301 asm_comp_int64(as, ir);
2302 return;
2303 } else if ((ir-1)->o == IR_XSTORE) {
2304 if ((ir-1)->r != RID_SINK)
2305 asm_fxstore(as, ir);
2306 return;
2308 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
2309 switch ((ir-1)->o) {
2310 case IR_ADD:
2311 as->flagmcp = NULL;
2312 as->curins--;
2313 asm_intarith(as, ir, XOg_ADC);
2314 asm_intarith(as, ir-1, XOg_ADD);
2315 break;
2316 case IR_SUB:
2317 as->flagmcp = NULL;
2318 as->curins--;
2319 asm_intarith(as, ir, XOg_SBB);
2320 asm_intarith(as, ir-1, XOg_SUB);
2321 break;
2322 case IR_NEG: {
2323 Reg dest = ra_dest(as, ir, RSET_GPR);
2324 emit_rr(as, XO_GROUP3, XOg_NEG, dest);
2325 emit_i8(as, 0);
2326 emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
2327 ra_left(as, dest, ir->op1);
2328 as->curins--;
2329 asm_neg_not(as, ir-1, XOg_NEG);
2330 break;
2332 case IR_CALLN:
2333 case IR_CALLXS:
2334 if (!uselo)
2335 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
2336 break;
2337 case IR_CNEWI:
2338 /* Nothing to do here. Handled by CNEWI itself. */
2339 break;
2340 default: lua_assert(0); break;
2342 #else
2343 UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */
2344 #endif
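/* For the IR_ADD/IR_SUB cases above, the backwards assembler emits the
** hiword ADC/SBB first, so it lands *after* the loword ADD/SUB in machine
** code and the carry chain is preserved:
**
**   add  lo(dest), lo(right)
**   adc  hi(dest), hi(right)
*/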
2347 /* -- Stack handling ------------------------------------------------------ */
2349 /* Check Lua stack size for overflow. Use exit handler as fallback. */
2350 static void asm_stack_check(ASMState *as, BCReg topslot,
2351 IRIns *irp, RegSet allow, ExitNo exitno)
2353 /* Try to get an unused temp. register, otherwise spill/restore eax. */
2354 Reg pbase = irp ? irp->r : RID_BASE;
2355 Reg r = allow ? rset_pickbot(allow) : RID_EAX;
2356 emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
2357 if (allow == RSET_EMPTY) /* Restore temp. register. */
2358 emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
2359 else
2360 ra_modified(as, r);
2361 emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot));
2362 if (ra_hasreg(pbase) && pbase != r)
2363 emit_rr(as, XO_ARITH(XOg_SUB), r, pbase);
2364 else
2365 emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
2366 ptr2addr(&J2G(as->J)->jit_base));
2367 emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack));
2368 emit_getgl(as, r, jit_L);
2369 if (allow == RSET_EMPTY) /* Spill temp. register. */
2370 emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
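/* Run-time order of the check (r is the temp. register; the bracketed
** instructions only appear if no free register was available):
**
**   [mov [esp], r]
**   mov  r, [g->jit_L]
**   mov  r, [r+offsetof(lua_State, maxstack)]
**   sub  r, pbase                 ; or: sub r, [g->jit_base]
**   cmp  r, 8*topslot
**   [mov r, [esp]]                ; restore: mov leaves the flags intact
**   jb   ->exit
*/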
2373 /* Restore Lua stack from on-trace state. */
2374 static void asm_stack_restore(ASMState *as, SnapShot *snap)
2376 SnapEntry *map = &as->T->snapmap[snap->mapofs];
2377 SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
2378 MSize n, nent = snap->nent;
2379 /* Store the value of all modified slots to the Lua stack. */
2380 for (n = 0; n < nent; n++) {
2381 SnapEntry sn = map[n];
2382 BCReg s = snap_slot(sn);
2383 int32_t ofs = 8*((int32_t)s-1);
2384 IRRef ref = snap_ref(sn);
2385 IRIns *ir = IR(ref);
2386 if ((sn & SNAP_NORESTORE))
2387 continue;
2388 if (irt_isnum(ir->t)) {
2389 Reg src = ra_alloc1(as, ref, RSET_FPR);
2390 emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
2391 } else {
2392 lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
2393 (LJ_DUALNUM && irt_isinteger(ir->t)));
2394 if (!irref_isk(ref)) {
2395 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
2396 emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
2397 } else if (!irt_ispri(ir->t)) {
2398 emit_movmroi(as, RID_BASE, ofs, ir->i);
2400 if ((sn & (SNAP_CONT|SNAP_FRAME))) {
2401 if (s != 0) /* Do not overwrite link to previous frame. */
2402 emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
2403 } else {
2404 if (!(LJ_64 && irt_islightud(ir->t)))
2405 emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
2408 checkmclim(as);
2410 lua_assert(map + nent == flinks);
2413 /* -- GC handling --------------------------------------------------------- */
2415 /* Check GC threshold and do one or more GC steps. */
2416 static void asm_gc_check(ASMState *as)
2418 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
2419 IRRef args[2];
2420 MCLabel l_end;
2421 Reg tmp;
2422 ra_evictset(as, RSET_SCRATCH);
2423 l_end = emit_label(as);
2424 /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
2425 asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
2426 emit_rr(as, XO_TEST, RID_RET, RID_RET);
2427 args[0] = ASMREF_TMP1; /* global_State *g */
2428 args[1] = ASMREF_TMP2; /* MSize steps */
2429 asm_gencall(as, ci, args);
2430 tmp = ra_releasetmp(as, ASMREF_TMP1);
2431 emit_loada(as, tmp, J2G(as->J));
2432 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
2433 /* Jump around GC step if GC total < GC threshold. */
2434 emit_sjcc(as, CC_B, l_end);
2435 emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold);
2436 emit_getgl(as, tmp, gc.total);
2437 as->gcsteps = 0;
2438 checkmclim(as);
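/* Run-time sketch of the check (TMP1/TMP2 stand for the allocated
** argument registers):
**
**   mov  tmp, [g->gc.total]
**   cmp  tmp, [g->gc.threshold]
**   jb   l_end                    ; under the threshold: skip the GC step
**   mov  TMP2, as->gcsteps
**   mov  TMP1, g
**   call lj_gc_step_jit
**   test eax, eax
**   jne  ->exit                   ; exit trace if GCSatomic or GCSfinalize
** l_end:
*/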
2441 /* -- Loop handling ------------------------------------------------------- */
2443 /* Fixup the loop branch. */
2444 static void asm_loop_fixup(ASMState *as)
2446 MCode *p = as->mctop;
2447 MCode *target = as->mcp;
2448 if (as->realign) { /* Realigned loops use short jumps. */
2449 as->realign = NULL; /* Stop another retry. */
2450 lua_assert(((intptr_t)target & 15) == 0);
2451 if (as->loopinv) { /* Inverted loop branch? */
2452 p -= 5;
2453 p[0] = XI_JMP;
2454 lua_assert(target - p >= -128);
2455 p[-1] = (MCode)(target - p); /* Patch sjcc. */
2456 if (as->loopinv == 2)
2457 p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
2458 } else {
2459 lua_assert(target - p >= -128);
2460 p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
2461 p[-2] = XI_JMPs;
2463 } else {
2464 MCode *newloop;
2465 p[-5] = XI_JMP;
2466 if (as->loopinv) { /* Inverted loop branch? */
2467 /* asm_guardcc already inverted the jcc and patched the jmp. */
2468 p -= 5;
2469 newloop = target+4;
2470 *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */
2471 if (as->loopinv == 2) {
2472 *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */
2473 newloop = target+8;
2475 } else { /* Otherwise just patch jmp. */
2476 *(int32_t *)(p-4) = (int32_t)(target - p);
2477 newloop = target+3;
2479 /* Realign small loops and shorten the loop branch. */
2480 if (newloop >= p - 128) {
2481 as->realign = newloop; /* Force a retry and remember alignment. */
2482 as->curins = as->stopins; /* Abort asm_trace now. */
2483 as->T->nins = as->orignins; /* Remove any added renames. */
2488 /* -- Head of trace ------------------------------------------------------- */
2490 /* Coalesce BASE register for a root trace. */
2491 static void asm_head_root_base(ASMState *as)
2493 IRIns *ir = IR(REF_BASE);
2494 Reg r = ir->r;
2495 if (ra_hasreg(r)) {
2496 ra_free(as, r);
2497 if (rset_test(as->modset, r) || irt_ismarked(ir->t))
2498 ir->r = RID_INIT; /* No inheritance for modified BASE register. */
2499 if (r != RID_BASE)
2500 emit_rr(as, XO_MOV, r, RID_BASE);
2504 /* Coalesce or reload BASE register for a side trace. */
2505 static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
2507 IRIns *ir = IR(REF_BASE);
2508 Reg r = ir->r;
2509 if (ra_hasreg(r)) {
2510 ra_free(as, r);
2511 if (rset_test(as->modset, r) || irt_ismarked(ir->t))
2512 ir->r = RID_INIT; /* No inheritance for modified BASE register. */
2513 if (irp->r == r) {
2514 rset_clear(allow, r); /* Mark same BASE register as coalesced. */
2515 } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
2516 rset_clear(allow, irp->r);
2517 emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */
2518 } else {
2519 emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
2522 return allow;
2525 /* -- Tail of trace ------------------------------------------------------- */
2527 /* Fixup the tail code. */
2528 static void asm_tail_fixup(ASMState *as, TraceNo lnk)
2530 /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
2531 MCode *p = as->mctop;
2532 MCode *target, *q;
2533 int32_t spadj = as->T->spadjust;
2534 if (spadj == 0) {
2535 p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
2536 } else {
2537 MCode *p1;
2538 /* Patch stack adjustment. */
2539 if (checki8(spadj)) {
2540 p -= 3;
2541 p1 = p-6;
2542 *p1 = (MCode)spadj;
2543 } else {
2544 p1 = p-9;
2545 *(int32_t *)p1 = spadj;
2547 if ((as->flags & JIT_F_LEA_AGU)) {
2548 #if LJ_64
2549 p1[-4] = 0x48;
2550 #endif
2551 p1[-3] = (MCode)XI_LEA;
2552 p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
2553 p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
2554 } else {
2555 #if LJ_64
2556 p1[-3] = 0x48;
2557 #endif
2558 p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
2559 p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
2562 /* Patch exit branch. */
2563 target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
2564 *(int32_t *)(p-4) = jmprel(p, target);
2565 p[-5] = XI_JMP;
2566 /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
2567 for (q = as->mctop-1; q >= p; q--)
2568 *q = XI_NOP;
2569 as->mctop = p;
2572 /* Prepare tail of code. */
2573 static void asm_tail_prep(ASMState *as)
2575 MCode *p = as->mctop;
2576 /* Realign and leave room for backwards loop branch or exit branch. */
2577 if (as->realign) {
2578 int i = ((int)(intptr_t)as->realign) & 15;
2579 /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
2580 while (i-- > 0)
2581 *--p = XI_NOP;
2582 as->mctop = p;
2583 p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */
2584 } else {
2585 p -= 5; /* Space for exit branch (near jmp). */
2587 if (as->loopref) {
2588 as->invmcp = as->mcp = p;
2589 } else {
2590 /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
2591 as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
2592 as->invmcp = NULL;
2596 /* -- Instruction dispatch ------------------------------------------------ */
2598 /* Assemble a single instruction. */
2599 static void asm_ir(ASMState *as, IRIns *ir)
2601 switch ((IROp)ir->o) {
2602 /* Miscellaneous ops. */
2603 case IR_LOOP: asm_loop(as); break;
2604 case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
2605 case IR_USE:
2606 ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
2607 case IR_PHI: asm_phi(as, ir); break;
2608 case IR_HIOP: asm_hiop(as, ir); break;
2609 case IR_GCSTEP: asm_gcstep(as, ir); break;
2611 /* Guarded assertions. */
2612 case IR_LT: case IR_GE: case IR_LE: case IR_GT:
2613 case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
2614 case IR_EQ: case IR_NE: case IR_ABC:
2615 asm_comp(as, ir, asm_compmap[ir->o]);
2616 break;
2618 case IR_RETF: asm_retf(as, ir); break;
2620 /* Bit ops. */
2621 case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
2622 case IR_BSWAP: asm_bitswap(as, ir); break;
2624 case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
2625 case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
2626 case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;
2628 case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
2629 case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
2630 case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
2631 case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
2632 case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;
2634 /* Arithmetic ops. */
2635 case IR_ADD: asm_add(as, ir); break;
2636 case IR_SUB:
2637 if (irt_isnum(ir->t))
2638 asm_fparith(as, ir, XO_SUBSD);
2639 else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
2640 asm_intarith(as, ir, XOg_SUB);
2641 break;
2642 case IR_MUL:
2643 if (irt_isnum(ir->t))
2644 asm_fparith(as, ir, XO_MULSD);
2645 else
2646 asm_intarith(as, ir, XOg_X_IMUL);
2647 break;
2648 case IR_DIV:
2649 #if LJ_64 && LJ_HASFFI
2650 if (!irt_isnum(ir->t))
2651 asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
2652 IRCALL_lj_carith_divu64);
2653 else
2654 #endif
2655 asm_fparith(as, ir, XO_DIVSD);
2656 break;
2657 case IR_MOD:
2658 #if LJ_64 && LJ_HASFFI
2659 if (!irt_isint(ir->t))
2660 asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
2661 IRCALL_lj_carith_modu64);
2662 else
2663 #endif
2664 asm_intmod(as, ir);
2665 break;
2667 case IR_NEG:
2668 if (irt_isnum(ir->t))
2669 asm_fparith(as, ir, XO_XORPS);
2670 else
2671 asm_neg_not(as, ir, XOg_NEG);
2672 break;
2673 case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;
2675 case IR_MIN:
2676 if (irt_isnum(ir->t))
2677 asm_fparith(as, ir, XO_MINSD);
2678 else
2679 asm_min_max(as, ir, CC_G);
2680 break;
2681 case IR_MAX:
2682 if (irt_isnum(ir->t))
2683 asm_fparith(as, ir, XO_MAXSD);
2684 else
2685 asm_min_max(as, ir, CC_L);
2686 break;
2688 case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
2689 asm_fpmath(as, ir);
2690 break;
2691 case IR_POW:
2692 #if LJ_64 && LJ_HASFFI
2693 if (!irt_isnum(ir->t))
2694 asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
2695 IRCALL_lj_carith_powu64);
2696 else
2697 #endif
2698 asm_fppowi(as, ir);
2699 break;
2701 /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
2702 case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
2703 case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
2704 case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;
2706 /* Memory references. */
2707 case IR_AREF: asm_aref(as, ir); break;
2708 case IR_HREF: asm_href(as, ir); break;
2709 case IR_HREFK: asm_hrefk(as, ir); break;
2710 case IR_NEWREF: asm_newref(as, ir); break;
2711 case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
2712 case IR_FREF: asm_fref(as, ir); break;
2713 case IR_STRREF: asm_strref(as, ir); break;
2715 /* Loads and stores. */
2716 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2717 asm_ahuvload(as, ir);
2718 break;
2719 case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
2720 case IR_SLOAD: asm_sload(as, ir); break;
2722 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
2723 case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;
2725 /* Allocations. */
2726 case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
2727 case IR_TNEW: asm_tnew(as, ir); break;
2728 case IR_TDUP: asm_tdup(as, ir); break;
2729 case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
2731 /* Write barriers. */
2732 case IR_TBAR: asm_tbar(as, ir); break;
2733 case IR_OBAR: asm_obar(as, ir); break;
2735 /* Type conversions. */
2736 case IR_TOBIT: asm_tobit(as, ir); break;
2737 case IR_CONV: asm_conv(as, ir); break;
2738 case IR_TOSTR: asm_tostr(as, ir); break;
2739 case IR_STRTO: asm_strto(as, ir); break;
2741 /* Calls. */
2742 case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
2743 case IR_CALLXS: asm_callx(as, ir); break;
2744 case IR_CARG: break;
2746 default:
2747 setintV(&as->J->errinfo, ir->o);
2748 lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
2749 break;
2753 /* -- Trace setup --------------------------------------------------------- */
2755 /* Ensure there are enough stack slots for call arguments. */
2756 static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
2758 IRRef args[CCI_NARGS_MAX*2];
2759 int nslots;
2760 asm_collectargs(as, ir, ci, args);
2761 nslots = asm_count_call_slots(as, ci, args);
2762 if (nslots > as->evenspill) /* Leave room for args in stack slots. */
2763 as->evenspill = nslots;
2764 #if LJ_64
2765 return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
2766 #else
2767 return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
2768 #endif
2771 /* Target-specific setup. */
2772 static void asm_setup_target(ASMState *as)
2774 asm_exitstub_setup(as, as->T->nsnap);
2777 /* -- Trace patching ------------------------------------------------------ */
2779 static const uint8_t map_op1[256] = {
2780 0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x20,
2781 0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,
2782 0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
2783 0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
2784 #if LJ_64
2785 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x14,0x14,0x14,0x14,0x14,0x14,0x14,0x14,
2786 #else
2787 0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
2788 #endif
2789 0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
2790 0x51,0x51,0x92,0x92,0x10,0x10,0x12,0x11,0x45,0x86,0x52,0x93,0x51,0x51,0x51,0x51,
2791 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
2792 0x93,0x86,0x93,0x93,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
2793 0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x47,0x51,0x51,0x51,0x51,0x51,
2794 #if LJ_64
2795 0x59,0x59,0x59,0x59,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
2796 #else
2797 0x55,0x55,0x55,0x55,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
2798 #endif
2799 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,
2800 0x93,0x93,0x53,0x51,0x70,0x71,0x93,0x86,0x54,0x51,0x53,0x51,0x51,0x52,0x51,0x51,
2801 0x92,0x92,0x92,0x92,0x52,0x52,0x51,0x51,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
2802 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x45,0x45,0x47,0x52,0x51,0x51,0x51,0x51,
2803 0x10,0x51,0x10,0x10,0x51,0x51,0x63,0x66,0x51,0x51,0x51,0x51,0x51,0x51,0x92,0x92
2804 };
2806 static const uint8_t map_op2[256] = {
2807 0x93,0x93,0x93,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x51,0x52,0x51,0x93,0x52,0x94,
2808 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2809 0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2810 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x34,0x51,0x35,0x51,0x51,0x51,0x51,0x51,
2811 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2812 0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2813 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2814 0x94,0x54,0x54,0x54,0x93,0x93,0x93,0x52,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2815 0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,
2816 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2817 0x52,0x52,0x52,0x93,0x94,0x93,0x51,0x51,0x52,0x52,0x52,0x93,0x94,0x93,0x93,0x93,
2818 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x94,0x93,0x93,0x93,0x93,0x93,
2819 0x93,0x93,0x94,0x93,0x94,0x94,0x94,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
2820 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2821 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
2822 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x52
2823 };
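/* A rough key to both tables, inferred from the decoder below (intended as
** documentation, not a spec). The low nibble is the base instruction
** length, the high nibble classifies the opcode:
**   0: fixed length, +4 with REX.W (mov r64, imm64)
**   1: prefix byte, prefix kind flags in the low nibble
**   2: 0F escape, continue in map_op2
**   3: second escape byte (0F 38/0F 3A), ModRM follows
**   4: fixed length, imm32 shrinks to imm16 under a 66 prefix
**   5: fixed length
**   6: group 3 (F6/F7), immediate only for TEST
**   7: VEX prefix (C4/C5)
**   8: like 4, but with a ModRM operand
**   9: base length plus ModRM/SIB/displacement
*/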
2825 static uint32_t asm_x86_inslen(const uint8_t* p)
2827 uint32_t result = 0;
2828 uint32_t prefixes = 0;
2829 uint32_t x = map_op1[*p];
2830 for (;;) {
2831 switch (x >> 4) {
2832 case 0: return result + x + (prefixes & 4);
2833 case 1: prefixes |= x; x = map_op1[*++p]; result++; break;
2834 case 2: x = map_op2[*++p]; break;
2835 case 3: p++; goto mrm;
2836 case 4: result -= (prefixes & 2); /* fallthrough */
2837 case 5: return result + (x & 15);
2838 case 6: /* Group 3. */
2839 if (p[1] & 0x38) x = 2; /* No immediate for NOT/NEG/MUL/DIV/IDIV. */
2840 else if ((prefixes & 2) && (x == 0x66)) x = 4; /* TEST mrm, imm16. */
2841 goto mrm; /* Also account for a possible memory operand. */
2842 case 7: /* VEX c4/c5. */
2843 if (LJ_32 && p[1] < 0xc0) {
2844 x = 2;
2845 goto mrm;
2847 if (x == 0x70) {
2848 x = *++p & 0x1f;
2849 result++;
2850 if (x >= 2) {
2851 p += 2;
2852 result += 2;
2853 goto mrm;
2856 p++;
2857 result++;
2858 x = map_op2[*++p];
2859 break;
2860 case 8: result -= (prefixes & 2); /* fallthrough */
2861 case 9: mrm: /* ModR/M and possibly SIB. */
2862 result += (x & 15);
2863 x = *++p;
2864 switch (x >> 6) {
2865 case 0: if ((x & 7) == 5) return result + 4; break;
2866 case 1: result++; break;
2867 case 2: result += 4; break;
2868 case 3: return result;
2870 if ((x & 7) == 4) {
2871 result++;
2872 if (x < 0x40 && (p[1] & 7) == 5) result += 4;
2874 return result;
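/* Worked example: for the near branch 0F 84 xx xx xx xx (jz rel32),
** map_op1[0x0F] = 0x20 escapes to map_op2, map_op2[0x84] = 0x46, and
** class 4 with base length 6 makes asm_x86_inslen() return 6, exactly the
** jcc size lj_asm_patchexit() below has to step over.
*/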
2879 /* Patch exit jumps of existing machine code to a new target. */
2880 void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
2882 MCode *p = T->mcode;
2883 MCode *mcarea = lj_mcode_patch(J, p, 0);
2884 MSize len = T->szmcode;
2885 MCode *px = exitstub_addr(J, exitno) - 6;
2886 MCode *pe = p+len-6;
2887 uint32_t stateaddr = u32ptr(&J2G(J)->vmstate);
2888 if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
2889 *(int32_t *)(p+len-4) = jmprel(p+len, target);
2890 /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
2891 for (; p < pe; p += asm_x86_inslen(p))
2892 if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi)
2893 break;
2894 lua_assert(p < pe);
2895 for (; p < pe; p += asm_x86_inslen(p))
2896 if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px)
2897 *(int32_t *)(p+2) = jmprel(p+6, target);
2898 lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
2899 lj_mcode_patch(J, mcarea, 1);
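/* Note: (*(uint16_t *)p & 0xf0ff) == 0x800f matches the little-endian byte
** pair 0F 8x, i.e. any near jcc. Since the scan advances by
** asm_x86_inslen(p), only genuine instruction starts are examined, so
** jcc-like byte patterns inside immediates or displacements cannot be
** mistaken for exit branches.
*/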