/*
** x86/x64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
*/
/* -- Guard handling ------------------------------------------------------ */
/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
  MCode *mxp = as->mcbot;
  MCode *mxpstart = mxp;
  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
  /* Push low byte of exitno for each exit stub. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
  /* Push the high byte of the exitno for each exit stub group. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  *mxp++ = 2*sizeof(void *);
  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
  /* Jump to exit handler which fills in the ExitState. */
  *mxp++ = XI_JMP; mxp += 4;
  *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
  /* Commit the code for this group (even if assembly fails later on). */
  lj_mcode_commitbot(as->J, mxp);
  as->mclim = as->mcbot + MCLIM_REDZONE;
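/* Illustrative sketch (added for clarity, not part of the original source):
** each stub in the group above is 2+2 bytes, roughly
**   push  e&0xff        ; XI_PUSHi8 + imm8, low byte of the exit number
**   jmp   grouptail     ; XI_JMPs + rel8, skips the remaining stubs
** and the shared group tail then pushes the high byte of the exit number,
** stores DISPATCH at the original stack slot 0 and jumps to
** lj_vm_exit_handler, which fills in the ExitState.
*/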
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
/* Emit conditional branch to exit for guard.
** It's important to emit this *after* all registers have been allocated,
** because rematerializations may invalidate the flags.
*/
static void asm_guardcc(ASMState *as, int cc)
  MCode *target = exitstub_addr(as->J, as->snapno);
  if (LJ_UNLIKELY(p == as->invmcp)) {
    *(int32_t *)(p+1) = jmprel(p+5, target);
    emit_sjcc(as, cc, target);
  emit_jcc(as, cc, target);
/* -- Memory operand fusion ----------------------------------------------- */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31
/* Check if a reference is a signed 32 bit constant. */
static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
  if (ir->o != IR_KINT64) {
  } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
    *k = (int32_t)ir_kint64(ir)->u64;
/* Check if there's no conflicting instruction between curins and ref.
** Also avoid fusing loads if there are multiple references.
*/
static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
    if (i > ref + CONFLICT_SEARCH_LIM)
      return 0;  /* Give up, ref is too far away. */
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
  return 1;  /* Ok, no conflict. */
/* Fuse array base into memory operand. */
static IRRef asm_fuseabase(ASMState *as, IRRef ref)
  IRIns *irb = IR(ref);
  if (irb->o == IR_FLOAD) {
    IRIns *ira = IR(irb->op1);
    lua_assert(irb->op2 == IRFL_TAB_ARRAY);
    /* We can avoid the FLOAD of t->array for colocated arrays. */
    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
        !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
      return irb->op1;  /* Table obj. */
  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
    /* Fuse base offset (vararg load). */
    as->mrm.ofs = IR(irb->op2)->i;
  return ref;  /* Otherwise use the given array base. */
/* Fuse array reference into memory operand. */
static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
  lua_assert(ir->o == IR_AREF);
  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += 8*irx->i;
    as->mrm.idx = RID_NONE;
    rset_clear(allow, as->mrm.base);
    as->mrm.scale = XM_SCALE8;
    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
    ** Doesn't help much without ABCelim, but reduces register pressure.
    */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
        irx->o == IR_ADD && irref_isk(irx->op2)) {
      as->mrm.ofs += 8*IR(irx->op2)->i;
      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
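/* Rough effect of the fusion above (illustrative note, not from the original
** source): an AREF access t[i] ends up as a single x86 memory operand of the
** form [array_base + i*8 + ofs], with XM_SCALE8 because array slots are
** 8-byte TValues; a constant index, or a fused i+k, is folded into the
** displacement instead of occupying an index register.
*/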
/* Fuse array/hash/upvalue reference into memory operand.
** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
** pass the final allow mask, excluding any GPRs used for other inputs.
** In particular: 2-operand GPR instructions need to call ra_dest() first!
*/
static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
  if (ra_noreg(ir->r)) {
    switch ((IROp)ir->o) {
      if (mayfuse(as, ref)) {
        asm_fusearef(as, ir, allow);
      if (mayfuse(as, ref)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
        as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        as->mrm.idx = RID_NONE;
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
        as->mrm.ofs = ptr2addr(&uv->tv);
        as->mrm.base = as->mrm.idx = RID_NONE;
  lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  as->mrm.idx = RID_NONE;
/* Fuse FLOAD/FREF reference into memory operand. */
static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
  lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
  as->mrm.ofs = field_ofs[ir->op2];
  as->mrm.idx = RID_NONE;
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
    as->mrm.base = RID_NONE;
    as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
/* Fuse string reference into memory operand. */
static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
  lua_assert(ir->o == IR_STRREF);
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = sizeof(GCstr);
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
    Reg r = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, r);
    as->mrm.base = (uint8_t)r;
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += irr->i;
    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
      as->mrm.ofs += IR(irr->op2)->i;
      r = ra_alloc1(as, irr->op1, allow);
      r = ra_alloc1(as, ir->op2, allow);
    if (as->mrm.base == RID_NONE)
      as->mrm.base = (uint8_t)r;
      as->mrm.idx = (uint8_t)r;
static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
  as->mrm.idx = RID_NONE;
  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    as->mrm.base = RID_NONE;
  } else if (ir->o == IR_STRREF) {
    asm_fusestrref(as, ir, allow);
    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
    if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
    as->mrm.scale = XM_SCALE1;
      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
    if (canfuse(as, irx) && ra_noreg(irx->r)) {
      if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
        /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
        as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
      } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
        /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
        as->mrm.scale = XM_SCALE2;
      r = ra_alloc1(as, idx, allow);
      rset_clear(allow, r);
      as->mrm.idx = (uint8_t)r;
    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
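/* Added note (based on the x86 SIB encoding, not original commentary): the
** scale stored above as (shift << 6) lands in bits 6-7 of the SIB byte, so
** IR shift counts 0-3 select the hardware scale factors 1/2/4/8 directly.
*/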
/* Fuse load into memory operand. */
static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
  if (ra_hasreg(ir->r)) {
    if (allow != RSET_EMPTY) {  /* Fast path. */
      ra_noweak(as, ir->r);
    /* Force a spill if only memory operands are allowed (asm_x87load). */
    as->mrm.base = RID_ESP;
    as->mrm.ofs = ra_spill(as, ir);
    as->mrm.idx = RID_NONE;
  if (ir->o == IR_KNUM) {
    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
    lua_assert(allow != RSET_EMPTY);
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      as->mrm.ofs = ptr2addr(ir_knum(ir));
      as->mrm.base = as->mrm.idx = RID_NONE;
  } else if (mayfuse(as, ref)) {
    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
          noconflict(as, ref, IR_RETF, 0)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
        as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
        as->mrm.idx = RID_NONE;
    } else if (ir->o == IR_FLOAD) {
      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
      if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
          noconflict(as, ref, IR_FSTORE, 0)) {
        asm_fusefref(as, ir, xallow);
    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
        asm_fuseahuref(as, ir->op1, xallow);
    } else if (ir->o == IR_XLOAD) {
      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
      */
      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
          noconflict(as, ref, IR_XSTORE, 0)) {
        asm_fusexref(as, ir->op1, xallow);
    } else if (ir->o == IR_VLOAD) {
      asm_fuseahuref(as, ir->op1, xallow);
  if (!(as->freeset & allow) &&
      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
  return ra_allocref(as, ref, allow);
/* -- Calls --------------------------------------------------------------- */
/* Count the required number of stack slots for a call. */
static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
  uint32_t i, nargs = CCI_NARGS(ci);
  nslots = (int)(nargs*2);  /* Only matters for more than four args. */
  int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      if (nfpr > 0) nfpr--; else nslots += 2;
      if (ngpr > 0) ngpr--; else nslots += 2;
  if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
  else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
      if (ngpr > 0) ngpr--; else nslots++;
/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = STACKARG_OFS;
  uint32_t gprs = REGARG_GPRS;
  Reg fpr = REGARG_FIRSTFPR;
  MCode *patchnfpr = NULL;
  if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
    if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
      gprs = (REGARG_GPRS & 31);
    else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
  if ((void *)ci->func)
    emit_call(as, ci->func);
  if ((ci->flags & CCI_VARARG)) {  /* Special handling for vararg calls. */
    for (n = 0; n < 4 && n < nargs; n++) {
      IRIns *ir = IR(args[n]);
      if (irt_isfp(ir->t))  /* Duplicate FPRs in GPRs. */
        emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
                ((gprs >> (n*5)) & 31));  /* Either MOVD or MOVQ. */
    patchnfpr = --as->mcp;  /* Indicate number of used FPRs in register al. */
    *--as->mcp = XI_MOVrib | RID_EAX;
  for (n = 0; n < nargs; n++) {  /* Setup args. */
#if LJ_64 && LJ_ABI_WIN
    /* Windows/x64 argument registers are strictly positional. */
    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
    /* POSIX/x64 argument registers are used in order of appearance. */
    if (irt_isfp(ir->t)) {
      r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
      r = gprs & 31; gprs >>= 5;
    if (ref && irt_isfp(ir->t)) {
      r = gprs & 31; gprs >>= 5;
    if (r) {  /* Argument is in a register. */
      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
        if (ir->o == IR_KINT64)
          emit_loadu64(as, r, ir_kint64(ir)->u64);
          emit_loadi(as, r, ir->i);
        lua_assert(rset_test(as->freeset, r));  /* Must have been evicted. */
        if (ra_hasreg(ir->r)) {
          ra_noweak(as, ir->r);
          emit_movrr(as, ir, r, ir->r);
          ra_allocref(as, ref, RID2RSET(r));
    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
      lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref)));  /* No float k. */
      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
        /* Split stores for unaligned FP consts. */
        emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
        emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
        r = ra_alloc1(as, ref, RSET_FPR);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
    } else {  /* Non-FP argument is on stack. */
      if (LJ_32 && ref < ASMREF_TMP1) {
        emit_movmroi(as, RID_ESP, ofs, ir->i);
        r = ra_alloc1(as, ref, RSET_GPR);
        emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
      ofs += sizeof(intptr_t);
#if LJ_64 && !LJ_ABI_WIN
  if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
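/* Background note (added, not from the original source): on POSIX/x64 the
** ABI expects AL to hold the number of vector registers used by a vararg
** call. The XI_MOVrib|RID_EAX byte emitted above is that "mov al, imm8";
** its immediate is patched through patchnfpr once the final FPR count is
** known, which is what the last line of this function does.
*/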
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
  RegSet drop = RSET_SCRATCH;
  int hiop = (LJ_32 && (ir+1)->o == IR_HIOP);
  if ((ci->flags & CCI_NOFPRCLOBBER))
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (irt_isfp(ir->t)) {
    int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
    if ((ci->flags & CCI_CASTU64)) {
      if (ra_hasreg(dest)) {
        ra_modified(as, dest);
        emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
      if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
      ra_destreg(as, ir, RID_FPRET);
    /* Number result is in x87 st0 for x86 calling convention. */
    if (ra_hasreg(dest)) {
      ra_modified(as, dest);
      emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
    if ((ci->flags & CCI_CASTU64)) {
      emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
      emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
      emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
                irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
    lua_assert(!irt_ispri(ir->t));
    ra_destreg(as, ir, RID_RET);
  } else if (LJ_32 && irt_isfp(ir->t)) {
    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
static void asm_call(ASMState *as, IRIns *ir)
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
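/* Reminder (added note): the assembler emits machine code backwards, from
** high toward low addresses. That is why asm_setupresult() is invoked before
** asm_gencall() here, yet the result handling ends up after the call in the
** final instruction stream.
*/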
/* Return a constant function pointer or NULL for indirect calls. */
static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
    return (void *)irf->i;
  if (irref_isk(func)) {
    if (irf->o == IR_KINT64)
      p = (MCode *)(void *)ir_k64(irf)->u64;
      p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
    if (p - as->mcp == (int32_t)(p - as->mcp))
      return p;  /* Call target is still in +-2GB range. */
    /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
static void asm_callx(ASMState *as, IRIns *ir)
  IRRef args[CCI_NARGS_MAX];
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
  if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
    spadj = 4 * asm_count_call_slots(as, &ci, args);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  ci.func = (ASMFunction)asm_callx_func(as, irf, func);
  if (!(void *)ci.func) {
    /* Use a (hoistable) non-scratch register for indirect calls. */
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r = ra_alloc1(as, func, allow);
    if (LJ_32) emit_spsub(as, spadj);  /* Above code may cause restores! */
    emit_rr(as, XO_GROUP5, XOg_CALL, r);
    emit_spsub(as, spadj);
  asm_gencall(as, &ci, args);
/* -- Returns ------------------------------------------------------------- */
/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
/* -- Type conversions ---------------------------------------------------- */
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_P);
  asm_guardcc(as, CC_NE);
  emit_rr(as, XO_UCOMISD, left, tmp);
  emit_rr(as, XO_CVTSI2SD, tmp, dest);
  if (!(as->flags & JIT_F_SPLIT_XMM))
    emit_rr(as, XO_XORPS, tmp, tmp);  /* Avoid partial register stall. */
  emit_rr(as, XO_CVTTSD2SI, dest, left);
  /* Can't fuse since left is needed twice. */
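/* How the guard works (added explanation): the value is converted with
** cvttsd2si, converted back with cvtsi2sd and compared against the original
** with ucomisd. CC_NE fires if the round-trip changed the value (the number
** was not an integer) and CC_P fires on an unordered compare (NaN), so both
** cases take the exit.
*/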
static void asm_tobit(ASMState *as, IRIns *ir)
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp = ra_noreg(IR(ir->op1)->r) ?
              ra_alloc1(as, ir->op1, RSET_FPR) :
              ra_scratch(as, RSET_FPR);
  Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
  emit_rr(as, XO_MOVDto, tmp, dest);
  emit_mrm(as, XO_ADDSD, tmp, right);
  ra_left(as, tmp, ir->op1);
static void asm_conv(ASMState *as, IRIns *ir)
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lua_assert(irt_type(ir->t) != st);
  lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64)));  /* Handled by SPLIT. */
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      Reg left = asm_fuseload(as, lref, RSET_FPR);
      emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
      if (left == dest) return;  /* Avoid the XO_XORPS. */
    } else if (LJ_32 && st == IRT_U32) {  /* U32 to FP conversion on x86. */
      /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
      cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
      Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_rr(as, XO_CVTSD2SS, dest, dest);
      emit_rr(as, XO_SUBSD, dest, bias);  /* Subtract 2^52+2^51 bias. */
      emit_rr(as, XO_XORPS, dest, bias);  /* Merge bias and integer. */
      emit_loadn(as, bias, k);
      emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
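      /* Worked example of the bias trick above (added, not original
      ** commentary): 0x4338000000000000 is the double 2^52+2^51. Merging the
      ** u32 into its low 32 mantissa bits (movd + xorps) yields the exact
      ** double 2^52 + 2^51 + u32, since in that exponent range one mantissa
      ** ulp equals 1. Subtracting the bias again leaves plain (double)u32,
      ** e.g. u32 = 7: (2^52+2^51+7) - (2^52+2^51) = 7.
      */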
    } else {  /* Integer to FP conversion. */
      Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
                 ra_alloc1(as, lref, RSET_GPR) :
                 asm_fuseload(as, lref, RSET_GPR);
      if (LJ_64 && st == IRT_U64) {
        MCLabel l_end = emit_label(as);
        const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
        emit_rma(as, XO_ADDSD, dest, k);  /* Add 2^64 to compensate. */
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, left|REX_64, left);  /* Check if u64 >= 2^63. */
      emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
               dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
    if (!(as->flags & JIT_F_SPLIT_XMM))
      emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
      Reg dest = ra_dest(as, ir, RSET_GPR);
      x86Op op = st == IRT_NUM ?
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
      if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
        /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
        /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
        Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
                                          ra_scratch(as, RSET_FPR);
        MCLabel l_end = emit_label(as);
          emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
        emit_rr(as, op, dest|REX_64, tmp);
          emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
            LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
          emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
            LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, dest|REX_64, dest);  /* Check if dest negative. */
        emit_rr(as, op, dest|REX_64, tmp);
        ra_left(as, tmp, lref);
        Reg left = asm_fuseload(as, lref, RSET_FPR);
        if (LJ_64 && irt_isu32(ir->t))
          emit_rr(as, XO_MOV, dest, dest);  /* Zero hiword. */
                 (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    RegSet allow = RSET_GPR;
    lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
      op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_U8) {
      op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_I16) {
    left = asm_fuseload(as, lref, allow);
    /* Add extra MOV if source is already in wrong register. */
    if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
      Reg tmp = ra_scratch(as, allow);
      emit_rr(as, op, dest, tmp);
      emit_rr(as, XO_MOV, tmp, left);
      emit_mrm(as, op, dest, left);
  } else {  /* 32/64 bit integer conversions. */
    if (LJ_32) {  /* Only need to handle 32/32 bit no-op (cast) on x86. */
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
    } else if (irt_is64(ir->t)) {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
      Reg dest = ra_dest(as, ir, RSET_GPR);
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        emit_mrm(as, XO_MOV, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
#if LJ_32 && LJ_HASFFI
/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */

/* 64 bit integer to FP conversion in 32 bit mode. */
static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
  Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  if (ra_hasreg(dest)) {
    ra_modified(as, dest);
    emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
  emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
            irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
  if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
    MCLabel l_end = emit_label(as);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
    emit_sjcc(as, CC_NS, l_end);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if u64 >= 2^63. */
    lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
  emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
  /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
  emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
  emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
/* FP to 64 bit integer conversion in 32 bit mode. */
static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  lua_assert(st == IRT_NUM || st == IRT_FLOAT);
  lua_assert(dt == IRT_I64 || dt == IRT_U64);
  lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
  hi = ra_dest(as, ir, RSET_GPR);
  lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
  if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
  /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
  if (!(as->flags & JIT_F_SSE3)) {  /* Set FPU rounding mode to default. */
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
    emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
    emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
    /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
    MCLabel l_pop, l_end = emit_label(as);
    emit_x87op(as, XI_FPOP);
    l_pop = emit_label(as);
    emit_sjmp(as, l_end);
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3))
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
    emit_sjcc(as, CC_NS, l_pop);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if out-of-range (2^63). */
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3)) {  /* Truncation is easy with SSE3. */
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
    } else {  /* Otherwise set FPU rounding mode to truncate before the store. */
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
      emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
      emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
      emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
      emit_loadi(as, lo, 0xc00);
      emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
  emit_x87op(as, XI_FDUP);
  emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
           st == IRT_NUM ? XOg_FLDq : XOg_FLDd,
           asm_fuseload(as, ir->op1, RSET_EMPTY));
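/* Added note on the control-word dance above: fnstcw stores the x87 control
** word, OR-ing in 0xc00 sets the rounding-control field (bits 10-11) to
** "truncate toward zero" for the fistp, and the original word saved on the
** stack is reloaded with fldcw afterwards. With SSE3, fisttp truncates
** unconditionally, so no control-word changes are needed.
*/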
static void asm_strto(ASMState *as, IRIns *ir)
  /* Force a spill slot for the destination register (if any). */
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  RegSet drop = RSET_SCRATCH;
  if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
    rset_set(drop, ir->r);  /* WIN64 doesn't spill all FPRs. */
  ra_evictset(as, drop);
  asm_guardcc(as, CC_E);
  emit_rr(as, XO_TEST, RID_RET, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
            RID_ESP, sps_scale(ir->s));
static void asm_tostr(ASMState *as, IRIns *ir)
  IRIns *irl = IR(ir->op1);
  if (irt_isnum(irl->t)) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
              RID_ESP, ra_spill(as, irl));
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusearef(as, ir, RSET_GPR);
  if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
    emit_mrm(as, XO_LEA, dest, RID_MRM);
  else if (as->mrm.base != dest)
    emit_rr(as, XO_MOV, dest, as->mrm.base);
/* Merge NE(HREF, niltv) check. */
static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
  /* Assumes nothing else generates NE of HREF. */
  if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
    p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
    /* Ensure no loop branch inversion happened. */
    if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
      as->mcp = p;  /* Kill cmp reg, imm32 + jz exit. */
      return p + *(int32_t *)(p-4);  /* Return exit address. */
/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
*/
static void asm_href(ASMState *as, IRIns *ir)
  MCode *nilexit = merge_href_niltv(as, ir);  /* Do this before any restores. */
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, tmp = RID_NONE;
  IRIns *irkey = IR(ir->op2);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  MCLabel l_end, l_loop, l_next;
    rset_clear(allow, tab);
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
      tmp = ra_scratch(as, rset_exclude(allow, key));
  /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
  l_end = emit_label(as);
  if (nilexit && ir[1].o == IR_NE) {
    emit_jcc(as, CC_E, nilexit);  /* XI_JMP is not found by lj_asm_patchexit. */
    emit_loada(as, dest, niltvg(J2G(as->J)));
  /* Follow hash chain until the end. */
  l_loop = emit_sjcc_label(as, CC_NZ);
  emit_rr(as, XO_TEST, dest, dest);
  emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
  l_next = emit_label(as);
  /* Type and value comparison. */
    emit_jcc(as, CC_E, nilexit);
    emit_sjcc(as, CC_E, l_end);
  if (irt_isnum(kt)) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 (int32_t)ir_knum(irkey)->u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 (int32_t)ir_knum(irkey)->u32.hi);
      emit_sjcc(as, CC_P, l_next);
      emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
      emit_sjcc(as, CC_AE, l_next);
      /* The type check avoids NaN penalties and complaints from Valgrind. */
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
      emit_i8(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
  } else if (irt_islightud(kt)) {
    emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
    if (!irt_ispri(kt)) {
      lua_assert(irt_isaddr(kt));
        emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
                   ptr2addr(ir_kgc(irkey)));
        emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
      emit_sjcc(as, CC_NE, l_next);
      lua_assert(!irt_isnil(kt));
      emit_i8(as, irt_toitype(kt));
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
  emit_sfixup(as, l_loop);
  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(irkey) : 1;
    emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
    emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
    if ((as->flags & JIT_F_PREFER_IMUL)) {
      emit_i8(as, sizeof(Node));
      emit_rr(as, XO_IMULi8, dest, dest);
      emit_shifti(as, XOg_SHL, dest, 3);
      emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
      emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hashrot() in lj_tab.c. */
      emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
      emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
      emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
      emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
      emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
      emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
      if (irt_isnum(kt)) {
        emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
        emit_shifti(as, XOg_SHR|REX_64, dest, 32);
        emit_rr(as, XO_MOV, tmp, dest);
        emit_rr(as, XO_MOVDto, key|REX_64, dest);
        emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
        emit_rr(as, XO_MOVDto, key, tmp);
        emit_rr(as, XO_MOV, tmp, key);
        emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
static void asm_hrefk(ASMState *as, IRIns *ir)
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  lua_assert(ofs % sizeof(Node) == 0);
  if (ra_hasreg(dest)) {
    if (dest == node && !(as->flags & JIT_F_LEA_AGU))
      emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
      emit_rmro(as, XO_LEA, dest, node, ofs);
  } else if (dest != node) {
    emit_rr(as, XO_MOV, dest, node);
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
    emit_rmro(as, XO_CMP, key|REX_64, node,
              ofs + (int32_t)offsetof(Node, key.u64));
    lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
                          ((uint64_t)irt_toitype(irkey->t) << 32) |
                          (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  l_exit = emit_label(as);
  if (irt_isnum(irkey->t)) {
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.lo),
               (int32_t)ir_knum(irkey)->u32.lo);
    emit_sjcc(as, CC_NE, l_exit);
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.hi),
               (int32_t)ir_knum(irkey)->u32.hi);
    if (!irt_ispri(irkey->t)) {
      lua_assert(irt_isgcv(irkey->t));
      emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
                 ofs + (int32_t)offsetof(Node, key.gcr),
                 ptr2addr(ir_kgc(irkey)));
      emit_sjcc(as, CC_NE, l_exit);
      lua_assert(!irt_isnil(irkey->t));
      emit_i8(as, irt_toitype(irkey->t));
      emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
                ofs + (int32_t)offsetof(Node, key.it));
static void asm_newref(ASMState *as, IRIns *ir)
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  if (ir->r == RID_SINK)
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t     */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  irkey = IR(ir->op2);
  if (irt_isnum(irkey->t)) {
    /* For numbers use the constant itself or a spill slot as a TValue. */
    if (irref_isk(ir->op2))
      emit_loada(as, tmp, ir_knum(irkey));
      emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
    /* Otherwise use g->tmptv to hold the TValue. */
    if (!irref_isk(ir->op2)) {
      Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
      emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
    } else if (!irt_ispri(irkey->t)) {
      emit_movmroi(as, tmp, 0, irkey->i);
    if (!(LJ_64 && irt_islightud(irkey->t)))
      emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
    emit_loada(as, tmp, &J2G(as->J)->tmptv);
static void asm_uref(ASMState *as, IRIns *ir)
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_rma(as, XO_MOV, dest, v);
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
      asm_guardcc(as, CC_NE);
      emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
      emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
    emit_rmro(as, XO_MOV, uv, func,
              (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
static void asm_fref(ASMState *as, IRIns *ir)
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusefref(as, ir, RSET_GPR);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
static void asm_strref(ASMState *as, IRIns *ir)
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusestrref(as, ir, RSET_GPR);
  if (as->mrm.base == RID_NONE)
    emit_loadi(as, dest, as->mrm.ofs);
  else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
    emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
    emit_mrm(as, XO_LEA, dest, RID_MRM);
/* -- Loads and stores ---------------------------------------------------- */

static void asm_fxload(ASMState *as, IRIns *ir)
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  if (ir->o == IR_FLOAD)
    asm_fusefref(as, ir, RSET_GPR);
    asm_fusexref(as, ir->op1, RSET_GPR);
  /* ir->op2 is ignored -- unaligned loads are ok on x86. */
  switch (irt_type(ir->t)) {
  case IRT_I8: xo = XO_MOVSXb; break;
  case IRT_U8: xo = XO_MOVZXb; break;
  case IRT_I16: xo = XO_MOVSXw; break;
  case IRT_U16: xo = XO_MOVZXw; break;
  case IRT_NUM: xo = XMM_MOVRM(as); break;
  case IRT_FLOAT: xo = XO_MOVSS; break;
    if (LJ_64 && irt_is64(ir->t))
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
  emit_mrm(as, xo, dest, RID_MRM);
static void asm_fxstore(ASMState *as, IRIns *ir)
  RegSet allow = RSET_GPR;
  Reg src = RID_NONE, osrc = RID_NONE;
  if (ir->r == RID_SINK)
  /* The IRT_I16/IRT_U16 stores should never be simplified for constant
  ** values since mov word [mem], imm16 has a length-changing prefix.
  */
  if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
      !asm_isk32(as, ir->op2, &k)) {
    RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
                    (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
    src = osrc = ra_alloc1(as, ir->op2, allow8);
    if (!LJ_64 && !rset_test(allow8, src)) {  /* Already in wrong register. */
      rset_clear(allow, osrc);
      src = ra_scratch(as, allow8);
    rset_clear(allow, src);
  if (ir->o == IR_FSTORE) {
    asm_fusefref(as, IR(ir->op1), allow);
    asm_fusexref(as, ir->op1, allow);
    if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
  if (ra_hasreg(src)) {
    switch (irt_type(ir->t)) {
    case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
    case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
    case IRT_NUM: xo = XO_MOVSDto; break;
    case IRT_FLOAT: xo = XO_MOVSSto; break;
    case IRT_LIGHTUD: lua_assert(0);  /* NYI: mask 64 bit lightuserdata. */
      if (LJ_64 && irt_is64(ir->t))
        lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
    emit_mrm(as, xo, src, RID_MRM);
    if (!LJ_64 && src != osrc) {
      ra_noweak(as, osrc);
      emit_rr(as, XO_MOV, src, osrc);
    if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
      emit_mrm(as, XO_MOVmib, 0, RID_MRM);
      lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
      emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
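/* Added rationale: "mov word ptr [mem], imm16" needs the 0x66 operand-size
** prefix in front of an opcode carrying a 16-bit immediate, i.e. a
** length-changing prefix. Many x86 decoders stall on such instructions, so
** 16-bit constant stores go through a register instead of being emitted as
** an immediate store.
*/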
static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
  if (ra_used(ir) || typecheck) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
      asm_guardcc(as, CC_NE);
      emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
      emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
      emit_rr(as, XO_MOV, tmp|REX_64, dest);
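/* Added note: on x64 a lightuserdata value carries its pointer in the low
** 47 bits of the tagged 64 bit word. The check above arithmetically shifts
** the loaded value right by 47 and compares the result against the expected
** tag pattern, guarding that the upper bits still denote a lightuserdata.
*/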
static void asm_ahuvload(ASMState *as, IRIns *ir)
  lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
             (LJ_DUALNUM && irt_isint(ir->t)));
  if (irt_islightud(ir->t)) {
    Reg dest = asm_load_lightud64(as, ir, 1);
    if (ra_hasreg(dest)) {
      asm_fuseahuref(as, ir->op1, RSET_GPR);
      emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
    RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
    Reg dest = ra_dest(as, ir, allow);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
  /* Always do the type check, even if the load result is unused. */
  asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
  if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
    lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
    emit_u32(as, LJ_TISNUM);
    emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
    emit_i8(as, irt_toitype(ir->t));
    emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
static void asm_ahustore(ASMState *as, IRIns *ir)
  if (ir->r == RID_SINK)
  if (irt_isnum(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    emit_mrm(as, XO_MOVSDto, src, RID_MRM);
  } else if (irt_islightud(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
    emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
    IRIns *irr = IR(ir->op2);
    RegSet allow = RSET_GPR;
    if (!irref_isk(ir->op2)) {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
    asm_fuseahuref(as, ir->op1, allow);
    if (ra_hasreg(src)) {
      emit_mrm(as, XO_MOVto, src, RID_MRM);
    } else if (!irt_ispri(irr->t)) {
      lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
      emit_i32(as, irr->i);
      emit_mrm(as, XO_MOVmi, 0, RID_MRM);
    emit_i32(as, (int32_t)irt_toitype(ir->t));
    emit_mrm(as, XO_MOVmi, 0, RID_MRM);
static void asm_sload(ASMState *as, IRIns *ir)
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
  lua_assert(LJ_DUALNUM ||
             !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    Reg left = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, left);  /* Frees dest reg. Do this before base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    emit_rmro(as, XMM_MOVRM(as), left, base, ofs);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else if (irt_islightud(t)) {
    Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
    if (ra_hasreg(dest)) {
      base = ra_alloc1(as, REF_BASE, RSET_GPR);
      emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
  } else if (ra_used(ir)) {
    RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
    Reg dest = ra_dest(as, ir, allow);
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      t.irt = irt_isint(t) ? IRT_NUM : IRT_INT;  /* Check for original type. */
      emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs);
    } else if (irt_isnum(t)) {
      emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
      emit_rmro(as, XO_MOV, dest, base, ofs);
    if (!(ir->op2 & IRSLOAD_TYPECHECK))
      return;  /* No type check: avoid base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    /* Need type check, even if the load result is unused. */
    asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
    if (LJ_64 && irt_type(t) >= IRT_NUM) {
      lua_assert(irt_isinteger(t) || irt_isnum(t));
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
      emit_i8(as, irt_toitype(t));
      emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
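/* Added note: a Lua stack slot is one 8-byte TValue, hence the slot offset
** 8*(op1-1) computed above (slot #1 sits at offset 0 from BASE). The type
** tag occupies the high 4 bytes of the slot, which is why the type check
** compares against [base+ofs+4].
*/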
/* -- Allocations --------------------------------------------------------- */

static void asm_cnew(ASMState *as, IRIns *ir)
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
              lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  lua_assert(sz != CTSIZE_INVALID);
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_setupresult(as, ir, ci);  /* GCcdata * */
  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r64 = sz == 8 ? REX_64 : 0;
    if (irref_isk(ir->op2)) {
      IRIns *irk = IR(ir->op2);
      uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 :
                                         (uint64_t)(uint32_t)irk->i;
      if (sz == 4 || checki32((int64_t)k)) {
        emit_i32(as, (int32_t)k);
        emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
        emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
        emit_loadu64(as, RID_ECX, k);
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
    int32_t ofs = sizeof(GCcdata);
      lua_assert(ir->o == IR_HIOP);
      if (irref_isk(ir->op2)) {
        emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
        Reg r = ra_alloc1(as, ir->op2, allow);
        emit_movtomro(as, r, RID_RET, ofs);
        rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
    lua_assert(sz == 4 || sz == 8);
  /* Combine initialization of marked, gct and ctypeid. */
  emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
  emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
           (int32_t)((~LJ_TCDATA<<8)+(ctypeid<<16)));
  emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
  emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
  asm_gencall(as, ci, args);
  emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));

#define asm_cnew(as, ir)	((void)0)
/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  MCLabel l_end = emit_label(as);
  emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, tmp, gc.grayagain);
  emit_i8(as, ~LJ_GC_BLACK);
  emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
static void asm_obar(ASMState *as, IRIns *ir)
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
  obj = IR(ir->op1)->r;
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_WHITES);
  if (irref_isk(ir->op2)) {
    GCobj *vp = ir_kgc(IR(ir->op2));
    emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
    Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
    emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
            (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
/* -- FP/int arithmetic and logic operations ------------------------------ */

/* Load reference onto x87 stack. Force a spill to memory if needed. */
static void asm_x87load(ASMState *as, IRRef ref)
  IRIns *ir = IR(ref);
  if (ir->o == IR_KNUM) {
    cTValue *tv = ir_knum(ir);
    if (tvispzero(tv))  /* Use fldz only for +0. */
      emit_x87op(as, XI_FLDZ);
    else if (tvispone(tv))
      emit_x87op(as, XI_FLD1);
      emit_rma(as, XO_FLDq, XOg_FLDq, tv);
  } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
             !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
    IRIns *iri = IR(ir->op1);
    emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
    emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
static int fpmjoin_pow(ASMState *as, IRIns *ir)
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
        irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      /* The modified regs must match with the *.dasc implementation. */
      RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
      if (ra_hasreg(ir->r))
        rset_clear(drop, ir->r);  /* Dest reg handled below. */
      ra_evictset(as, drop);
      ra_destreg(as, ir, RID_XMM0);
      emit_call(as, lj_vm_pow_sse);
      irx = IR(irpp->op1);
      if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
        irx->r = RID_INIT;  /* Avoid allocating xmm1 for x. */
      ra_left(as, RID_XMM0, irpp->op1);
      ra_left(as, RID_XMM1, irp->op2);
static void asm_fpmath(ASMState *as, IRIns *ir)
  IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER;
  if (fpm == IRFPM_SQRT) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
    emit_mrm(as, XO_SQRTSD, dest, left);
  } else if (fpm <= IRFPM_TRUNC) {
    if (as->flags & JIT_F_SSE4_1) {  /* SSE4.1 has a rounding instruction. */
      Reg dest = ra_dest(as, ir, RSET_FPR);
      Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
      /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
      ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
      ** This is atrocious, but the alternatives are much worse.
      */
      /* Round down/up/trunc == 1001/1010/1011. */
      emit_i8(as, 0x09 + fpm);
      emit_mrm(as, XO_ROUNDSD, dest, left);
      if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
        as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f;  /* Swap 0F and REX. */
      *--as->mcp = 0x66;  /* 1st byte of ROUNDSD opcode. */
1667 /* The modified regs must match with the *.dasc implementation. */
1668 RegSet drop
= RSET_RANGE(RID_XMM0
, RID_XMM3
+1)|RID2RSET(RID_EAX
);
1669 if (ra_hasreg(ir
->r
))
1670 rset_clear(drop
, ir
->r
); /* Dest reg handled below. */
1671 ra_evictset(as
, drop
);
1672 ra_destreg(as
, ir
, RID_XMM0
);
1673 emit_call(as
, fpm
== IRFPM_FLOOR
? lj_vm_floor_sse
:
1674 fpm
== IRFPM_CEIL
? lj_vm_ceil_sse
: lj_vm_trunc_sse
);
1675 ra_left(as
, RID_XMM0
, ir
->op1
);
1677 } else if (fpm
== IRFPM_EXP2
&& fpmjoin_pow(as
, ir
)) {
1678 /* Rejoined to pow(). */
1679 } else { /* Handle x87 ops. */
1680 int32_t ofs
= sps_scale(ir
->s
); /* Use spill slot or temp slots. */
1682 if (ra_hasreg(dest
)) {
1684 ra_modified(as
, dest
);
1685 emit_rmro(as
, XMM_MOVRM(as
), dest
, RID_ESP
, ofs
);
1687 emit_rmro(as
, XO_FSTPq
, XOg_FSTPq
, RID_ESP
, ofs
);
1688 switch (fpm
) { /* st0 = lj_vm_*(st0) */
1689 case IRFPM_EXP
: emit_call(as
, lj_vm_exp_x87
); break;
1690 case IRFPM_EXP2
: emit_call(as
, lj_vm_exp2_x87
); break;
1691 case IRFPM_SIN
: emit_x87op(as
, XI_FSIN
); break;
1692 case IRFPM_COS
: emit_x87op(as
, XI_FCOS
); break;
1693 case IRFPM_TAN
: emit_x87op(as
, XI_FPOP
); emit_x87op(as
, XI_FPTAN
); break;
1694 case IRFPM_LOG
: case IRFPM_LOG2
: case IRFPM_LOG10
:
1695 /* Note: the use of fyl2xp1 would be pointless here. When computing
1696 ** log(1.0+eps) the precision is already lost after 1.0 is added.
1697 ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
1699 emit_x87op(as
, XI_FYL2X
); break;
1703 emit_x87op(as
, XI_FPATAN
); asm_x87load(as
, ir
->op2
); break;
1705 emit_x87op(as
, XI_FPOP1
); emit_x87op(as
, XI_FSCALE
); break;
1706 default: lua_assert(0); break;
1709 default: lua_assert(0); break;
1711 asm_x87load(as
, ir
->op1
);
1713 case IRFPM_LOG
: emit_x87op(as
, XI_FLDLN2
); break;
1714 case IRFPM_LOG2
: emit_x87op(as
, XI_FLD1
); break;
1715 case IRFPM_LOG10
: emit_x87op(as
, XI_FLDLG2
); break;
1717 if (ir
->o
== IR_LDEXP
) asm_x87load(as
, ir
->op2
);
static void asm_fppowi(ASMState *as, IRIns *ir)
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_XMM0);
  emit_call(as, lj_vm_powi_sse);
  ra_left(as, RID_XMM0, ir->op1);
  ra_left(as, RID_EAX, ir->op2);
#if LJ_64 && LJ_HASFFI
static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}
#endif

static void asm_intmod(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static int asm_swapops(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  lua_assert(ra_noreg(irr->r));
  if (!irm_iscomm(lj_ir_mode[ir->o]))
    return 0;  /* Can't swap non-commutative operations. */
  if (irref_isk(ir->op2))
    return 0;  /* Don't swap constants to the left. */
  if (ra_hasreg(irl->r))
    return 1;  /* Swap if left already has a register. */
  if (ra_samehint(ir->r, irr->r))
    return 1;  /* Swap if dest and right have matching hints. */
  if (as->curins > as->loopref) {  /* In variant part? */
    if (ir->op2 < as->loopref && !irt_isphi(irr->t))
      return 0;  /* Keep invariants on the right. */
    if (ir->op1 < as->loopref && !irt_isphi(irl->t))
      return 1;  /* Swap invariants to the right. */
  }
  if (opisfusableload(irl->o))
    return 1;  /* Swap fusable loads to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
{
  IRRef lref = ir->op1;
  IRRef rref = ir->op2;
  RegSet allow = RSET_FPR;
  Reg dest;
  Reg right = IR(rref)->r;
  if (ra_hasreg(right)) {
    rset_clear(allow, right);
    ra_noweak(as, right);
  }
  dest = ra_dest(as, ir, allow);
  if (lref == rref) {
    right = dest;
  } else if (ra_noreg(right)) {
    if (asm_swapops(as, ir)) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    right = asm_fuseload(as, rref, rset_clear(allow, dest));
  }
  emit_mrm(as, xo, dest, right);
  ra_left(as, dest, lref);
}

static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
{
  IRRef lref = ir->op1;
  IRRef rref = ir->op2;
  RegSet allow = RSET_GPR;
  Reg dest, right;
  int32_t k = 0;
  if (as->flagmcp == as->mcp) {  /* Drop test r,r instruction. */
    as->flagmcp = NULL;
    as->mcp += (LJ_64 && *as->mcp != XI_TEST) ? 3 : 2;
  }
  right = IR(rref)->r;
  if (ra_hasreg(right)) {
    rset_clear(allow, right);
    ra_noweak(as, right);
  }
  dest = ra_dest(as, ir, allow);
  if (lref == rref) {
    right = dest;
  } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
    if (asm_swapops(as, ir)) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    right = asm_fuseload(as, rref, rset_clear(allow, dest));
  }
  if (irt_isguard(ir->t))  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_O);
  if (xa != XOg_X_IMUL) {
    if (ra_hasreg(right))
      emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
    else
      emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
  } else if (ra_hasreg(right)) {  /* IMUL r, mrm. */
    emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
  } else {  /* IMUL r, r, k. */
    /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
    Reg left = asm_fuseload(as, lref, RSET_GPR);
    x86Op xo;
    if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
    } else { emit_i32(as, k); xo = XO_IMULi; }
    emit_mrm(as, xo, REX_64IR(ir, dest), left);
  }
  ra_left(as, dest, lref);
}

/* LEA is really a 4-operand ADD with an independent destination register,
** up to two source registers and an immediate. One register can be scaled
** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
** simple ADD operations into a single instruction.
**
** Currently only a few common cases are supported:
** - 3-operand ADD: y = a+b; y = a+k   with a and b already allocated
** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
** - Right ADD fusion: y = a+(b+k)
** The omitted variants have already been reduced by FOLD.
**
** There are more fusion opportunities, like gathering shifts or joining
** common references. But these are probably not worth the trouble, since
** array indexing is not decomposed and already makes use of all fields
** of the ModRM operand.
*/
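/* Example: for y = a+(b+16), with a and b already in registers (say eax
** and ecx), right ADD fusion emits a single "lea edx, [eax+ecx+16]"
** instead of a mov plus two adds.
*/
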
static int asm_lea(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  RegSet allow = RSET_GPR;
  Reg dest;
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = 0;
  if (ra_hasreg(irl->r)) {
    rset_clear(allow, irl->r);
    ra_noweak(as, irl->r);
    as->mrm.base = irl->r;
    if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
      /* The PHI renaming logic does a better job in some cases. */
      if (ra_hasreg(ir->r) &&
          ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
           (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
        return 0;
      if (irref_isk(ir->op2)) {
        as->mrm.ofs = irr->i;
      } else {
        rset_clear(allow, irr->r);
        ra_noweak(as, irr->r);
        as->mrm.idx = irr->r;
      }
    } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
               irref_isk(irr->op2)) {
      Reg idx = ra_alloc1(as, irr->op1, allow);
      rset_clear(allow, idx);
      as->mrm.idx = (uint8_t)idx;
      as->mrm.ofs = IR(irr->op2)->i;
    } else {
      return 0;
    }
  } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
             (irref_isk(ir->op2) || irref_isk(irl->op2))) {
    Reg idx, base = ra_alloc1(as, irl->op1, allow);
    rset_clear(allow, base);
    as->mrm.base = (uint8_t)base;
    if (irref_isk(ir->op2)) {
      as->mrm.ofs = irr->i;
      idx = ra_alloc1(as, irl->op2, allow);
    } else {
      as->mrm.ofs = IR(irl->op2)->i;
      idx = ra_alloc1(as, ir->op2, allow);
    }
    rset_clear(allow, idx);
    as->mrm.idx = (uint8_t)idx;
  } else {
    return 0;
  }
  dest = ra_dest(as, ir, allow);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
  return 1;  /* Success. */
}

static void asm_add(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_ADDSD);
  else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
           irt_is64(ir->t) || !asm_lea(as, ir))
    asm_intarith(as, ir, XOg_ADD);
}

static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
  ra_left(as, dest, ir->op1);
}

static void asm_min_max(ASMState *as, IRIns *ir, int cc)
{
  Reg right, dest = ra_dest(as, ir, RSET_GPR);
  IRRef lref = ir->op1, rref = ir->op2;
  if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
  right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
  emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
  emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
  ra_left(as, dest, lref);
}

static void asm_bitswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
                    REX_64IR(ir, 0), dest, 0, as->mcp, 1);
  ra_left(as, dest, ir->op1);
}

static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
{
  IRRef rref = ir->op2;
  IRIns *irr = IR(rref);
  Reg dest;
  if (irref_isk(rref)) {  /* Constant shifts. */
    int shift;
    dest = ra_dest(as, ir, RSET_GPR);
    shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
    switch (shift) {
    case 0: break;
    case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
    default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
    }
  } else {  /* Variable shifts implicitly use register cl (i.e. ecx). */
    Reg right;
    dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
    if (dest == RID_ECX) {
      dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
      emit_rr(as, XO_MOV, RID_ECX, dest);
    }
    right = irr->r;
    if (ra_noreg(right))
      right = ra_allocref(as, rref, RID2RSET(RID_ECX));
    else if (right != RID_ECX)
      ra_scratch(as, RID2RSET(RID_ECX));
    emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
    if (right != RID_ECX) {
      ra_noweak(as, right);
      emit_rr(as, XO_MOV, RID_ECX, right);
    }
  }
  ra_left(as, dest, ir->op1);
  /*
  ** Note: avoid using the flags resulting from a shift or rotate!
  ** All of them cause a partial flag stall, except for r,1 shifts
  ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
  */
}

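/* This is why the flag-reuse optimization (as->flagmcp) is only applied to
** arithmetic ops (see asm_intarith), never to shifts: a comparison of a
** shift result always keeps its own test/cmp instruction.
*/
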
/* -- Comparisons --------------------------------------------------------- */

/* Virtual flags for unordered FP comparisons. */
#define VCC_U	0x1000		/* Unordered. */
#define VCC_P	0x2000		/* Needs extra CC_P branch. */
#define VCC_S	0x4000		/* Swap avoids CC_P branch. */
#define VCC_PS	(VCC_P|VCC_S)

/* Map of comparisons to flags. ORDER IR. */
#define COMPFLAGS(ci, cin, cu, cf)	((ci)+((cu)<<4)+((cin)<<8)+(cf))
static const uint16_t asm_compmap[IR_ABC+1] = {
  /*                 signed non-eq unsigned flags */
  /* LT  */ COMPFLAGS(CC_GE, CC_G,  CC_AE, VCC_PS),
  /* GE  */ COMPFLAGS(CC_L,  CC_L,  CC_B,  0),
  /* LE  */ COMPFLAGS(CC_G,  CC_G,  CC_A,  VCC_PS),
  /* GT  */ COMPFLAGS(CC_LE, CC_L,  CC_BE, 0),
  /* ULT */ COMPFLAGS(CC_AE, CC_A,  CC_AE, VCC_U),
  /* UGE */ COMPFLAGS(CC_B,  CC_B,  CC_B,  VCC_U|VCC_PS),
  /* ULE */ COMPFLAGS(CC_A,  CC_A,  CC_A,  VCC_U),
  /* UGT */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS),
  /* EQ  */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
  /* NE  */ COMPFLAGS(CC_E,  CC_E,  CC_E,  VCC_U|VCC_P),
  /* ABC */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS)  /* Same as UGT. */
};
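/* Example decoding for IR_LT: bits 0-3 hold CC_GE (signed compare, branch
** to the exit when the guarded condition does NOT hold), bits 4-7 hold
** CC_AE for unsigned and FP compares, bits 8-11 hold CC_G for the
** hiword-only part of a split 64 bit compare, and VCC_PS marks that the
** FP compare needs an extra CC_P branch unless the operands are swapped.
*/
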
/* FP and integer comparisons. */
static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
{
  if (irt_isnum(ir->t)) {
    IRRef lref = ir->op1;
    IRRef rref = ir->op2;
    Reg left, right;
    MCLabel l_around;
    /*
    ** An extra CC_P branch is required to preserve ordered/unordered
    ** semantics for FP comparisons. This can be avoided by swapping
    ** the operands and inverting the condition (except for EQ and UNE).
    ** So always try to swap if possible.
    **
    ** Another option would be to swap operands to achieve better memory
    ** operand fusion. But it's unlikely that this outweighs the cost
    ** of the extra branches.
    */
    if (cc & VCC_S) {  /* Swap? */
      IRRef tmp = lref; lref = rref; rref = tmp;
      cc ^= (VCC_PS|(5<<4));  /* A <-> B, AE <-> BE, PS <-> none */
    }
    left = ra_alloc1(as, lref, RSET_FPR);
    right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
    l_around = emit_label(as);
    asm_guardcc(as, cc >> 4);
    if (cc & VCC_P) {  /* Extra CC_P branch required? */
      if (!(cc & VCC_U)) {
        asm_guardcc(as, CC_P);  /* Branch to exit for ordered comparisons. */
      } else if (l_around != as->invmcp) {
        emit_sjcc(as, CC_P, l_around);  /* Branch around for unordered. */
      } else {
        /* Patched to mcloop by asm_loop_fixup. */
        as->loopinv = 2;
        if (as->realign)
          emit_sjcc(as, CC_P, as->mcp);
        else
          emit_jcc(as, CC_P, as->mcp);
      }
    }
    emit_mrm(as, XO_UCOMISD, left, right);
  } else {
    IRRef lref = ir->op1, rref = ir->op2;
    IROp leftop = (IROp)(IR(lref)->o);
    Reg r64 = REX_64IR(ir, 0);
    int32_t imm = 0;
    lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
               irt_isu32(ir->t) || irt_isaddr(ir->t));
    /* Swap constants (only for ABC) and fusable loads to the right. */
    if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
      if ((cc & 0xc) == 0xc) cc ^= 0x53;  /* L <-> G, LE <-> GE */
      else if ((cc & 0xa) == 0x2) cc ^= 0x55;  /* A <-> B, AE <-> BE */
      lref = ir->op2; rref = ir->op1;
    }
    if (asm_isk32(as, rref, &imm)) {
      IRIns *irl = IR(lref);
      /* Check whether we can use test ins. Not for unsigned, since CF=0. */
      int usetest = (imm == 0 && (cc & 0xa) != 0x2);
      if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
        /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
        Reg right, left = RID_NONE;
        RegSet allow = RSET_GPR;
        if (!asm_isk32(as, irl->op2, &imm)) {
          left = ra_alloc1(as, irl->op2, allow);
          rset_clear(allow, left);
        } else {  /* Try to Fuse IRT_I8/IRT_U8 loads, too. See below. */
          IRIns *irll = IR(irl->op1);
          if (opisfusableload((IROp)irll->o) &&
              (irt_isi8(irll->t) || irt_isu8(irll->t))) {
            IRType1 origt = irll->t;  /* Temporarily flip types. */
            irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
            as->curins--;  /* Skip to BAND to avoid failing in noconflict(). */
            right = asm_fuseload(as, irl->op1, RSET_GPR);
            as->curins++;
            irll->t = origt;
            if (right != RID_MRM) goto test_nofuse;
            /* Fusion succeeded, emit test byte mrm, imm8. */
            asm_guardcc(as, cc);
            emit_i8(as, (imm & 0xff));
            emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
            return;
          }
        }
        as->curins--;  /* Skip to BAND to avoid failing in noconflict(). */
        right = asm_fuseload(as, irl->op1, allow);
        as->curins++;  /* Undo the above. */
      test_nofuse:
        asm_guardcc(as, cc);
        if (ra_noreg(left)) {
          emit_i32(as, imm);
          emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
        } else {
          emit_mrm(as, XO_TEST, r64 + left, right);
        }
      } else {
        Reg left;
        if (opisfusableload((IROp)irl->o) &&
            ((irt_isu8(irl->t) && checku8(imm)) ||
             ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
             (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
          /* Only the IRT_INT case is fused by asm_fuseload.
          ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
          ** are handled here.
          ** Note that cmp word [mem], imm16 should not be generated,
          ** since it has a length-changing prefix. Compares of a word
          ** against a sign-extended imm8 are ok, however.
          */
          IRType1 origt = irl->t;  /* Temporarily flip types. */
          irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
          left = asm_fuseload(as, lref, RSET_GPR);
          irl->t = origt;
          if (left == RID_MRM) {  /* Fusion succeeded? */
            if (irt_isu8(irl->t) || irt_isu16(irl->t))
              cc >>= 4;  /* Need unsigned compare. */
            asm_guardcc(as, cc);
            emit_i8(as, imm);
            emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
                         XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
            return;
          }  /* Otherwise handle register case as usual. */
        } else {
          left = asm_fuseload(as, lref, RSET_GPR);
        }
        asm_guardcc(as, cc);
        if (usetest && left != RID_MRM) {
          /* Use test r,r instead of cmp r,0. */
          emit_rr(as, XO_TEST, r64 + left, left);
          if (irl+1 == ir)  /* Referencing previous ins? */
            as->flagmcp = as->mcp;  /* Set flag to drop test r,r if possible. */
        } else {
          emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
        }
      }
    } else {
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left));
      asm_guardcc(as, cc);
      emit_mrm(as, XO_CMP, r64 + left, right);
    }
  }
}

#if LJ_32 && LJ_HASFFI
/* 64 bit integer comparisons in 32 bit mode. */
static void asm_comp_int64(ASMState *as, IRIns *ir)
{
  uint32_t cc = asm_compmap[(ir-1)->o];
  RegSet allow = RSET_GPR;
  Reg lefthi = RID_NONE, leftlo = RID_NONE;
  Reg righthi = RID_NONE, rightlo = RID_NONE;
  MCLabel l_around;
  x86ModRM mrm;

  as->curins--;  /* Skip loword ins. Avoids failing in noconflict(), too. */

  /* Allocate/fuse hiword operands. */
  if (irref_isk(ir->op2)) {
    lefthi = asm_fuseload(as, ir->op1, allow);
  } else {
    lefthi = ra_alloc1(as, ir->op1, allow);
    righthi = asm_fuseload(as, ir->op2, allow);
    if (righthi == RID_MRM) {
      if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
      if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
    } else {
      rset_clear(allow, righthi);
    }
  }
  mrm = as->mrm;  /* Save state for hiword instruction. */

  /* Allocate/fuse loword operands. */
  if (irref_isk((ir-1)->op2)) {
    leftlo = asm_fuseload(as, (ir-1)->op1, allow);
  } else {
    leftlo = ra_alloc1(as, (ir-1)->op1, allow);
    rightlo = asm_fuseload(as, (ir-1)->op2, allow);
    if (rightlo == RID_MRM) {
      if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
      if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
    } else {
      rset_clear(allow, rightlo);
    }
  }

  /* All register allocations must be performed _before_ this point. */
  l_around = emit_label(as);
  as->invmcp = as->flagmcp = NULL;  /* Cannot use these optimizations. */

  /* Loword comparison and branch. */
  asm_guardcc(as, cc >> 4);  /* Always use unsigned compare for loword. */
  if (ra_noreg(rightlo)) {
    int32_t imm = IR((ir-1)->op2)->i;
    if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
      emit_rr(as, XO_TEST, leftlo, leftlo);
    else
      emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
  } else {
    emit_mrm(as, XO_CMP, leftlo, rightlo);
  }

  /* Hiword comparison and branches. */
  if ((cc & 15) != CC_NE)
    emit_sjcc(as, CC_NE, l_around);  /* Hiword unequal: skip loword compare. */
  if ((cc & 15) != CC_E)
    asm_guardcc(as, cc >> 8);  /* Hiword compare without equality check. */
  as->mrm = mrm;  /* Restore state. */
  if (ra_noreg(righthi)) {
    int32_t imm = IR(ir->op2)->i;
    if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
      emit_rr(as, XO_TEST, lefthi, lefthi);
    else
      emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
  } else {
    emit_mrm(as, XO_CMP, lefthi, righthi);
  }
}
#endif

/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_32 && LJ_HASFFI
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    if (usehi || uselo) {
      if (irt_isfp(ir->t))
        asm_conv_fp_int64(as, ir);
      else
        asm_conv_int64_fp(as, ir);
    }
    as->curins--;  /* Always skip the CONV. */
    return;
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    asm_comp_int64(as, ir);
    return;
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_fxstore(as, ir);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_ADD:
    as->flagmcp = NULL;
    as->curins--;
    asm_intarith(as, ir, XOg_ADC);
    asm_intarith(as, ir-1, XOg_ADD);
    break;
  case IR_SUB:
    as->flagmcp = NULL;
    as->curins--;
    asm_intarith(as, ir, XOg_SBB);
    asm_intarith(as, ir-1, XOg_SUB);
    break;
  case IR_NEG: {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    emit_rr(as, XO_GROUP3, XOg_NEG, dest);
    emit_i8(as, 0);
    emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
    ra_left(as, dest, ir->op1);
    as->curins--;
    asm_neg_not(as, ir-1, XOg_NEG);
    break;
    }
  case IR_CALLN:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by CNEWI itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);  /* Unused on x64 or without FFI. */
#endif
}
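
/* E.g. a split 64 bit IR_ADD pair assembles the hiword ADC first; since
** machine code is generated backwards, the loword ADD executes first at
** run time and the ADC then consumes its carry flag.
*/
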
/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore eax. */
  Reg pbase = irp ? irp->r : RID_BASE;
  Reg r = allow ? rset_pickbot(allow) : RID_EAX;
  emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
  else
    ra_modified(as, r);
  emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot));
  if (ra_hasreg(pbase) && pbase != r)
    emit_rr(as, XO_ARITH(XOg_SUB), r, pbase);
  else
    emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
              ptr2addr(&J2G(as->J)->jit_base));
  emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack));
  emit_getgl(as, r, jit_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
}
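
/* Since instructions are emitted backwards, the code above runs as:
**   [mov [esp], r]           (only if no free temp. register was found)
**   mov r, [g->jit_L]
**   mov r, [r + offsetof(lua_State, maxstack)]
**   sub r, [g->jit_base]   or   sub r, pbase
**   cmp r, 8*topslot
**   [mov r, [esp]]
**   jb <exit stub>
*/
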
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
    } else {
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
                 (LJ_DUALNUM && irt_isinteger(ir->t)));
      if (!irref_isk(ref)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
        emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
      } else if (!irt_ispri(ir->t)) {
        emit_movmroi(as, RID_BASE, ofs, ir->i);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s != 0)  /* Do not overwrite link to previous frame. */
          emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
      } else {
        if (!(LJ_64 && irt_islightud(ir->t)))
          emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
      }
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}

/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_rr(as, XO_TEST, RID_RET, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  emit_loada(as, tmp, J2G(as->J));
  emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_sjcc(as, CC_B, l_end);
  emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold);
  emit_getgl(as, tmp, gc.total);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->realign) {  /* Realigned loops use short jumps. */
    as->realign = NULL;  /* Stop another retry. */
    lua_assert(((intptr_t)target & 15) == 0);
    if (as->loopinv) {  /* Inverted loop branch? */
      p -= 5;
      p[0] = XI_JMP;
      lua_assert(target - p >= -128);
      p[-1] = (MCode)(target - p);  /* Patch sjcc. */
      if (as->loopinv == 2)
        p[-3] = (MCode)(target - p + 2);  /* Patch opt. short jp. */
    } else {
      lua_assert(target - p >= -128);
      p[-1] = (MCode)(int8_t)(target - p);  /* Patch short jmp. */
      p[-2] = XI_JMPs;
    }
  } else {
    MCode *newloop;
    p[-5] = XI_JMP;
    if (as->loopinv) {  /* Inverted loop branch? */
      /* asm_guardcc already inverted the jcc and patched the jmp. */
      p -= 5;
      newloop = target+4;
      *(int32_t *)(p-4) = (int32_t)(target - p);  /* Patch jcc. */
      if (as->loopinv == 2) {
        *(int32_t *)(p-10) = (int32_t)(target - p + 6);  /* Patch opt. jp. */
        newloop = target+8;
      }
    } else {  /* Otherwise just patch jmp. */
      *(int32_t *)(p-4) = (int32_t)(target - p);
      newloop = target+3;
    }
    /* Realign small loops and shorten the loop branch. */
    if (newloop >= p - 128) {
      as->realign = newloop;  /* Force a retry and remember alignment. */
      as->curins = as->stopins;  /* Abort asm_trace now. */
      as->T->nins = as->orignins;  /* Remove any added renames. */
    }
  }
}

/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_rr(as, XO_MOV, r, RID_BASE);
  }
}

/* Coalesce or reload BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      rset_clear(allow, r);  /* Mark same BASE register as coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      rset_clear(allow, irp->r);
      emit_rr(as, XO_MOV, r, irp->r);  /* Move from coalesced parent reg. */
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
  MCode *p = as->mctop;
  MCode *target, *q;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
  } else {
    MCode *p1;
    /* Patch stack adjustment. */
    if (checki8(spadj)) {
      p -= 3;
      p1 = p-1;
      *p1 = (MCode)spadj;
    } else {
      p1 = p-4;
      *(int32_t *)p1 = spadj;
    }
    if ((as->flags & JIT_F_LEA_AGU)) {
#if LJ_64
      p1[-4] = 0x48;
#endif
      p1[-3] = (MCode)XI_LEA;
      p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
      p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
    } else {
#if LJ_64
      p1[-3] = 0x48;
#endif
      p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
      p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
    }
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = XI_JMP;
  /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
  for (q = as->mctop-1; q >= p; q--)
    *q = XI_NOP;
  as->mctop = p;
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop;
  /* Realign and leave room for backwards loop branch or exit branch. */
  if (as->realign) {
    int i = ((int)(intptr_t)as->realign) & 15;
    /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
    while (i-- > 0)
      *--p = XI_NOP;
    as->mctop = p;
    p -= (as->loopinv ? 5 : 2);  /* Space for short/near jmp. */
  } else {
    p -= 5;  /* Space for exit branch (near jmp). */
  }
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
    as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
    as->invmcp = NULL;
  }
}

/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;

  /* Guarded assertions. */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_EQ: case IR_NE: case IR_ABC:
    asm_comp(as, ir, asm_compmap[ir->o]);
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
  case IR_BSWAP: asm_bitswap(as, ir); break;

  case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
  case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
  case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;

  case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
  case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
  case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
  case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
  case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;

  /* Arithmetic ops. */
  case IR_ADD: asm_add(as, ir); break;
  case IR_SUB:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_SUBSD);
    else  /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
      asm_intarith(as, ir, XOg_SUB);
    break;
  case IR_MUL:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_MULSD);
    else
      asm_intarith(as, ir, XOg_X_IMUL);
    break;
  case IR_DIV:
#if LJ_64 && LJ_HASFFI
    if (!irt_isnum(ir->t))
      asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
                                             IRCALL_lj_carith_divu64);
    else
#endif
      asm_fparith(as, ir, XO_DIVSD);
    break;
  case IR_MOD:
#if LJ_64 && LJ_HASFFI
    if (!irt_isint(ir->t))
      asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
                                             IRCALL_lj_carith_modu64);
    else
#endif
      asm_intmod(as, ir);
    break;

  case IR_NEG:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_XORPS);
    else
      asm_neg_not(as, ir, XOg_NEG);
    break;
  case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;

  case IR_MIN:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_MINSD);
    else
      asm_min_max(as, ir, CC_G);
    break;
  case IR_MAX:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_MAXSD);
    else
      asm_min_max(as, ir, CC_L);
    break;

  case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
    asm_fpmath(as, ir);
    break;
  case IR_POW:
#if LJ_64 && LJ_HASFFI
    if (!irt_isnum(ir->t))
      asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
                                             IRCALL_lj_carith_powu64);
    else
#endif
      asm_fppowi(as, ir);
    break;

  /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
  case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
  case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
  case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_TOBIT: asm_tobit(as, ir); break;
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX];
  int nslots;
  asm_collectargs(as, ir, ci, args);
  nslots = asm_count_call_slots(as, ci, args);
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
#if LJ_64
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
#else
  return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
#endif
}

/* Target-specific setup. */
static void asm_setup_target(ASMState *as)
{
  asm_exitstub_setup(as, as->T->nsnap);
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MSize len = T->szmcode;
  MCode *px = exitstub_addr(J, exitno) - 6;
  MCode *pe = p+len-6;
  uint32_t stateaddr = u32ptr(&J2G(J)->vmstate);
  if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
    *(int32_t *)(p+len-4) = jmprel(p+len, target);
  /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
  for (;; p++) {
    if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) {
      p += LJ_64 ? 11 : 10;
      break;
    }
  }
  for (; p < pe; p++) {
    if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) {
      *(int32_t *)(p+2) = jmprel(p+6, target);
      p += 5;
    }
  }
  lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
  lj_mcode_patch(J, mcarea, 1);
}
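
/* The 16 bit pattern test in the loop above matches the two-byte Jcc
** opcode 0F 8x: read little-endian, the bytes 0F 8x become 0x8x0f, and
** masking with 0xf0ff drops the condition code nibble, leaving 0x800f.
*/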