/*
** x86/x64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Guard handling ------------------------------------------------------ */
/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
  MCode *mxp = as->mcbot;
  MCode *mxpstart = mxp;
  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
    asm_mclimit(as);
  /* Push low byte of exitno for each exit stub. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
  }
  /* Push the high byte of the exitno for each exit stub group. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
  *mxp++ = XI_MOVmi;
  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  *mxp++ = 2*sizeof(void *);
  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
  /* Jump to exit handler which fills in the ExitState. */
  *mxp++ = XI_JMP; mxp += 4;
  *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
  /* Commit the code for this group (even if assembly fails later on). */
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxpstart;
}
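/* Layout note (derived from the size check above): each of the
** EXITSTUBS_PER_GROUP stubs costs 2+2 bytes (push imm8 plus jmp short; the
** first stub omits the jmp and the shared high-byte push takes that slot in
** the budget), and the common tail adds the 8 byte mov [esp+8], imm32 (+8)
** and the 5 byte jmp rel32 to lj_vm_exit_handler (+5).
*/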
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}
/* Emit conditional branch to exit for guard.
** It's important to emit this *after* all registers have been allocated,
** because rematerializations may invalidate the flags.
*/
static void asm_guardcc(ASMState *as, int cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *(int32_t *)(p+1) = jmprel(p+5, target);
    target = p;
    cc ^= 1;
    if (as->realign) {
      emit_sjcc(as, cc, target);
      return;
    }
  }
  emit_jcc(as, cc, target);
}
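/* Note: as->invmcp marks the backward branch at the end of a loop. If the
** guard lands exactly there, the condition is inverted and the exit jump
** takes the place of the loop branch, so the loop body continues by
** fall-through (loop branch inversion).
*/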
/* -- Memory operand fusion ----------------------------------------------- */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if a reference is a signed 32 bit constant. */
static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
    if (ir->o != IR_KINT64) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
      *k = (int32_t)ir_kint64(ir)->u64;
      return 1;
    }
  }
  return 0;
}
/* Check if there's no conflicting instruction between curins and ref.
** Also avoid fusing loads if there are multiple references.
*/
static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref) {
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
      return 0;
  }
  return 1;  /* Ok, no conflict. */
}
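/* Example: fusing a load at ref 0010 into the instruction at curins 0014
** scans 0013 down to 0011; a conflicting opcode (or, with noload == 0, any
** other use of 0010) in that window blocks the fusion.
*/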
/* Fuse array base into memory operand. */
static IRRef asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *irb = IR(ref);
  as->mrm.ofs = 0;
  if (irb->o == IR_FLOAD) {
    IRIns *ira = IR(irb->op1);
    lua_assert(irb->op2 == IRFL_TAB_ARRAY);
    /* We can avoid the FLOAD of t->array for colocated arrays. */
    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
        !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
      return irb->op1;  /* Table obj. */
    }
  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
    /* Fuse base offset (vararg load). */
    as->mrm.ofs = IR(irb->op2)->i;
    return irb->op1;
  }
  return ref;  /* Otherwise use the given array base. */
}
/* Fuse array reference into memory operand. */
static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irx;
  lua_assert(ir->o == IR_AREF);
  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
  irx = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += 8*irx->i;
    as->mrm.idx = RID_NONE;
  } else {
    rset_clear(allow, as->mrm.base);
    as->mrm.scale = XM_SCALE8;
    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
    ** Doesn't help much without ABCelim, but reduces register pressure.
    */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
        irx->o == IR_ADD && irref_isk(irx->op2)) {
      as->mrm.ofs += 8*IR(irx->op2)->i;
      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
    } else {
      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
    }
  }
}
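/* Example: for t[i+1] the constant +1 is folded into the displacement, so
** the AREF becomes a single [base + i*8 + 8] operand (8 == sizeof(TValue)
** here, matching XM_SCALE8 above).
*/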
/* Fuse array/hash/upvalue reference into memory operand.
** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
** pass the final allow mask, excluding any GPRs used for other inputs.
** In particular: 2-operand GPR instructions need to call ra_dest() first!
*/
static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    switch ((IROp)ir->o) {
    case IR_AREF:
      if (mayfuse(as, ref)) {
        asm_fusearef(as, ir, allow);
        return;
      }
      break;
    case IR_HREFK:
      if (mayfuse(as, ref)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
        as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        as->mrm.idx = RID_NONE;
        return;
      }
      break;
    case IR_UREFC:
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
        as->mrm.ofs = ptr2addr(&uv->tv);
        as->mrm.base = as->mrm.idx = RID_NONE;
        return;
      }
      break;
    default:
      lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
                 ir->o == IR_KKPTR);
      break;
    }
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  as->mrm.ofs = 0;
  as->mrm.idx = RID_NONE;
}
/* Fuse FLOAD/FREF reference into memory operand. */
static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
{
  lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
  as->mrm.ofs = field_ofs[ir->op2];
  as->mrm.idx = RID_NONE;
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
    as->mrm.base = RID_NONE;
  } else {
    as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
  }
}
/* Fuse string reference into memory operand. */
static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irr;
  lua_assert(ir->o == IR_STRREF);
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = sizeof(GCstr);
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
  } else {
    Reg r = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, r);
    as->mrm.base = (uint8_t)r;
  }
  irr = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += irr->i;
  } else {
    Reg r;
    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
      as->mrm.ofs += IR(irr->op2)->i;
      r = ra_alloc1(as, irr->op1, allow);
    } else {
      r = ra_alloc1(as, ir->op2, allow);
    }
    if (as->mrm.base == RID_NONE)
      as->mrm.base = (uint8_t)r;
    else
      as->mrm.idx = (uint8_t)r;
  }
}
static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  as->mrm.idx = RID_NONE;
  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    as->mrm.ofs = ir->i;
    as->mrm.base = RID_NONE;
  } else if (ir->o == IR_STRREF) {
    asm_fusestrref(as, ir, allow);
  } else {
    as->mrm.ofs = 0;
    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
      IRIns *irx;
      IRRef idx;
      Reg r;
      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
        ref = ir->op1;
        ir = IR(ref);
        if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
          goto noadd;
      }
      as->mrm.scale = XM_SCALE1;
      idx = ir->op1;
      ref = ir->op2;
      irx = IR(idx);
      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
        idx = ir->op2;
        ref = ir->op1;
        irx = IR(idx);
      }
      if (canfuse(as, irx) && ra_noreg(irx->r)) {
        if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
          idx = irx->op1;
          as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
        } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
          /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
          idx = irx->op1;
          as->mrm.scale = XM_SCALE2;
        }
      }
      r = ra_alloc1(as, idx, allow);
      rset_clear(allow, r);
      as->mrm.idx = (uint8_t)r;
    }
  noadd:
    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  }
}
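/* Example: cdata indexing such as p[i] with an int32_t *p typically yields
** ADD(ADD(p, BSHL(i, 2)), ofs) in the IR, which the code above fuses into a
** single [p + i*4 + ofs] memory operand.
*/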
/* Fuse load into memory operand. */
static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    if (allow != RSET_EMPTY) {  /* Fast path. */
      ra_noweak(as, ir->r);
      return ir->r;
    }
  fusespill:
    /* Force a spill if only memory operands are allowed (asm_x87load). */
    as->mrm.base = RID_ESP;
    as->mrm.ofs = ra_spill(as, ir);
    as->mrm.idx = RID_NONE;
    return RID_MRM;
  }
  if (ir->o == IR_KNUM) {
    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
    lua_assert(allow != RSET_EMPTY);
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      as->mrm.ofs = ptr2addr(ir_knum(ir));
      as->mrm.base = as->mrm.idx = RID_NONE;
      return RID_MRM;
    }
  } else if (mayfuse(as, ref)) {
    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
          noconflict(as, ref, IR_RETF, 0)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
        as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      }
    } else if (ir->o == IR_FLOAD) {
      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
      if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
          noconflict(as, ref, IR_FSTORE, 0)) {
        asm_fusefref(as, ir, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
        asm_fuseahuref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_XLOAD) {
      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
      */
      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
          noconflict(as, ref, IR_XSTORE, 0)) {
        asm_fusexref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_VLOAD) {
      asm_fuseahuref(as, ir->op1, xallow);
      return RID_MRM;
    }
  }
  if (!(as->freeset & allow) &&
      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
    goto fusespill;
  return ra_allocref(as, ref, allow);
}

#if LJ_64
/* Don't fuse a 32 bit load into a 64 bit operation. */
static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
{
  if (is64 && !irt_is64(IR(ref)->t))
    return ra_alloc1(as, ref, allow);
  return asm_fuseload(as, ref, allow);
}
#else
#define asm_fuseloadm(as, ref, allow, is64)  asm_fuseload(as, (ref), (allow))
#endif
/* -- Calls --------------------------------------------------------------- */

/* Count the required number of stack slots for a call. */
static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t i, nargs = CCI_NARGS(ci);
  int nslots = 0;
#if LJ_64
  if (LJ_ABI_WIN) {
    nslots = (int)(nargs*2);  /* Only matters for more than four args. */
  } else {
    int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
    for (i = 0; i < nargs; i++)
      if (args[i] && irt_isfp(IR(args[i])->t)) {
        if (nfpr > 0) nfpr--; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots += 2;
      }
  }
#else
  int ngpr = 0;
  if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
    ngpr = 2;
  else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
    ngpr = 1;
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
#endif
  return nslots;
}
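/* Example (x86 fastcall): for f(int a, int b, int c, double d) the two GPR
** argument slots take a and b, so only c (1 slot) and d (2 slots) are
** counted and the function returns 3 (stack slots are 4 bytes each on x86).
*/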
/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = STACKARG_OFS;
#if LJ_64
  uint32_t gprs = REGARG_GPRS;
  Reg fpr = REGARG_FIRSTFPR;
#if !LJ_ABI_WIN
  MCode *patchnfpr = NULL;
#endif
#else
  uint32_t gprs = 0;
  if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
    if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
      gprs = (REGARG_GPRS & 31);
    else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
      gprs = REGARG_GPRS;
  }
#endif
  if ((void *)ci->func)
    emit_call(as, ci->func);
#if LJ_64
  if ((ci->flags & CCI_VARARG)) {  /* Special handling for vararg calls. */
#if LJ_ABI_WIN
    for (n = 0; n < 4 && n < nargs; n++) {
      IRIns *ir = IR(args[n]);
      if (irt_isfp(ir->t))  /* Duplicate FPRs in GPRs. */
        emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
                ((gprs >> (n*5)) & 31));  /* Either MOVD or MOVQ. */
    }
#else
    patchnfpr = --as->mcp;  /* Indicate number of used FPRs in register al. */
    *--as->mcp = XI_MOVrib | RID_EAX;
#endif
  }
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    Reg r;
#if LJ_64 && LJ_ABI_WIN
    /* Windows/x64 argument registers are strictly positional. */
    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
    fpr++; gprs >>= 5;
#elif LJ_64
    /* POSIX/x64 argument registers are used in order of appearance. */
    if (irt_isfp(ir->t)) {
      r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
    } else {
      r = gprs & 31; gprs >>= 5;
    }
#else
    if (ref && irt_isfp(ir->t)) {
      r = 0;
    } else {
      r = gprs & 31; gprs >>= 5;
      if (ref == 0) continue;
    }
#endif
    if (r) {  /* Argument is in a register. */
      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
#if LJ_64
        if (ir->o == IR_KINT64)
          emit_loadu64(as, r, ir_kint64(ir)->u64);
        else
#endif
          emit_loadi(as, r, ir->i);
      } else {
        lua_assert(rset_test(as->freeset, r));  /* Must have been evicted. */
        if (ra_hasreg(ir->r)) {
          ra_noweak(as, ir->r);
          emit_movrr(as, ir, r, ir->r);
        } else {
          ra_allocref(as, ref, RID2RSET(r));
        }
      }
    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
      lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref)));  /* No float k. */
      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
        /* Split stores for unaligned FP consts. */
        emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
        emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
      } else {
        r = ra_alloc1(as, ref, RSET_FPR);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
                  r, RID_ESP, ofs);
      }
      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
    } else {  /* Non-FP argument is on stack. */
      if (LJ_32 && ref < ASMREF_TMP1) {
        emit_movmroi(as, RID_ESP, ofs, ir->i);
      } else {
        r = ra_alloc1(as, ref, RSET_GPR);
        emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
      }
      ofs += sizeof(intptr_t);
    }
  }
#if LJ_64 && !LJ_ABI_WIN
  if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
#endif
}
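/* Reminder: the assembler generates machine code backwards (as->mcp grows
** downwards), so asm_gencall emits the CALL first and the argument setup
** afterwards, which places the setup *before* the call in execution order.
*/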
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = (LJ_32 && (ir+1)->o == IR_HIOP);
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    if (irt_isfp(ir->t)) {
      int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
#if LJ_64
      if ((ci->flags & CCI_CASTU64)) {
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
          emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
        }
        if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
#else
      /* Number result is in x87 st0 for x86 calling convention. */
      Reg dest = ir->r;
      if (ra_hasreg(dest)) {
        ra_free(as, dest);
        ra_modified(as, dest);
        emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
                  dest, RID_ESP, ofs);
      }
      if ((ci->flags & CCI_CASTU64)) {
        emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
        emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
      } else {
        emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
                  irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
      }
#endif
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      lua_assert(!irt_ispri(ir->t));
      ra_destreg(as, ir, RID_RET);
    }
  } else if (LJ_32 && irt_isfp(ir->t)) {
    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
  }
}
static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

/* Return a constant function pointer or NULL for indirect calls. */
static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
{
#if LJ_32
  UNUSED(as);
  if (irref_isk(func))
    return (void *)irf->i;
#else
  if (irref_isk(func)) {
    MCode *p;
    if (irf->o == IR_KINT64)
      p = (MCode *)(void *)ir_k64(irf)->u64;
    else
      p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
    if (p - as->mcp == (int32_t)(p - as->mcp))
      return p;  /* Call target is still in +-2GB range. */
    /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
  }
#endif
  return NULL;
}
static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  int32_t spadj = 0;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
#if LJ_32
  /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
  if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
    spadj = 4 * asm_count_call_slots(as, &ci, args);
#endif
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  ci.func = (ASMFunction)asm_callx_func(as, irf, func);
  if (!(void *)ci.func) {
    /* Use a (hoistable) non-scratch register for indirect calls. */
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r = ra_alloc1(as, func, allow);
    if (LJ_32) emit_spsub(as, spadj);  /* Above code may cause restores! */
    emit_rr(as, XO_GROUP5, XOg_CALL, r);
  } else if (LJ_32) {
    emit_spsub(as, spadj);
  }
  asm_gencall(as, &ci, args);
}
/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
}
/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_P);
  asm_guardcc(as, CC_NE);
  emit_rr(as, XO_UCOMISD, left, tmp);
  emit_rr(as, XO_CVTSI2SD, tmp, dest);
  if (!(as->flags & JIT_F_SPLIT_XMM))
    emit_rr(as, XO_XORPS, tmp, tmp);  /* Avoid partial register stall. */
  emit_rr(as, XO_CVTTSD2SI, dest, left);
  /* Can't fuse since left is needed twice. */
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp = ra_noreg(IR(ir->op1)->r) ?
              ra_alloc1(as, ir->op1, RSET_FPR) :
              ra_scratch(as, RSET_FPR);
  Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
  emit_rr(as, XO_MOVDto, tmp, dest);
  emit_mrm(as, XO_ADDSD, tmp, right);
  ra_left(as, tmp, ir->op1);
}
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lua_assert(irt_type(ir->t) != st);
  lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64)));  /* Handled by SPLIT. */
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      Reg left = asm_fuseload(as, lref, RSET_FPR);
      emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
      if (left == dest) return;  /* Avoid the XO_XORPS. */
    } else if (LJ_32 && st == IRT_U32) {  /* U32 to FP conversion on x86. */
      /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
      cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
      Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_rr(as, XO_CVTSD2SS, dest, dest);
      emit_rr(as, XO_SUBSD, dest, bias);  /* Subtract 2^52+2^51 bias. */
      emit_rr(as, XO_XORPS, dest, bias);  /* Merge bias and integer. */
      emit_loadn(as, bias, k);
      emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
      return;
    } else {  /* Integer to FP conversion. */
      Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
                 ra_alloc1(as, lref, RSET_GPR) :
                 asm_fuseloadm(as, lref, RSET_GPR, st64);
      if (LJ_64 && st == IRT_U64) {
        MCLabel l_end = emit_label(as);
        const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
        emit_rma(as, XO_ADDSD, dest, k);  /* Add 2^64 to compensate. */
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, left|REX_64, left);  /* Check if u64 >= 2^63. */
      }
      emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
               dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
    }
    if (!(as->flags & JIT_F_SPLIT_XMM))
      emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      x86Op op = st == IRT_NUM ?
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
      if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
        /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
        /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
        Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
                                          ra_scratch(as, RSET_FPR);
        MCLabel l_end = emit_label(as);
        if (LJ_32)
          emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
        emit_rr(as, op, dest|REX_64, tmp);
        if (st == IRT_NUM)
          emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
                   LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
        else
          emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
                   LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, dest|REX_64, dest);  /* Check if dest negative. */
        emit_rr(as, op, dest|REX_64, tmp);
        ra_left(as, tmp, lref);
      } else {
        Reg left = asm_fuseload(as, lref, RSET_FPR);
        if (LJ_64 && irt_isu32(ir->t))
          emit_rr(as, XO_MOV, dest, dest);  /* Zero hiword. */
        emit_mrm(as, op,
                 dest|((LJ_64 &&
                        (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
                 left);
      }
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    RegSet allow = RSET_GPR;
    x86Op op;
    lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
    if (st == IRT_I8) {
      op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_U8) {
      op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_I16) {
      op = XO_MOVSXw;
    } else {
      op = XO_MOVZXw;
    }
    left = asm_fuseload(as, lref, allow);
    /* Add extra MOV if source is already in wrong register. */
    if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
      Reg tmp = ra_scratch(as, allow);
      emit_rr(as, op, dest, tmp);
      emit_rr(as, XO_MOV, tmp, left);
    } else {
      emit_mrm(as, op, dest, left);
    }
  } else {  /* 32/64 bit integer conversions. */
    if (LJ_32) {  /* Only need to handle 32/32 bit no-op (cast) on x86. */
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
    } else if (irt_is64(ir->t)) {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
      }
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64) {
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        emit_mrm(as, XO_MOV, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}
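/* Worked example for the U32-to-FP path above: the bias 2^52+2^51 has the
** IEEE bit pattern 0x43380000_00000000, so XORPS merges the u32 into the low
** mantissa bits, producing the exact double 2^52+2^51+u32; subtracting the
** bias then yields (double)u32 without a 64 bit signed convert.
*/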
#if LJ_32 && LJ_HASFFI
/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */

/* 64 bit integer to FP conversion in 32 bit mode. */
static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
{
  Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
    emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
              dest, RID_ESP, ofs);
  }
  emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
            irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
  if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
    MCLabel l_end = emit_label(as);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
    emit_sjcc(as, CC_NS, l_end);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if u64 >= 2^63. */
  } else {
    lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
  }
  emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
  /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
  emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
  emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
}
/* FP to 64 bit integer conversion in 32 bit mode. */
static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  Reg lo, hi;
  lua_assert(st == IRT_NUM || st == IRT_FLOAT);
  lua_assert(dt == IRT_I64 || dt == IRT_U64);
  lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
  hi = ra_dest(as, ir, RSET_GPR);
  lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
  if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
  /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
  if (!(as->flags & JIT_F_SSE3)) {  /* Set FPU rounding mode to default. */
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
    emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
    emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
  }
  if (dt == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
    MCLabel l_pop, l_end = emit_label(as);
    emit_x87op(as, XI_FPOP);
    l_pop = emit_label(as);
    emit_sjmp(as, l_end);
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3))
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
    else
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
    emit_sjcc(as, CC_NS, l_pop);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if out-of-range (2^63). */
  }
  emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
  if ((as->flags & JIT_F_SSE3)) {  /* Truncation is easy with SSE3. */
    emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
  } else {  /* Otherwise set FPU rounding mode to truncate before the store. */
    emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
    emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
    emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
    emit_loadi(as, lo, 0xc00);
    emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
  }
  if (dt == IRT_U64)
    emit_x87op(as, XI_FDUP);
  emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
           st == IRT_NUM ? XOg_FLDq : XOg_FLDd,
           asm_fuseload(as, ir->op1, RSET_EMPTY));
}
#endif
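/* Without SSE3's FISTTP, truncation is forced via the FPU control word in
** the code above (in forward order): FNSTCW saves the word, OR 0xc00 sets
** round-to-zero, FLDCW activates it, FISTP stores the truncated result, and
** the AND 0xf3ff path afterwards restores the default rounding mode.
*/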
static void asm_strto(ASMState *as, IRIns *ir)
{
  /* Force a spill slot for the destination register (if any). */
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
    rset_set(drop, ir->r);  /* WIN64 doesn't spill all FPRs. */
  ra_evictset(as, drop);
  asm_guardcc(as, CC_E);
  emit_rr(as, XO_TEST, RID_RET, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
            RID_ESP, sps_scale(ir->s));
}
static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(irl->t)) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
              RID_ESP, ra_spill(as, irl));
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}
/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusearef(as, ir, RSET_GPR);
  if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
    emit_mrm(as, XO_LEA, dest, RID_MRM);
  else if (as->mrm.base != dest)
    emit_rr(as, XO_MOV, dest, as->mrm.base);
}
/* Merge NE(HREF, niltv) check. */
static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
{
  /* Assumes nothing else generates NE of HREF. */
  if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
      ra_hasreg(ir->r)) {
    MCode *p = as->mcp;
    p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
    /* Ensure no loop branch inversion happened. */
    if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
      as->mcp = p;  /* Kill cmp reg, imm32 + jz exit. */
      return p + *(int32_t *)(p-4);  /* Return exit address. */
    }
  }
  return NULL;
}
/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir)
{
  MCode *nilexit = merge_href_niltv(as, ir);  /* Do this before any restores. */
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, tmp = RID_NONE;
  IRIns *irkey = IR(ir->op2);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  if (!isk) {
    rset_clear(allow, tab);
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    if (!irt_isstr(kt))
      tmp = ra_scratch(as, rset_exclude(allow, key));
  }

  /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
  l_end = emit_label(as);
  if (nilexit && ir[1].o == IR_NE) {
    emit_jcc(as, CC_E, nilexit);  /* XI_JMP is not found by lj_asm_patchexit. */
    nilexit = NULL;
  } else {
    emit_loada(as, dest, niltvg(J2G(as->J)));
  }

  /* Follow hash chain until the end. */
  l_loop = emit_sjcc_label(as, CC_NZ);
  emit_rr(as, XO_TEST, dest, dest);
  emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (nilexit)
    emit_jcc(as, CC_E, nilexit);
  else
    emit_sjcc(as, CC_E, l_end);
  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 (int32_t)ir_knum(irkey)->u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 (int32_t)ir_knum(irkey)->u32.hi);
    } else {
      emit_sjcc(as, CC_P, l_next);
      emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
      emit_sjcc(as, CC_AE, l_next);
      /* The type check avoids NaN penalties and complaints from Valgrind. */
#if LJ_64
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
      emit_i8(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
    }
#if LJ_64
  } else if (irt_islightud(kt)) {
    emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
#endif
  } else {
    if (!irt_ispri(kt)) {
      lua_assert(irt_isaddr(kt));
      if (isk)
        emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
                   ptr2addr(ir_kgc(irkey)));
      else
        emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
      emit_sjcc(as, CC_NE, l_next);
    }
    lua_assert(!irt_isnil(kt));
    emit_i8(as, irt_toitype(kt));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
  }
  emit_sfixup(as, l_loop);
  checkmclim(as);

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
  } else {
    emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
    if ((as->flags & JIT_F_PREFER_IMUL)) {
      emit_i8(as, sizeof(Node));
      emit_rr(as, XO_IMULi8, dest, dest);
    } else {
      emit_shifti(as, XOg_SHL, dest, 3);
      emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
    }
    if (isk) {
      emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hashrot() in lj_tab.c. */
      emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
      emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
      emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
      emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
      emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
      emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
      if (irt_isnum(kt)) {
        emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
#if LJ_64
        emit_shifti(as, XOg_SHR|REX_64, dest, 32);
        emit_rr(as, XO_MOV, tmp, dest);
        emit_rr(as, XO_MOVDto, key|REX_64, dest);
#else
        emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
        emit_rr(as, XO_MOVDto, key, tmp);
#endif
      } else {
        emit_rr(as, XO_MOV, tmp, key);
        emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
      }
    }
  }
}
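/* Main-position sketch for the string-key case above (forward order):
** mov dest, [tab+hmask]; and dest, [key+hash]; then dest is scaled by
** sizeof(Node) via lea+shl (or a single imul when JIT_F_PREFER_IMUL is set)
** and added to [tab+node].
*/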
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
#if !LJ_64
  MCLabel l_exit;
#endif
  lua_assert(ofs % sizeof(Node) == 0);
  if (ra_hasreg(dest)) {
    if (ofs != 0) {
      if (dest == node && !(as->flags & JIT_F_LEA_AGU))
        emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
      else
        emit_rmro(as, XO_LEA, dest, node, ofs);
    } else if (dest != node) {
      emit_rr(as, XO_MOV, dest, node);
    }
  }
  asm_guardcc(as, CC_NE);
#if LJ_64
  if (!irt_ispri(irkey->t)) {
    Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
    emit_rmro(as, XO_CMP, key|REX_64, node,
              ofs + (int32_t)offsetof(Node, key.u64));
    lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
                          ((uint64_t)irt_toitype(irkey->t) << 32) |
                          (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
  } else {
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#else
  l_exit = emit_label(as);
  if (irt_isnum(irkey->t)) {
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.lo),
               (int32_t)ir_knum(irkey)->u32.lo);
    emit_sjcc(as, CC_NE, l_exit);
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.hi),
               (int32_t)ir_knum(irkey)->u32.hi);
  } else {
    if (!irt_ispri(irkey->t)) {
      lua_assert(irt_isgcv(irkey->t));
      emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
                 ofs + (int32_t)offsetof(Node, key.gcr),
                 ptr2addr(ir_kgc(irkey)));
      emit_sjcc(as, CC_NE, l_exit);
    }
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#endif
}
static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  IRIns *irkey;
  Reg tmp;
  if (ir->r == RID_SINK)
    return;
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t     */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  irkey = IR(ir->op2);
  if (irt_isnum(irkey->t)) {
    /* For numbers use the constant itself or a spill slot as a TValue. */
    if (irref_isk(ir->op2))
      emit_loada(as, tmp, ir_knum(irkey));
    else
      emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
    if (!irref_isk(ir->op2)) {
      Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
      emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
    } else if (!irt_ispri(irkey->t)) {
      emit_movmroi(as, tmp, 0, irkey->i);
    }
    if (!(LJ_64 && irt_islightud(irkey->t)))
      emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
    emit_loada(as, tmp, &J2G(as->J)->tmptv);
  }
}
static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_rma(as, XO_MOV, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
      asm_guardcc(as, CC_NE);
      emit_i8(as, 1);
      emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
    } else {
      emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
    }
    emit_rmro(as, XO_MOV, uv, func,
              (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}
static void asm_fref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusefref(as, ir, RSET_GPR);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusestrref(as, ir, RSET_GPR);
  if (as->mrm.base == RID_NONE)
    emit_loadi(as, dest, as->mrm.ofs);
  else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
    emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
  else
    emit_mrm(as, XO_LEA, dest, RID_MRM);
}
/* -- Loads and stores ---------------------------------------------------- */

static void asm_fxload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  x86Op xo;
  if (ir->o == IR_FLOAD)
    asm_fusefref(as, ir, RSET_GPR);
  else
    asm_fusexref(as, ir->op1, RSET_GPR);
  /* ir->op2 is ignored -- unaligned loads are ok on x86. */
  switch (irt_type(ir->t)) {
  case IRT_I8: xo = XO_MOVSXb; break;
  case IRT_U8: xo = XO_MOVZXb; break;
  case IRT_I16: xo = XO_MOVSXw; break;
  case IRT_U16: xo = XO_MOVZXw; break;
  case IRT_NUM: xo = XMM_MOVRM(as); break;
  case IRT_FLOAT: xo = XO_MOVSS; break;
  default:
    if (LJ_64 && irt_is64(ir->t))
      dest |= REX_64;
    else
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
    xo = XO_MOV;
    break;
  }
  emit_mrm(as, xo, dest, RID_MRM);
}
static void asm_fxstore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg src = RID_NONE, osrc = RID_NONE;
  int32_t k = 0;
  if (ir->r == RID_SINK)
    return;
  /* The IRT_I16/IRT_U16 stores should never be simplified for constant
  ** values since mov word [mem], imm16 has a length-changing prefix.
  */
  if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
      !asm_isk32(as, ir->op2, &k)) {
    RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
                    (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
    src = osrc = ra_alloc1(as, ir->op2, allow8);
    if (!LJ_64 && !rset_test(allow8, src)) {  /* Already in wrong register. */
      rset_clear(allow, osrc);
      src = ra_scratch(as, allow8);
    }
    rset_clear(allow, src);
  }
  if (ir->o == IR_FSTORE) {
    asm_fusefref(as, IR(ir->op1), allow);
  } else {
    asm_fusexref(as, ir->op1, allow);
    if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
  }
  if (ra_hasreg(src)) {
    x86Op xo;
    switch (irt_type(ir->t)) {
    case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
    case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
    case IRT_NUM: xo = XO_MOVSDto; break;
    case IRT_FLOAT: xo = XO_MOVSSto; break;
#if LJ_64
    case IRT_LIGHTUD: lua_assert(0);  /* NYI: mask 64 bit lightuserdata. */
#endif
    default:
      if (LJ_64 && irt_is64(ir->t))
        src |= REX_64;
      else
        lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
      xo = XO_MOVto;
      break;
    }
    emit_mrm(as, xo, src, RID_MRM);
    if (!LJ_64 && src != osrc) {
      ra_noweak(as, osrc);
      emit_rr(as, XO_MOV, src, osrc);
    }
  } else {
    if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
      emit_i8(as, k);
      emit_mrm(as, XO_MOVmib, 0, RID_MRM);
    } else {
      lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
                 irt_isaddr(ir->t));
      emit_i32(as, k);
      emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
    }
  }
}
#if LJ_64
static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
{
  if (ra_used(ir) || typecheck) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (typecheck) {
      Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
      asm_guardcc(as, CC_NE);
      emit_i8(as, -2);
      emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
      emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
      emit_rr(as, XO_MOV, tmp|REX_64, dest);
    }
    return dest;
  } else {
    return RID_NONE;
  }
}
#endif
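/* The check above relies on the NaN-tagged layout of 64 bit lightuserdata:
** arithmetic-shifting the tagged value right by 47 must yield exactly -2 for
** a canonically tagged pointer; any other bit pattern takes the guard exit.
*/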
static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
             (LJ_DUALNUM && irt_isint(ir->t)));
#if LJ_64
  if (irt_islightud(ir->t)) {
    Reg dest = asm_load_lightud64(as, ir, 1);
    if (ra_hasreg(dest)) {
      asm_fuseahuref(as, ir->op1, RSET_GPR);
      emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
    }
    return;
  } else
#endif
  if (ra_used(ir)) {
    RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
    Reg dest = ra_dest(as, ir, allow);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM);
  } else {
    asm_fuseahuref(as, ir->op1, RSET_GPR);
  }
  /* Always do the type check, even if the load result is unused. */
  as->mrm.ofs += 4;
  asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
  if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
    lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
    emit_u32(as, LJ_TISNUM);
    emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
  } else {
    emit_i8(as, irt_toitype(ir->t));
    emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
  }
}
static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r == RID_SINK)
    return;
  if (irt_isnum(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    emit_mrm(as, XO_MOVSDto, src, RID_MRM);
#if LJ_64
  } else if (irt_islightud(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
    emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
#endif
  } else {
    IRIns *irr = IR(ir->op2);
    RegSet allow = RSET_GPR;
    Reg src = RID_NONE;
    if (!irref_isk(ir->op2)) {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
    }
    asm_fuseahuref(as, ir->op1, allow);
    if (ra_hasreg(src)) {
      emit_mrm(as, XO_MOVto, src, RID_MRM);
    } else if (!irt_ispri(irr->t)) {
      lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
      emit_i32(as, irr->i);
      emit_mrm(as, XO_MOVmi, 0, RID_MRM);
    }
    as->mrm.ofs += 4;
    emit_i32(as, (int32_t)irt_toitype(ir->t));
    emit_mrm(as, XO_MOVmi, 0, RID_MRM);
  }
}
static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  IRType1 t = ir->t;
  Reg base;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
  lua_assert(LJ_DUALNUM ||
             !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    Reg left = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, left);  /* Frees dest reg. Do this before base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    emit_rmro(as, XMM_MOVRM(as), left, base, ofs);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
#if LJ_64
  } else if (irt_islightud(t)) {
    Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
    if (ra_hasreg(dest)) {
      base = ra_alloc1(as, REF_BASE, RSET_GPR);
      emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
    }
    return;
#endif
  } else if (ra_used(ir)) {
    RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
    Reg dest = ra_dest(as, ir, allow);
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      t.irt = irt_isint(t) ? IRT_NUM : IRT_INT;  /* Check for original type. */
      emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs);
    } else if (irt_isnum(t)) {
      emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
    } else {
      emit_rmro(as, XO_MOV, dest, base, ofs);
    }
  } else {
    if (!(ir->op2 & IRSLOAD_TYPECHECK))
      return;  /* No type check: avoid base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
  }
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    /* Need type check, even if the load result is unused. */
    asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
    if (LJ_64 && irt_type(t) >= IRT_NUM) {
      lua_assert(irt_isinteger(t) || irt_isnum(t));
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
    } else {
      emit_i8(as, irt_toitype(t));
      emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
    }
  }
}
/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
  CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
              lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[2];
  lua_assert(sz != CTSIZE_INVALID);

  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
#if LJ_64
    Reg r64 = sz == 8 ? REX_64 : 0;
    if (irref_isk(ir->op2)) {
      IRIns *irk = IR(ir->op2);
      uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 :
                                         (uint64_t)(uint32_t)irk->i;
      if (sz == 4 || checki32((int64_t)k)) {
        emit_i32(as, (int32_t)k);
        emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
      } else {
        emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
        emit_loadu64(as, RID_ECX, k);
      }
    } else {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
    }
#else
    int32_t ofs = sizeof(GCcdata);
    if (sz == 8) {
      ofs += 4; ir++;
      lua_assert(ir->o == IR_HIOP);
    }
    do {
      if (irref_isk(ir->op2)) {
        emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
      } else {
        Reg r = ra_alloc1(as, ir->op2, allow);
        emit_movtomro(as, r, RID_RET, ofs);
        rset_clear(allow, r);
      }
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    } while (1);
#endif
    lua_assert(sz == 4 || sz == 8);
  }

  /* Combine initialization of marked, gct and ctypeid. */
  emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
  emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
           (int32_t)((~LJ_TCDATA<<8)+(ctypeid<<16)));
  emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
  emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);

  asm_gencall(as, ci, args);
  emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif
/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  MCLabel l_end = emit_label(as);
  emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, tmp, gc.grayagain);
  emit_i8(as, ~LJ_GC_BLACK);
  emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
}
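/* Forward order of the barrier above: test the black bit in t->marked and
** skip to l_end if clear; otherwise clear the black bit and link the table
** into g->gc.grayagain, i.e. a black table that gains a new key is turned
** gray again (backward barrier) instead of marking the stored value.
*/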
static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
  obj = IR(ir->op1)->r;
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_WHITES);
  if (irref_isk(ir->op2)) {
    GCobj *vp = ir_kgc(IR(ir->op2));
    emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
  } else {
    Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
    emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
  }
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
            (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
}
/* -- FP/int arithmetic and logic operations ------------------------------ */

/* Load reference onto x87 stack. Force a spill to memory if needed. */
static void asm_x87load(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_KNUM) {
    cTValue *tv = ir_knum(ir);
    if (tvispzero(tv))  /* Use fldz only for +0. */
      emit_x87op(as, XI_FLDZ);
    else if (tvispone(tv))
      emit_x87op(as, XI_FLD1);
    else
      emit_rma(as, XO_FLDq, XOg_FLDq, tv);
  } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
             !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
    IRIns *iri = IR(ir->op1);
    emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
  } else {
    emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
  }
}
/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
static int fpmjoin_pow(ASMState *as, IRIns *ir)
{
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
        irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      /* The modified regs must match with the *.dasc implementation. */
      RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
      IRIns *irx;
      if (ra_hasreg(ir->r))
        rset_clear(drop, ir->r);  /* Dest reg handled below. */
      ra_evictset(as, drop);
      ra_destreg(as, ir, RID_XMM0);
      emit_call(as, lj_vm_pow_sse);
      irx = IR(irpp->op1);
      if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
        irx->r = RID_INIT;  /* Avoid allocating xmm1 for x. */
      ra_left(as, RID_XMM0, irpp->op1);
      ra_left(as, RID_XMM1, irp->op2);
      return 1;
    }
  }
  return 0;
}
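/* The matched pattern is the split form of x^y == exp2(y*log2(x)); when the
** intermediate MUL and LOG2 results are otherwise unused, the three ops are
** re-fused into one lj_vm_pow_sse call with x in xmm0 and y in xmm1.
*/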
static void asm_fpmath(ASMState *as, IRIns *ir)
{
  IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER;
  if (fpm == IRFPM_SQRT) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
    emit_mrm(as, XO_SQRTSD, dest, left);
  } else if (fpm <= IRFPM_TRUNC) {
    if (as->flags & JIT_F_SSE4_1) {  /* SSE4.1 has a rounding instruction. */
      Reg dest = ra_dest(as, ir, RSET_FPR);
      Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
      /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
      ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
      ** This is atrocious, but the alternatives are much worse.
      */
      /* Round down/up/trunc == 1001/1010/1011. */
      emit_i8(as, 0x09 + fpm);
      emit_mrm(as, XO_ROUNDSD, dest, left);
      if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
        as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f;  /* Swap 0F and REX. */
      }
      *--as->mcp = 0x66;  /* 1st byte of ROUNDSD opcode. */
    } else {  /* Call helper functions for SSE2 variant. */
      /* The modified regs must match with the *.dasc implementation. */
      RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
      if (ra_hasreg(ir->r))
        rset_clear(drop, ir->r);  /* Dest reg handled below. */
      ra_evictset(as, drop);
      ra_destreg(as, ir, RID_XMM0);
      emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
                    fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
      ra_left(as, RID_XMM0, ir->op1);
    }
  } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) {
    /* Rejoined to pow(). */
  } else {  /* Handle x87 ops. */
    int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
    Reg dest = ir->r;
    if (ra_hasreg(dest)) {
      ra_free(as, dest);
      ra_modified(as, dest);
      emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs);
    }
    emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
    switch (fpm) {  /* st0 = lj_vm_*(st0) */
    case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break;
    case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break;
    case IRFPM_SIN: emit_x87op(as, XI_FSIN); break;
    case IRFPM_COS: emit_x87op(as, XI_FCOS); break;
    case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break;
    case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10:
      /* Note: the use of fyl2xp1 would be pointless here. When computing
      ** log(1.0+eps) the precision is already lost after 1.0 is added.
      ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
      */
      emit_x87op(as, XI_FYL2X); break;
    case IRFPM_OTHER:
      switch (ir->o) {
      case IR_ATAN2:
        emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break;
      case IR_LDEXP:
        emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break;
      default: lua_assert(0); break;
      }
      break;
    default: lua_assert(0); break;
    }
    asm_x87load(as, ir->op1);
    switch (fpm) {
    case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break;
    case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break;
    case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break;
    default: break;
    }
    if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2);
  }
}
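/* Byte-level sketch of the ROUNDSD workaround above (informational):
** emit_mrm() lays down the pretended 3-byte opcode 0F 3A 0B /r (plus the
** rounding-mode imm8), and the 0x66 prefix is then prepended by hand. On
** x64 a REX prefix may have been inserted after the 0F, so the swap
** restores the required byte order: 66 REX 0F 3A 0B /r ib.
*/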
static void asm_fppowi(ASMState *as, IRIns *ir)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_XMM0);
  emit_call(as, lj_vm_powi_sse);
  ra_left(as, RID_XMM0, ir->op1);
  ra_left(as, RID_EAX, ir->op2);
}
#if LJ_64 && LJ_HASFFI
static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}
#endif
static void asm_intmod(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}
static int asm_swapops(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  lua_assert(ra_noreg(irr->r));
  if (!irm_iscomm(lj_ir_mode[ir->o]))
    return 0;  /* Can't swap non-commutative operations. */
  if (irref_isk(ir->op2))
    return 0;  /* Don't swap constants to the left. */
  if (ra_hasreg(irl->r))
    return 1;  /* Swap if left already has a register. */
  if (ra_samehint(ir->r, irr->r))
    return 1;  /* Swap if dest and right have matching hints. */
  if (as->curins > as->loopref) {  /* In variant part? */
    if (ir->op2 < as->loopref && !irt_isphi(irr->t))
      return 0;  /* Keep invariants on the right. */
    if (ir->op1 < as->loopref && !irt_isphi(irl->t))
      return 1;  /* Swap invariants to the right. */
  }
  if (opisfusableload(irl->o))
    return 1;  /* Swap fusable loads to the right. */
  return 0;  /* Otherwise don't swap. */
}
static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
{
  IRRef lref = ir->op1;
  IRRef rref = ir->op2;
  RegSet allow = RSET_FPR;
  Reg dest;
  Reg right = IR(rref)->r;
  if (ra_hasreg(right)) {
    rset_clear(allow, right);
    ra_noweak(as, right);
  }
  dest = ra_dest(as, ir, allow);
  if (lref == rref) {
    right = dest;
  } else if (ra_noreg(right)) {
    if (asm_swapops(as, ir)) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    right = asm_fuseload(as, rref, rset_clear(allow, dest));
  }
  emit_mrm(as, xo, dest, right);
  ra_left(as, dest, lref);
}
static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
{
  IRRef lref = ir->op1;
  IRRef rref = ir->op2;
  RegSet allow = RSET_GPR;
  Reg dest, right;
  int32_t k = 0;
  if (as->flagmcp == as->mcp) {  /* Drop test r,r instruction. */
    as->flagmcp = NULL;
    as->mcp += (LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2;
  }
  right = IR(rref)->r;
  if (ra_hasreg(right)) {
    rset_clear(allow, right);
    ra_noweak(as, right);
  }
  dest = ra_dest(as, ir, allow);
  if (lref == rref) {
    right = dest;
  } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
    if (asm_swapops(as, ir)) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
  }
  if (irt_isguard(ir->t))  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_O);
  if (xa != XOg_X_IMUL) {
    if (ra_hasreg(right))
      emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
    else
      emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
  } else if (ra_hasreg(right)) {  /* IMUL r, mrm. */
    emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
  } else {  /* IMUL r, r, k. */
    /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
    Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
    x86Op xo;
    if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
    } else { emit_i32(as, k); xo = XO_IMULi; }
    emit_mrm(as, xo, REX_64IR(ir, dest), left);
    return;
  }
  ra_left(as, dest, lref);
}
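/* Sketch of the flag-reuse optimization above (informational): for IR like
**   x = a - b;  guard(x == 0)
** the comparison emitted "test x,x" and set as->flagmcp. Since sub sets
** ZF itself, the now-redundant test is dropped by advancing as->mcp over
** it: 3 bytes when it starts with a REX prefix (byte < XI_TESTb), else 2.
*/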
/* LEA is really a 4-operand ADD with an independent destination register,
** up to two source registers and an immediate. One register can be scaled
** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
** instructions into one.
**
** Currently only a few common cases are supported:
** - 3-operand ADD: y = a+b; y = a+k   with a and b already allocated
** - Left ADD fusion:  y = (a+b)+k; y = (a+k)+b
** - Right ADD fusion: y = a+(b+k)
** The omitted variants have already been reduced by FOLD.
**
** There are more fusion opportunities, like gathering shifts or joining
** common references. But these are probably not worth the trouble, since
** array indexing is not decomposed and already makes use of all fields
** of the ModRM operand.
*/
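/* For example (a sketch, not emitted verbatim):
**   y = a+b      ->  lea y, [a+b]
**   y = a+k      ->  lea y, [a+k]
**   y = (a+b)+k  ->  lea y, [a+b+k]   ; likewise y = a+(b+k)
** Base, index (scale 1) and displacement of a single ModRM operand cover
** the whole expression without clobbering a or b.
*/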
static int asm_lea(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  RegSet allow = RSET_GPR;
  Reg dest;
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = 0;
  if (ra_hasreg(irl->r)) {
    rset_clear(allow, irl->r);
    ra_noweak(as, irl->r);
    as->mrm.base = irl->r;
    if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
      /* The PHI renaming logic does a better job in some cases. */
      if (ra_hasreg(ir->r) &&
          ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
           (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
        return 0;
      if (irref_isk(ir->op2)) {
        as->mrm.ofs = irr->i;
      } else {
        rset_clear(allow, irr->r);
        ra_noweak(as, irr->r);
        as->mrm.idx = irr->r;
      }
    } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
               irref_isk(irr->op2)) {
      Reg idx = ra_alloc1(as, irr->op1, allow);
      rset_clear(allow, idx);
      as->mrm.idx = (uint8_t)idx;
      as->mrm.ofs = IR(irr->op2)->i;
    } else {
      return 0;
    }
  } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
             (irref_isk(ir->op2) || irref_isk(irl->op2))) {
    Reg idx, base = ra_alloc1(as, irl->op1, allow);
    rset_clear(allow, base);
    as->mrm.base = (uint8_t)base;
    if (irref_isk(ir->op2)) {
      as->mrm.ofs = irr->i;
      idx = ra_alloc1(as, irl->op2, allow);
    } else {
      as->mrm.ofs = IR(irl->op2)->i;
      idx = ra_alloc1(as, ir->op2, allow);
    }
    rset_clear(allow, idx);
    as->mrm.idx = (uint8_t)idx;
  } else {
    return 0;
  }
  dest = ra_dest(as, ir, allow);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
  return 1;  /* Success. */
}
static void asm_add(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_ADDSD);
  else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
           irt_is64(ir->t) || !asm_lea(as, ir))
    asm_intarith(as, ir, XOg_ADD);
}
static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
  ra_left(as, dest, ir->op1);
}
static void asm_min_max(ASMState *as, IRIns *ir, int cc)
{
  Reg right, dest = ra_dest(as, ir, RSET_GPR);
  IRRef lref = ir->op1, rref = ir->op2;
  if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
  right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
  emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
  emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
  ra_left(as, dest, lref);
}
static void asm_bitswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
                    REX_64IR(ir, 0), dest, 0, as->mcp, 1);
  ra_left(as, dest, ir->op1);
}
static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
{
  IRRef rref = ir->op2;
  IRIns *irr = IR(rref);
  Reg dest;
  if (irref_isk(rref)) {  /* Constant shifts. */
    int shift;
    dest = ra_dest(as, ir, RSET_GPR);
    shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
    switch (shift) {
    case 0: break;
    case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
    default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
    }
  } else {  /* Variable shifts implicitly use register cl (i.e. ecx). */
    Reg right;
    dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
    if (dest == RID_ECX) {
      dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
      emit_rr(as, XO_MOV, RID_ECX, dest);
    }
    right = IR(rref)->r;
    if (ra_noreg(right))
      right = ra_allocref(as, rref, RID2RSET(RID_ECX));
    else if (right != RID_ECX)
      ra_scratch(as, RID2RSET(RID_ECX));
    emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
    ra_noweak(as, right);
    if (right != RID_ECX)
      emit_rr(as, XO_MOV, RID_ECX, right);
  }
  ra_left(as, dest, ir->op1);
  /*
  ** Note: avoid using the flags resulting from a shift or rotate!
  ** All of them cause a partial flag stall, except for r,1 shifts
  ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
  */
}
/* -- Comparisons --------------------------------------------------------- */

/* Virtual flags for unordered FP comparisons. */
#define VCC_U   0x1000          /* Unordered. */
#define VCC_P   0x2000          /* Needs extra CC_P branch. */
#define VCC_S   0x4000          /* Swap avoids CC_P branch. */
#define VCC_PS  (VCC_P|VCC_S)

/* Map of comparisons to flags. ORDER IR. */
#define COMPFLAGS(ci, cin, cu, cf)      ((ci)+((cu)<<4)+((cin)<<8)+(cf))
static const uint16_t asm_compmap[IR_ABC+1] = {
  /*                 signed non-eq unsigned flags */
  /* LT  */ COMPFLAGS(CC_GE, CC_G,  CC_AE, VCC_PS),
  /* GE  */ COMPFLAGS(CC_L,  CC_L,  CC_B,  0),
  /* LE  */ COMPFLAGS(CC_G,  CC_G,  CC_A,  VCC_PS),
  /* GT  */ COMPFLAGS(CC_LE, CC_L,  CC_BE, 0),
  /* ULT */ COMPFLAGS(CC_AE, CC_A,  CC_AE, VCC_U),
  /* UGE */ COMPFLAGS(CC_B,  CC_B,  CC_B,  VCC_U|VCC_PS),
  /* ULE */ COMPFLAGS(CC_A,  CC_A,  CC_A,  VCC_U),
  /* UGT */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS),
  /* EQ  */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
  /* NE  */ COMPFLAGS(CC_E,  CC_E,  CC_E,  VCC_U|VCC_P),
  /* ABC */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS)  /* Same as UGT. */
};
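/* Decoding example (informational), using the IR_LT entry
** COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS):
**   bits 0-3   CC_GE  -> signed cc to *exit* on (inverse of LT)
**   bits 4-7   CC_AE  -> unsigned/FP exit cc (used via cc >> 4)
**   bits 8-11  CC_G   -> exit cc without the equal case (used via cc >> 8)
**   bits 12+   VCC_PS -> unordered FP handling (extra jp or operand swap)
*/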
/* FP and integer comparisons. */
static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
{
  if (irt_isnum(ir->t)) {
    IRRef lref = ir->op1;
    IRRef rref = ir->op2;
    Reg left, right;
    MCLabel l_around;
    /*
    ** An extra CC_P branch is required to preserve ordered/unordered
    ** semantics for FP comparisons. This can be avoided by swapping
    ** the operands and inverting the condition (except for EQ and UNE).
    ** So always try to swap if possible.
    **
    ** Another option would be to swap operands to achieve better memory
    ** operand fusion. But it's unlikely that this outweighs the cost
    ** of the extra branches.
    */
    if (cc & VCC_S) {  /* Swap? */
      IRRef tmp = lref; lref = rref; rref = tmp;
      cc ^= (VCC_PS|(5<<4));  /* A <-> B, AE <-> BE, PS <-> none */
    }
    left = ra_alloc1(as, lref, RSET_FPR);
    right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
    l_around = emit_label(as);
    asm_guardcc(as, cc >> 4);
    if (cc & VCC_P) {  /* Extra CC_P branch required? */
      if (!(cc & VCC_U)) {
        asm_guardcc(as, CC_P);  /* Branch to exit for ordered comparisons. */
      } else if (l_around != as->invmcp) {
        emit_sjcc(as, CC_P, l_around);  /* Branch around for unordered. */
      } else {
        /* Patched to mcloop by asm_loop_fixup. */
        as->loopinv = 2;
        if (as->realign)
          emit_sjcc(as, CC_P, as->mcp);
        else
          emit_jcc(as, CC_P, as->mcp);
      }
    }
    emit_mrm(as, XO_UCOMISD, left, right);
  } else {
    IRRef lref = ir->op1, rref = ir->op2;
    IROp leftop = (IROp)(IR(lref)->o);
    Reg r64 = REX_64IR(ir, 0);
    int32_t imm = 0;
    lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
               irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t));
    /* Swap constants (only for ABC) and fusable loads to the right. */
    if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
      if ((cc & 0xc) == 0xc) cc ^= 0x53;  /* L <-> G, LE <-> GE */
      else if ((cc & 0xa) == 0x2) cc ^= 0x55;  /* A <-> B, AE <-> BE */
      lref = ir->op2; rref = ir->op1;
    }
    if (asm_isk32(as, rref, &imm)) {
      IRIns *irl = IR(lref);
      /* Check whether we can use test ins. Not for unsigned, since CF=0. */
      int usetest = (imm == 0 && (cc & 0xa) != 0x2);
      if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
        /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
        Reg right, left = RID_NONE;
        RegSet allow = RSET_GPR;
        if (!asm_isk32(as, irl->op2, &imm)) {
          left = ra_alloc1(as, irl->op2, allow);
          rset_clear(allow, left);
        } else {  /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */
          IRIns *irll = IR(irl->op1);
          if (opisfusableload((IROp)irll->o) &&
              (irt_isi8(irll->t) || irt_isu8(irll->t))) {
            IRType1 origt = irll->t;  /* Temporarily flip types. */
            irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
            as->curins--;  /* Skip to BAND to avoid failing in noconflict(). */
            right = asm_fuseload(as, irl->op1, RSET_GPR);
            as->curins++;
            irll->t = origt;
            if (right != RID_MRM) goto test_nofuse;
            /* Fusion succeeded, emit test byte mrm, imm8. */
            asm_guardcc(as, cc);
            emit_i8(as, (imm & 0xff));
            emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
            return;
          }
        }
        as->curins--;  /* Skip to BAND to avoid failing in noconflict(). */
        right = asm_fuseloadm(as, irl->op1, allow, r64);
        as->curins++;  /* Undo the above. */
      test_nofuse:
        asm_guardcc(as, cc);
        if (ra_noreg(left)) {
          emit_i32(as, imm);
          emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
        } else {
          emit_mrm(as, XO_TEST, r64 + left, right);
        }
      } else {
        Reg left;
        if (opisfusableload((IROp)irl->o) &&
            ((irt_isu8(irl->t) && checku8(imm)) ||
             ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
             (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
          /* Only the IRT_INT case is fused by asm_fuseload.
          ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
          ** are handled here.
          ** Note that cmp word [mem], imm16 should not be generated,
          ** since it has a length-changing prefix. Compares of a word
          ** against a sign-extended imm8 are ok, however.
          */
          IRType1 origt = irl->t;  /* Temporarily flip types. */
          irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
          left = asm_fuseload(as, lref, RSET_GPR);
          irl->t = origt;
          if (left == RID_MRM) {  /* Fusion succeeded? */
            if (irt_isu8(irl->t) || irt_isu16(irl->t))
              cc >>= 4;  /* Need unsigned compare. */
            asm_guardcc(as, cc);
            emit_i8(as, imm);
            emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
                         XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
            return;
          }  /* Otherwise handle register case as usual. */
        } else {
          left = asm_fuseloadm(as, lref,
                               irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
        }
        asm_guardcc(as, cc);
        if (usetest && left != RID_MRM) {
          /* Use test r,r instead of cmp r,0. */
          x86Op xo = XO_TEST;
          if (irt_isu8(ir->t)) {
            lua_assert(ir->o == IR_EQ || ir->o == IR_NE);
            xo = XO_TESTb;
            if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
              if (LJ_64) {
                left |= FORCE_REX;
              } else {
                emit_i32(as, 0xff);
                emit_mrm(as, XO_GROUP3, XOg_TEST, left);
                return;
              }
            }
          }
          emit_rr(as, xo, r64 + left, left);
          if (irl+1 == ir)  /* Referencing previous ins? */
            as->flagmcp = as->mcp;  /* Set flag to drop test r,r if possible. */
        } else {
          emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
        }
      }
    } else {
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
      asm_guardcc(as, cc);
      emit_mrm(as, XO_CMP, r64 + left, right);
    }
  }
}
#if LJ_32 && LJ_HASFFI
/* 64 bit integer comparisons in 32 bit mode. */
static void asm_comp_int64(ASMState *as, IRIns *ir)
{
  uint32_t cc = asm_compmap[(ir-1)->o];
  RegSet allow = RSET_GPR;
  Reg lefthi = RID_NONE, leftlo = RID_NONE;
  Reg righthi = RID_NONE, rightlo = RID_NONE;
  MCLabel l_around;
  x86ModRM mrm;

  as->curins--;  /* Skip loword ins. Avoids failing in noconflict(), too. */

  /* Allocate/fuse hiword operands. */
  if (irref_isk(ir->op2)) {
    lefthi = asm_fuseload(as, ir->op1, allow);
  } else {
    lefthi = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, lefthi);
    righthi = asm_fuseload(as, ir->op2, allow);
    if (righthi == RID_MRM) {
      if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
      if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
    } else {
      rset_clear(allow, righthi);
    }
  }
  mrm = as->mrm;  /* Save state for hiword instruction. */

  /* Allocate/fuse loword operands. */
  if (irref_isk((ir-1)->op2)) {
    leftlo = asm_fuseload(as, (ir-1)->op1, allow);
  } else {
    leftlo = ra_alloc1(as, (ir-1)->op1, allow);
    rset_clear(allow, leftlo);
    rightlo = asm_fuseload(as, (ir-1)->op2, allow);
  }

  /* All register allocations must be performed _before_ this point. */
  l_around = emit_label(as);
  as->invmcp = as->flagmcp = NULL;  /* Cannot use these optimizations. */

  /* Loword comparison and branch. */
  asm_guardcc(as, cc >> 4);  /* Always use unsigned compare for loword. */
  if (ra_noreg(rightlo)) {
    int32_t imm = IR((ir-1)->op2)->i;
    if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
      emit_rr(as, XO_TEST, leftlo, leftlo);
    else
      emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
  } else {
    emit_mrm(as, XO_CMP, leftlo, rightlo);
  }

  /* Hiword comparison and branches. */
  if ((cc & 15) != CC_NE)
    emit_sjcc(as, CC_NE, l_around);  /* Hiword unequal: skip loword compare. */
  if ((cc & 15) != CC_E)
    asm_guardcc(as, cc >> 8);  /* Hiword compare without equality check. */
  as->mrm = mrm;  /* Restore state. */
  if (ra_noreg(righthi)) {
    int32_t imm = IR(ir->op2)->i;
    if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
      emit_rr(as, XO_TEST, lefthi, lefthi);
    else
      emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
  } else {
    emit_mrm(as, XO_CMP, lefthi, righthi);
  }
}
#endif
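/* Sketch of the emitted ladder (informational), e.g. for a guarded signed
** 64 bit "a < b" in runtime order:
**   cmp  a.hi, b.hi
**   jg   ->exit     ; inverse hiword cc without the equal case (cc >> 8)
**   jne  l_around   ; hiwords unequal (thus a < b): skip loword compare
**   cmp  a.lo, b.lo
**   jae  ->exit     ; loword always compared unsigned (cc >> 4)
** l_around:
*/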
/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_32 && LJ_HASFFI
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    if (usehi || uselo) {
      if (irt_isfp(ir->t))
        asm_conv_fp_int64(as, ir);
      else
        asm_conv_int64_fp(as, ir);
    }
    as->curins--;  /* Always skip the CONV. */
    return;
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    asm_comp_int64(as, ir);
    return;
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_fxstore(as, ir);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_ADD:
    as->flagmcp = NULL;
    as->curins--;
    asm_intarith(as, ir, XOg_ADC);
    asm_intarith(as, ir-1, XOg_ADD);
    break;
  case IR_SUB:
    as->flagmcp = NULL;
    as->curins--;
    asm_intarith(as, ir, XOg_SBB);
    asm_intarith(as, ir-1, XOg_SUB);
    break;
  case IR_NEG: {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    emit_rr(as, XO_GROUP3, XOg_NEG, dest);
    emit_i8(as, 0);
    emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
    ra_left(as, dest, ir->op1);
    as->curins--;
    asm_neg_not(as, ir-1, XOg_NEG);
    break;
    }
  case IR_CALLN:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by CNEWI itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);  /* Unused on x64 or without FFI. */
#endif
}
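/* Sketch (informational): a 64 bit add in 32 bit mode is emitted as the
** paired loword/hiword instructions
**   add  dest.lo, rhs.lo
**   adc  dest.hi, rhs.hi
** which is why as->flagmcp is cleared first: the carry between the two
** halves must not be destroyed by the test r,r dropping optimization.
*/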
/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore eax. */
  Reg pbase = irp ? irp->r : RID_BASE;
  Reg r = allow ? rset_pickbot(allow) : RID_EAX;
  emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
  else
    ra_modified(as, r);
  emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot));
  if (ra_hasreg(pbase) && pbase != r)
    emit_rr(as, XO_ARITH(XOg_SUB), r, pbase);
  else
    emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
              ptr2addr(&J2G(as->J)->jit_base));
  emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack));
  emit_getgl(as, r, jit_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
}
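/* Sketch of the emitted check (runtime order, informational; eax is the
** fallback when no temp. register is free):
**   mov  [esp], eax          ; spill, fallback case only
**   mov  eax, [g->jit_L]
**   mov  eax, L->maxstack
**   sub  eax, BASE           ; or sub eax, [g->jit_base]
**   cmp  eax, 8*topslot
**   mov  eax, [esp]          ; restore, fallback case only
**   jb   ->exit(exitno)
*/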
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
    } else {
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
                 (LJ_DUALNUM && irt_isinteger(ir->t)));
      if (!irref_isk(ref)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
        emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
      } else if (!irt_ispri(ir->t)) {
        emit_movmroi(as, RID_BASE, ofs, ir->i);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s != 0)  /* Do not overwrite link to previous frame. */
          emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
      } else {
        if (!(LJ_64 && irt_islightud(ir->t)))
          emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
      }
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}
/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_rr(as, XO_TEST, RID_RET, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  emit_loada(as, tmp, J2G(as->J));
  emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_sjcc(as, CC_B, l_end);
  emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold);
  emit_getgl(as, tmp, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}
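/* Sketch of the emitted check (runtime order, informational):
**   mov  tmp, [g->gc.total]
**   cmp  tmp, [g->gc.threshold]
**   jb   l_end
**   <call lj_gc_step_jit(g, steps)>
**   test eax, eax
**   jne  ->exit              ; GCSatomic or GCSfinalize: leave the trace
** l_end:
*/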
/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->realign) {  /* Realigned loops use short jumps. */
    as->realign = NULL;  /* Stop another retry. */
    lua_assert(((intptr_t)target & 15) == 0);
    if (as->loopinv) {  /* Inverted loop branch? */
      p -= 5;
      *(int32_t *)(p+1) = jmprel(p+5, target);
      lua_assert(target - p >= -128);
      p[-1] = (MCode)(target - p);  /* Patch sjcc. */
      if (as->loopinv == 2)
        p[-3] = (MCode)(target - p + 2);  /* Patch opt. short jp. */
    } else {
      lua_assert(target - p >= -128);
      p[-1] = (MCode)(int8_t)(target - p);  /* Patch short jmp. */
      p[-2] = XI_JMPs;
    }
  } else {
    MCode *newloop;
    p[-5] = XI_JMP;
    if (as->loopinv) {  /* Inverted loop branch? */
      /* asm_guardcc already inverted the jcc and patched the jmp. */
      p -= 5;
      newloop = target+4;
      *(int32_t *)(p-4) = (int32_t)(target - p);  /* Patch jcc. */
      if (as->loopinv == 2) {
        *(int32_t *)(p-10) = (int32_t)(target - p + 6);  /* Patch opt. jp. */
        newloop = target+8;
      }
    } else {  /* Otherwise just patch jmp. */
      *(int32_t *)(p-4) = (int32_t)(target - p);
      newloop = target+3;
    }
    /* Realign small loops and shorten the loop branch. */
    if (newloop >= p - 128) {
      as->realign = newloop;  /* Force a retry and remember alignment. */
      as->curins = as->stopins;  /* Abort asm_trace now. */
      as->T->nins = as->orignins;  /* Remove any added renames. */
    }
  }
}
/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_rr(as, XO_MOV, r, RID_BASE);
  }
}
/* Coalesce or reload BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      rset_clear(allow, r);  /* Mark same BASE register as coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      rset_clear(allow, irp->r);
      emit_rr(as, XO_MOV, r, irp->r);  /* Move from coalesced parent reg. */
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return allow;
}
/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
  MCode *p = as->mctop;
  MCode *target, *q;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
  } else {
    MCode *p1;
    /* Patch stack adjustment. */
    if (checki8(spadj)) {
      p -= 3;
      p1 = p-6;
      *p1 = (MCode)spadj;
    } else {
      p1 = p-9;
      *(int32_t *)p1 = spadj;
    }
    if ((as->flags & JIT_F_LEA_AGU)) {
#if LJ_64
      p1[-4] = 0x48;
#endif
      p1[-3] = (MCode)XI_LEA;
      p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
      p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
    } else {
#if LJ_64
      p1[-3] = 0x48;
#endif
      p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
      p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
    }
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = XI_JMP;
  /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
  for (q = as->mctop-1; q >= p; q--)
    *q = XI_NOP;
  as->mctop = p;
}
/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop;
  /* Realign and leave room for backwards loop branch or exit branch. */
  if (as->realign) {
    int i = ((int)(intptr_t)as->realign) & 15;
    /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
    while (i-- > 0)
      *--p = XI_NOP;
    as->mctop = p;
    p -= (as->loopinv ? 5 : 2);  /* Space for short/near jmp. */
  } else {
    p -= 5;  /* Space for exit branch (near jmp). */
  }
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
    as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
    as->invmcp = NULL;
  }
}
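/* Sketch of the reserved tail (informational): asm_tail_prep leaves room
** that asm_tail_fixup later overwrites with, e.g. on x86:
**   add esp, spadj           ; or lea esp, [esp+spadj] with JIT_F_LEA_AGU
**   jmp ->link/interpreter   ; near jmp, 5 bytes
** Any unused bytes up to as->mctop are filled with nops.
*/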
/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;

  /* Guarded assertions. */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_EQ: case IR_NE: case IR_ABC:
    asm_comp(as, ir, asm_compmap[ir->o]);
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
  case IR_BSWAP: asm_bitswap(as, ir); break;

  case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
  case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
  case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;

  case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
  case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
  case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
  case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
  case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;

  /* Arithmetic ops. */
  case IR_ADD: asm_add(as, ir); break;
  case IR_SUB:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_SUBSD);
    else  /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
      asm_intarith(as, ir, XOg_SUB);
    break;
  case IR_MUL:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_MULSD);
    else
      asm_intarith(as, ir, XOg_X_IMUL);
    break;
  case IR_DIV:
#if LJ_64 && LJ_HASFFI
    if (!irt_isnum(ir->t))
      asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
                                             IRCALL_lj_carith_divu64);
    else
#endif
      asm_fparith(as, ir, XO_DIVSD);
    break;
  case IR_MOD:
#if LJ_64 && LJ_HASFFI
    if (!irt_isint(ir->t))
      asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
                                             IRCALL_lj_carith_modu64);
    else
#endif
      asm_intmod(as, ir);
    break;

  case IR_NEG:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_XORPS);
    else
      asm_neg_not(as, ir, XOg_NEG);
    break;
  case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;

  case IR_MIN:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_MINSD);
    else
      asm_min_max(as, ir, CC_G);
    break;
  case IR_MAX:
    if (irt_isnum(ir->t))
      asm_fparith(as, ir, XO_MAXSD);
    else
      asm_min_max(as, ir, CC_L);
    break;

  case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
    asm_fpmath(as, ir);
    break;
  case IR_POW:
#if LJ_64 && LJ_HASFFI
    if (!irt_isnum(ir->t))
      asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
                                             IRCALL_lj_carith_powu64);
    else
#endif
      asm_fppowi(as, ir);
    break;

  /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
  case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
  case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
  case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_TOBIT: asm_tobit(as, ir); break;
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}
/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  int nslots;
  asm_collectargs(as, ir, ci, args);
  nslots = asm_count_call_slots(as, ci, args);
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
#if LJ_64
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
#else
  return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
#endif
}
/* Target-specific setup. */
static void asm_setup_target(ASMState *as)
{
  asm_exitstub_setup(as, as->T->nsnap);
}
/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MSize len = T->szmcode;
  MCode *px = exitstub_addr(J, exitno) - 6;
  MCode *pe = p+len-6;
  uint32_t stateaddr = u32ptr(&J2G(J)->vmstate);
  if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
    *(int32_t *)(p+len-4) = jmprel(p+len, target);
  /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
  for (; p < pe; p++)
    if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) {
      p += LJ_64 ? 11 : 10;
      break;
    }
  lua_assert(p < pe);
  for (; p < pe; p++) {
    if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) {
      *(int32_t *)(p+2) = jmprel(p+6, target);
      p += 5;
    }
  }
  lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
  lj_mcode_patch(J, mcarea, 1);
}
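/* Sketch of the scan above (informational): exit branches are near
** conditional jumps encoded as 0F 8x rel32. Reading two bytes
** little-endian and masking with 0xf0ff yields 0x800f for any jcc,
** and only branches whose rel32 points at the exit stub px are
** redirected to the new target.
*/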