/*
** x86/x64 instruction emitter.
** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Emit basic instructions --------------------------------------------- */

#define MODRM(mode, r1, r2)  ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))

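/* Example (informal): MODRM(XM_REG, RID_EAX, RID_ECX) should yield 0xc1,
** i.e. the ModRM byte with mod=11 (register direct), reg=eax, rm=ecx,
** assuming XM_REG == 0xc0 and the usual numbering RID_EAX == 0, RID_ECX == 1.
*/
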
#if LJ_64
#define REXRB(p, rr, rb) \
    { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
      if (rex != 0x40) *--(p) = rex; }
#define FORCE_REX  0x200
#define REX_64     (FORCE_REX|0x080000)
#else
#define REXRB(p, rr, rb)  ((void)0)
#define FORCE_REX  0
#define REX_64     0
#endif

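/* Sketch of the extra bits a Reg may carry for the emitter below:
** FORCE_REX (0x200) forces an otherwise empty REX prefix (e.g. to make
** spl/bpl/sil/dil addressable), and REX_64 (0x080000) requests REX.W.
** emit_op() ors (rr >> 16) into the prefix byte, so 0x080000 >> 16 == 0x08
** is exactly the W bit.
*/
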
#define emit_i8(as, i)   (*--as->mcp = (MCode)(i))
#define emit_i32(as, i)  (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
#define emit_u32(as, u)  (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)

#define emit_x87op(as, xo) \
  (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)

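/* Note: as the decrements above suggest, machine code is generated backwards.
** as->mcp points to the start of the code emitted so far and every emit_*
** helper moves it further down, so an instruction's immediate/displacement
** bytes are stored before the opcode bytes that precede them in memory.
*/
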
/* op */
static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
                                 MCode *p, int delta)
{
  int n = (int8_t)xo;
#if defined(__GNUC__)
  if (__builtin_constant_p(xo) && n == -2)
    p[delta-2] = (MCode)(xo >> 24);
  else if (__builtin_constant_p(xo) && n == -3)
    *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
  else
#endif
    *(uint32_t *)(p+delta-5) = (uint32_t)xo;
  p += n + delta;
#if LJ_64
  {
    uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
    if (rex != 0x40) {
      rex |= (rr >> 16);
      if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
      else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
      *--p = (MCode)rex;
    }
  }
#else
  UNUSED(rr); UNUSED(rb); UNUSED(rx);
#endif
  return p;
}

/* op + modrm */
#define emit_opm(xo, mode, rr, rb, p, delta) \
  (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
   emit_op((xo), (rr), (rb), 0, (p), (delta)))

/* op + modrm + sib */
#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
  (p[-1] = MODRM((scale), (rx), (rb)), \
   p[-2] = MODRM((mode), (rr), RID_ESP), \
   emit_op((xo), (rr), (rb), (rx), (p), -1))

/* op r1, r2 */
static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
{
  MCode *p = as->mcp;
  as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
}

#if LJ_64 && defined(LUA_USE_ASSERT)
/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
static int32_t ptr2addr(const void *p)
{
  lua_assert((uintptr_t)p < (uintptr_t)0x80000000);
  return i32ptr(p);
}
#else
#define ptr2addr(p)  (i32ptr((p)))
#endif

/* op r, [addr] */
static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
{
  MCode *p = as->mcp;
  *(int32_t *)(p-4) = ptr2addr(addr);
#if LJ_64
  p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
  as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
#else
  as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
#endif
}

/* op r, [base+ofs] */
static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
{
  MCode *p = as->mcp;
  x86Mode mode;
  if (ra_hasreg(rb)) {
    if (ofs == 0 && (rb&7) != RID_EBP) {
      mode = XM_OFS0;
    } else if (checki8(ofs)) {
      *--p = (MCode)ofs;
      mode = XM_OFS8;
    } else {
      *(int32_t *)(p-4) = ofs;
      p -= 4;
      mode = XM_OFS32;
    }
    if ((rb&7) == RID_ESP)
      *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  } else {
    *(int32_t *)(p-4) = ofs;
#if LJ_64
    p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
    p -= 5;
    rb = RID_ESP;
#else
    p -= 4;
    rb = RID_EBP;
#endif
    mode = XM_OFS0;
  }
  as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
}

/* op r, [base+idx*scale+ofs] */
static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
                       x86Mode scale, int32_t ofs)
{
  MCode *p = as->mcp;
  x86Mode mode;
  if (ofs == 0 && (rb&7) != RID_EBP) {
    mode = XM_OFS0;
  } else if (checki8(ofs)) {
    *--p = (MCode)ofs;
    mode = XM_OFS8;
  } else {
    *(int32_t *)(p-4) = ofs;
    p -= 4;
    mode = XM_OFS32;
  }
  as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
}

/* op r, i */
static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
{
  MCode *p = as->mcp;
  x86Op xo;
  if (checki8(i)) {
    *--p = (MCode)i;
    xo = XG_TOXOi8(xg);
  } else {
    *(int32_t *)(p-4) = i;
    p -= 4;
    xo = XG_TOXOi(xg);
  }
  as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
}

/* op [base+ofs], i */
static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
                       int32_t i)
{
  x86Op xo;
  if (checki8(i)) {
    emit_i8(as, i);
    xo = XG_TOXOi8(xg);
  } else {
    emit_i32(as, i);
    xo = XG_TOXOi(xg);
  }
  emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
}

#define emit_shifti(as, xg, r, i) \
  (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))

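/* Example: emit_shifti(as, XOg_SHL, RID_EAX, 4) should emit c1 e0 04, i.e.
** shl eax, 4, assuming XO_SHIFTi == c1 and XOg_SHL == 4 (the /4 group).
*/
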
/* op rm */
static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
{
  MCode *p = as->mcp;
  x86Mode mode = XM_REG;
  if (rb == RID_MRM) {
    rb = as->mrm.base;
    if (rb == RID_NONE) {
      rb = RID_EBP;
      mode = XM_OFS0;
      p -= 4;
      *(int32_t *)p = as->mrm.ofs;
      if (as->mrm.idx != RID_NONE)
        goto mrmidx;
#if LJ_64
      *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
      rb = RID_ESP;
#endif
    } else {
      if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
        mode = XM_OFS0;
      } else if (checki8(as->mrm.ofs)) {
        *--p = (MCode)as->mrm.ofs;
        mode = XM_OFS8;
      } else {
        p -= 4;
        *(int32_t *)p = as->mrm.ofs;
        mode = XM_OFS32;
      }
      if (as->mrm.idx != RID_NONE) {
      mrmidx:
        as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
        return;
      }
      if ((rb&7) == RID_ESP)
        *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
    }
  }
  as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
}

/* op rm, i */
static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
{
  x86Op xo;
  if (checki8(i)) {
    emit_i8(as, i);
    xo = XG_TOXOi8(xg);
  } else {
    emit_i32(as, i);
    xo = XG_TOXOi(xg);
  }
  emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
}

/* -- Emit loads/stores --------------------------------------------------- */

/* Instruction selection for XMM moves. */
#define XMM_MOVRR(as)  ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS)
#define XMM_MOVRM(as)  ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD)

/* mov [base+ofs], i */
static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
{
  emit_i32(as, i);
  emit_rmro(as, XO_MOVmi, 0, base, ofs);
}

/* mov [base+ofs], r */
#define emit_movtomro(as, r, base, ofs) \
  emit_rmro(as, XO_MOVto, (r), (base), (ofs))

/* Get/set global_State fields. */
#define emit_opgl(as, xo, r, field) \
  emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
#define emit_getgl(as, r, field)  emit_opgl(as, XO_MOV, (r), field)
#define emit_setgl(as, r, field)  emit_opgl(as, XO_MOVto, (r), field)

#define emit_setvmstate(as, i) \
  (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))

/* mov r, i / xor r, r */
static void emit_loadi(ASMState *as, Reg r, int32_t i)
{
  /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */
  if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
                            (as->curins+1 < as->T->nins &&
                             IR(as->curins+1)->o == IR_HIOP)))) {
    emit_rr(as, XO_ARITH(XOg_XOR), r, r);
  } else {
    MCode *p = as->mcp;
    *(int32_t *)(p-4) = i;
    p[-5] = (MCode)(XI_MOVri+(r&7));
    p -= 5;
    REXRB(p, 0, r);
    as->mcp = p;
  }
}

#define emit_loada(as, r, addr) \
  emit_loadi(as, (r), ptr2addr((addr)))

#if LJ_64
/* mov r, imm64 or shorter 32 bit extended load. */
static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
{
  if (checku32(u64)) {  /* 32 bit load clears upper 32 bits. */
    emit_loadi(as, r, (int32_t)u64);
  } else if (checki32((int64_t)u64)) {  /* Sign-extended 32 bit load. */
    MCode *p = as->mcp;
    *(int32_t *)(p-4) = (int32_t)u64;
    as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
  } else {  /* Full-size 64 bit load. */
    MCode *p = as->mcp;
    *(uint64_t *)(p-8) = u64;
    p[-9] = (MCode)(XI_MOVri+(r&7));
    p[-10] = 0x48 + ((r>>3)&1);
    p -= 10;
    as->mcp = p;
  }
}
#endif

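/* Example: the full-size form for RID_EAX and u64 = 0x1122334455667788 should
** emit 48 b8 88 77 66 55 44 33 22 11, i.e. mov rax, 0x1122334455667788
** (REX.W + b8+r, movabs); 0x48 + ((r>>3)&1) adds REX.B for r8..r15.
*/
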
/* movsd r, [&tv->n] / xorps r, r */
static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
{
  if (tvispzero(tv))  /* Use xor only for +0. */
    emit_rr(as, XO_XORPS, r, r);
  else
    emit_rma(as, XMM_MOVRM(as), r, &tv->n);
}

/* -- Emit control-flow instructions -------------------------------------- */

/* Label for short jumps. */
typedef MCode *MCLabel;

#if LJ_32 && LJ_HASFFI
/* jmp short target */
static void emit_sjmp(ASMState *as, MCLabel target)
{
  MCode *p = as->mcp;
  ptrdiff_t delta = target - p;
  lua_assert(delta == (int8_t)delta);
  p[-1] = (MCode)(int8_t)delta;
  p[-2] = XI_JMPs;
  as->mcp = p - 2;
}
#endif

/* jcc short target */
static void emit_sjcc(ASMState *as, int cc, MCLabel target)
{
  MCode *p = as->mcp;
  ptrdiff_t delta = target - p;
  lua_assert(delta == (int8_t)delta);
  p[-1] = (MCode)(int8_t)delta;
  p[-2] = (MCode)(XI_JCCs+(cc&15));
  as->mcp = p - 2;
}

/* jcc short (pending target) */
static MCLabel emit_sjcc_label(ASMState *as, int cc)
{
  MCode *p = as->mcp;
  p[-1] = 0;
  p[-2] = (MCode)(XI_JCCs+(cc&15));
  as->mcp = p - 2;
  return p;
}

/* Fixup jcc short target. */
static void emit_sfixup(ASMState *as, MCLabel source)
{
  source[-1] = (MCode)(as->mcp-source);
}

/* Return label pointing to current PC. */
#define emit_label(as)  ((as)->mcp)

/* Compute relative 32 bit offset for jump and call instructions. */
static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
{
  ptrdiff_t delta = target - p;
  lua_assert(delta == (int32_t)delta);
  return (int32_t)delta;
}

/* jcc target */
static void emit_jcc(ASMState *as, int cc, MCode *target)
{
  MCode *p = as->mcp;
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = (MCode)(XI_JCCn+(cc&15));
  p[-6] = 0x0f;
  as->mcp = p - 6;
}

/* jmp target */
static void emit_jmp(ASMState *as, MCode *target)
{
  MCode *p = as->mcp;
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = XI_JMP;
  as->mcp = p - 5;
}

/* call target */
static void emit_call_(ASMState *as, MCode *target)
{
  MCode *p = as->mcp;
#if LJ_64
  if (target-p != (int32_t)(target-p)) {
    /* Assumes RID_RET is never an argument to calls and always clobbered. */
    emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
    emit_loadu64(as, RID_RET, (uint64_t)target);
    return;
  }
#endif
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = XI_CALL;
  as->mcp = p - 5;
}

#define emit_call(as, f)  emit_call_(as, (MCode *)(void *)(f))

/* -- Emit generic operations --------------------------------------------- */

/* Use 64 bit operations to handle 64 bit IR types. */
#if LJ_64
#define REX_64IR(ir, r)  ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
#else
#define REX_64IR(ir, r)  (r)
#endif

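/* Example: on x64, REX_64IR(ir, RID_EAX) adds REX_64 (and thus a REX.W
** prefix) when ir->t is a 64 bit type, so a plain mov becomes a 64 bit mov
** (rax instead of eax); on x86 the macro expands to just (r).
*/
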
/* Generic move between two regs. */
static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
{
  UNUSED(ir);
  if (dst < RID_MAX_GPR)
    emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
  else
    emit_rr(as, XMM_MOVRR(as), dst, src);
}

/* Generic load of register from stack slot. */
static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
{
  if (r < RID_MAX_GPR)
    emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs);
  else
    emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs);
}

/* Generic store of register to stack slot. */
static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
{
  if (r < RID_MAX_GPR)
    emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs);
  else
    emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs);
}

/* Add offset to pointer. */
static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
{
  if (ofs) {
    if ((as->flags & JIT_F_LEA_AGU))
      emit_rmro(as, XO_LEA, r, r, ofs);
    else
      emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
  }
}

#define emit_spsub(as, ofs)  emit_addptr(as, RID_ESP|REX_64, -(ofs))

/* Prefer rematerialization of BASE/L from global_State over spills. */
#define emit_canremat(ref)  ((ref) <= REF_BASE)