source/libs/luajit/LuaJIT-src/src/lj_emit_x86.h
/*
** x86/x64 instruction emitter.
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Emit basic instructions --------------------------------------------- */
#define MODRM(mode, r1, r2)  ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))

#if LJ_64
#define REXRB(p, rr, rb) \
    { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
      if (rex != 0x40) *--(p) = rex; }
#define FORCE_REX  0x200
#define REX_64     (FORCE_REX|0x080000)
#else
#define REXRB(p, rr, rb)  ((void)0)
#define FORCE_REX  0
#define REX_64     0
#endif

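/*
** A worked example of the ModRM packing above: MODRM(XM_REG, r1, r2)
** yields 0xc0 | ((r1&7)<<3) | (r2&7). Assuming the usual register IDs
** (RID_EAX = 0, RID_ECX = 1), MODRM(XM_REG, RID_EAX, RID_ECX) == 0xc1,
** the register-direct "eax, ecx" operand byte. REXRB prepends a REX
** prefix only when rr or rb is an extended register (r8-r15).
*/
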
#define emit_i8(as, i)   (*--as->mcp = (MCode)(i))
#define emit_i32(as, i)  (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
#define emit_u32(as, u)  (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)

#define emit_x87op(as, xo) \
  (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)

/* op */
static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
                                 MCode *p, int delta)
{
  int n = (int8_t)xo;
#if defined(__GNUC__)
  if (__builtin_constant_p(xo) && n == -2)
    p[delta-2] = (MCode)(xo >> 24);
  else if (__builtin_constant_p(xo) && n == -3)
    *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
  else
#endif
    *(uint32_t *)(p+delta-5) = (uint32_t)xo;
  p += n + delta;
#if LJ_64
  {
    uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
    if (rex != 0x40) {
      rex |= (rr >> 16);
      if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
      else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
      *--p = (MCode)rex;
    }
  }
#else
  UNUSED(rr); UNUSED(rb); UNUSED(rx);
#endif
  return p;
}

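/*
** How to read emit_op (an explanatory sketch, inferred from the code
** above): machine code grows downwards, so p is the end of the
** instruction being assembled. The low byte of an x86Op is a negative
** size marker n (-2 for a one-byte opcode, -3 for an 0x0f-prefixed
** two-byte opcode, -4 for a three-byte form with a mandatory prefix);
** the opcode bytes themselves sit in the upper bytes of xo. Storing all
** four bytes of xo at p+delta-5 and then advancing p by n+delta leaves
** exactly the |n|-1 opcode bytes in the stream; any REX prefix is then
** pushed in front, taking care to keep a mandatory 0x66 prefix ahead of
** the REX byte.
*/
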
/* op + modrm */
#define emit_opm(xo, mode, rr, rb, p, delta) \
  (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
   emit_op((xo), (rr), (rb), 0, (p), (delta)))

/* op + modrm + sib */
#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
  (p[-1] = MODRM((scale), (rx), (rb)), \
   p[-2] = MODRM((mode), (rr), RID_ESP), \
   emit_op((xo), (rr), (rb), (rx), (p), -1))

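/*
** Aside on emit_opmx: using RID_ESP (encoding 4) in the rm field of the
** ModRM byte is the architectural escape that tells the CPU a SIB byte
** follows; MODRM() is simply reused to pack the SIB byte itself as
** scale | index<<3 | base.
*/
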
/* op r1, r2 */
static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
{
  MCode *p = as->mcp;
  as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
}

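/*
** Usage sketch: emit_rr(as, XO_ARITH(XOg_ADD), RID_EAX, RID_ECX) would
** emit "add eax, ecx", i.e. the two bytes 0x03 0xc1 (opcode, then ModRM),
** written backwards in front of as->mcp.
*/
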
#if LJ_64 && defined(LUA_USE_ASSERT)
/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
static int32_t ptr2addr(const void *p)
{
  lua_assert((uintptr_t)p < (uintptr_t)0x80000000);
  return i32ptr(p);
}
#else
#define ptr2addr(p)  (i32ptr((p)))
#endif

/* op r, [addr] */
static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
{
  MCode *p = as->mcp;
  *(int32_t *)(p-4) = ptr2addr(addr);
#if LJ_64
  p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
  as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
#else
  as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
#endif
}

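/*
** Why the extra SIB byte on x64 (background note): mod=00 with rm=EBP
** means RIP-relative on x86-64, so a plain [disp32] absolute address
** needs a SIB byte with index=ESP (none) and base=EBP (none, disp32
** only). On 32 bit x86 the shorter mod=00/rm=EBP form directly encodes
** [disp32].
*/
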
/* op r, [base+ofs] */
static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
{
  MCode *p = as->mcp;
  x86Mode mode;
  if (ra_hasreg(rb)) {
    if (ofs == 0 && (rb&7) != RID_EBP) {
      mode = XM_OFS0;
    } else if (checki8(ofs)) {
      *--p = (MCode)ofs;
      mode = XM_OFS8;
    } else {
      p -= 4;
      *(int32_t *)p = ofs;
      mode = XM_OFS32;
    }
    if ((rb&7) == RID_ESP)
      *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  } else {
    *(int32_t *)(p-4) = ofs;
#if LJ_64
    p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
    p -= 5;
    rb = RID_ESP;
#else
    p -= 4;
    rb = RID_EBP;
#endif
    mode = XM_OFS0;
  }
  as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
}

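/*
** Two x86 quirks drive the special cases above: a base of EBP/R13 has no
** mod=00 form (that encoding means disp32 or RIP-relative), so even a
** zero offset falls through to the 8 bit form, and a base of ESP/R12
** always needs a dummy SIB byte because rm=ESP is the SIB escape.
*/
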
/* op r, [base+idx*scale+ofs] */
static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
                       x86Mode scale, int32_t ofs)
{
  MCode *p = as->mcp;
  x86Mode mode;
  if (ofs == 0 && (rb&7) != RID_EBP) {
    mode = XM_OFS0;
  } else if (checki8(ofs)) {
    mode = XM_OFS8;
    *--p = (MCode)ofs;
  } else {
    mode = XM_OFS32;
    p -= 4;
    *(int32_t *)p = ofs;
  }
  as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
}

/* op r, i */
static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
{
  MCode *p = as->mcp;
  x86Op xo;
  if (checki8(i)) {
    *--p = (MCode)i;
    xo = XG_TOXOi8(xg);
  } else {
    p -= 4;
    *(int32_t *)p = i;
    xo = XG_TOXOi(xg);
  }
  as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
}

/* op [base+ofs], i */
static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
                       int32_t i)
{
  x86Op xo;
  if (checki8(i)) {
    emit_i8(as, i);
    xo = XG_TOXOi8(xg);
  } else {
    emit_i32(as, i);
    xo = XG_TOXOi(xg);
  }
  emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
}

#define emit_shifti(as, xg, r, i) \
  (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))

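/*
** Usage sketch: emit_shifti(as, XOg_SHL, RID_EAX, 4) would emit
** "shl eax, 4" as 0xc1 0xe0 0x04. The group number XOg_SHL travels in the
** reg field of the ModRM byte, which is how x86 shift-group opcodes
** select their operation.
*/
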
/* op r, rm/mrm */
static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
{
  MCode *p = as->mcp;
  x86Mode mode = XM_REG;
  if (rb == RID_MRM) {
    rb = as->mrm.base;
    if (rb == RID_NONE) {
      rb = RID_EBP;
      mode = XM_OFS0;
      p -= 4;
      *(int32_t *)p = as->mrm.ofs;
      if (as->mrm.idx != RID_NONE)
        goto mrmidx;
#if LJ_64
      *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
      rb = RID_ESP;
#endif
    } else {
      if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
        mode = XM_OFS0;
      } else if (checki8(as->mrm.ofs)) {
        *--p = (MCode)as->mrm.ofs;
        mode = XM_OFS8;
      } else {
        p -= 4;
        *(int32_t *)p = as->mrm.ofs;
        mode = XM_OFS32;
      }
      if (as->mrm.idx != RID_NONE) {
      mrmidx:
        as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
        return;
      }
      if ((rb&7) == RID_ESP)
        *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
    }
  }
  as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
}

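/*
** RID_MRM is a pseudo-register (a convention used throughout this file):
** passing it as rb redirects the operand to the memory operand described
** by as->mrm (base, index, scale, offset), so the same call site can
** produce either a register-register or a register-memory form of an
** instruction.
*/
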
/* op rm/mrm, i */
static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
{
  x86Op xo;
  if (checki8(i)) {
    emit_i8(as, i);
    xo = XG_TOXOi8(xg);
  } else {
    emit_i32(as, i);
    xo = XG_TOXOi(xg);
  }
  emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
}

/* -- Emit loads/stores --------------------------------------------------- */

/* mov [base+ofs], i */
static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
{
  emit_i32(as, i);
  emit_rmro(as, XO_MOVmi, 0, base, ofs);
}

/* mov [base+ofs], r */
#define emit_movtomro(as, r, base, ofs) \
  emit_rmro(as, XO_MOVto, (r), (base), (ofs))

/* Get/set global_State fields. */
#define emit_opgl(as, xo, r, field) \
  emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
#define emit_getgl(as, r, field)  emit_opgl(as, XO_MOV, (r), field)
#define emit_setgl(as, r, field)  emit_opgl(as, XO_MOVto, (r), field)

#define emit_setvmstate(as, i) \
  (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))

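/*
** Note: these macros go through emit_rma and thus address global_State
** fields absolutely; on x64 this relies on the state being allocated in
** the sign-extended low 2G, which ptr2addr asserts in checked builds.
*/
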
/* mov r, i / xor r, r */
static void emit_loadi(ASMState *as, Reg r, int32_t i)
{
  /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP. */
  if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
                            (as->curins+1 < as->T->nins &&
                             IR(as->curins+1)->o == IR_HIOP)))) {
    emit_rr(as, XO_ARITH(XOg_XOR), r, r);
  } else {
    MCode *p = as->mcp;
    *(int32_t *)(p-4) = i;
    p[-5] = (MCode)(XI_MOVri+(r&7));
    p -= 5;
    REXRB(p, 0, r);
    as->mcp = p;
  }
}

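/*
** Size comparison (illustrative): "xor eax, eax" is 2 bytes (0x33 0xc0)
** versus 5 bytes for "mov eax, 0" (0xb8 plus a 32 bit immediate), which
** is why the zero case is special-cased, except next to a HIOP pair on
** 32 bit builds, where the flags must be preserved.
*/
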
/* mov r, addr */
#define emit_loada(as, r, addr) \
  emit_loadi(as, (r), ptr2addr((addr)))

#if LJ_64
/* mov r, imm64 or shorter 32 bit extended load. */
static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
{
  if (checku32(u64)) {  /* 32 bit load clears upper 32 bits. */
    emit_loadi(as, r, (int32_t)u64);
  } else if (checki32((int64_t)u64)) {  /* Sign-extended 32 bit load. */
    MCode *p = as->mcp;
    *(int32_t *)(p-4) = (int32_t)u64;
    as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
  } else {  /* Full-size 64 bit load. */
    MCode *p = as->mcp;
    *(uint64_t *)(p-8) = u64;
    p[-9] = (MCode)(XI_MOVri+(r&7));
    p[-10] = 0x48 + ((r>>3)&1);
    p -= 10;
    as->mcp = p;
  }
}
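/*
** The three cases trade size for generality (worked out by hand): a plain
** "mov r32, imm32" is 5-6 bytes and zero-extends, the REX.W
** "mov r/m64, imm32" sign-extends in 7 bytes, and the full
** "mov r64, imm64" (REX.W + 0xb8+r) costs 10 bytes.
*/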
#endif

/* movsd r, [&tv->n] / xorps r, r */
static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
{
  if (tvispzero(tv))  /* Use xor only for +0. */
    emit_rr(as, XO_XORPS, r, r);
  else
    emit_rma(as, XO_MOVSD, r, &tv->n);
}

/* -- Emit control-flow instructions -------------------------------------- */

/* Label for short jumps. */
typedef MCode *MCLabel;

#if LJ_32 && LJ_HASFFI
/* jmp short target */
static void emit_sjmp(ASMState *as, MCLabel target)
{
  MCode *p = as->mcp;
  ptrdiff_t delta = target - p;
  lua_assert(delta == (int8_t)delta);
  p[-1] = (MCode)(int8_t)delta;
  p[-2] = XI_JMPs;
  as->mcp = p - 2;
}
#endif

/* jcc short target */
static void emit_sjcc(ASMState *as, int cc, MCLabel target)
{
  MCode *p = as->mcp;
  ptrdiff_t delta = target - p;
  lua_assert(delta == (int8_t)delta);
  p[-1] = (MCode)(int8_t)delta;
  p[-2] = (MCode)(XI_JCCs+(cc&15));
  as->mcp = p - 2;
}

/* jcc short (pending target) */
static MCLabel emit_sjcc_label(ASMState *as, int cc)
{
  MCode *p = as->mcp;
  p[-1] = 0;
  p[-2] = (MCode)(XI_JCCs+(cc&15));
  as->mcp = p - 2;
  return p;
}

/* Fixup jcc short target. */
static void emit_sfixup(ASMState *as, MCLabel source)
{
  source[-1] = (MCode)(as->mcp-source);
}

/* Return label pointing to current PC. */
#define emit_label(as)  ((as)->mcp)

/* Compute relative 32 bit offset for jump and call instructions. */
static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
{
  ptrdiff_t delta = target - p;
  lua_assert(delta == (int32_t)delta);
  return (int32_t)delta;
}

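/*
** A detail worth spelling out: because code is emitted backwards, as->mcp
** (and thus p here) is the address of the instruction FOLLOWING the one
** being built, which is exactly the point x86 relative branch offsets are
** measured from, so "target - p" is the encoded displacement with no
** further adjustment.
*/
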
/* jcc target */
static void emit_jcc(ASMState *as, int cc, MCode *target)
{
  MCode *p = as->mcp;
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = (MCode)(XI_JCCn+(cc&15));
  p[-6] = 0x0f;
  as->mcp = p - 6;
}

/* jmp target */
static void emit_jmp(ASMState *as, MCode *target)
{
  MCode *p = as->mcp;
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = XI_JMP;
  as->mcp = p - 5;
}

/* call target */
static void emit_call_(ASMState *as, MCode *target)
{
  MCode *p = as->mcp;
#if LJ_64
  if (target-p != (int32_t)(target-p)) {
    /* Assumes RID_RET is never an argument to calls and always clobbered. */
    emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
    emit_loadu64(as, RID_RET, (uint64_t)target);
    return;
  }
#endif
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = XI_CALL;
  as->mcp = p - 5;
}

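/*
** Far-call fallback in concrete terms (a sketch, with RID_RET == RAX):
** when the callee is outside the +-2GB range of a rel32 call, this emits
** "mov rax, target; call rax" instead. Remember everything is emitted
** backwards, so emit_rr() lays down the call before emit_loadu64() puts
** the mov in front of it.
*/
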
#define emit_call(as, f)  emit_call_(as, (MCode *)(void *)(f))

/* -- Emit generic operations --------------------------------------------- */

/* Use 64 bit operations to handle 64 bit IR types. */
#if LJ_64
#define REX_64IR(ir, r)  ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
#else
#define REX_64IR(ir, r)  (r)
#endif

/* Generic move between two regs. */
static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
{
  UNUSED(ir);
  if (dst < RID_MAX_GPR)
    emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
  else
    emit_rr(as, XO_MOVAPS, dst, src);
}

/* Generic load of register with base and (small) offset address. */
static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
{
  if (r < RID_MAX_GPR)
    emit_rmro(as, XO_MOV, REX_64IR(ir, r), base, ofs);
  else
    emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, r, base, ofs);
}

/* Generic store of register with base and (small) offset address. */
static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
{
  if (r < RID_MAX_GPR)
    emit_rmro(as, XO_MOVto, REX_64IR(ir, r), base, ofs);
  else
    emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, base, ofs);
}

/* Add offset to pointer. */
static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
{
  if (ofs) {
    if ((as->flags & JIT_F_LEA_AGU))
      emit_rmro(as, XO_LEA, r, r, ofs);
    else
      emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
  }
}

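/*
** Design note on the JIT_F_LEA_AGU choice: "lea r, [r+ofs]" computes the
** sum in the address-generation unit and leaves EFLAGS untouched, while
** "add r, ofs" uses the ALU and clobbers the flags; the flag selects
** whichever form is cheaper on the detected microarchitecture.
*/
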
#define emit_spsub(as, ofs)  emit_addptr(as, RID_ESP|REX_64, -(ofs))

/* Prefer rematerialization of BASE/L from global_State over spills. */
#define emit_canremat(ref)  ((ref) <= REF_BASE)