tcg-i386: Implement small forward branches.
[qemu/aliguori-queue.git] / tcg / i386 / tcg-target.c
blob052af4949c2b16cbe4ea9cabad1cda5b46b3b9c4
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%eax",
28 "%ecx",
29 "%edx",
30 "%ebx",
31 "%esp",
32 "%ebp",
33 "%esi",
34 "%edi",
36 #endif
38 static const int tcg_target_reg_alloc_order[] = {
39 TCG_REG_EAX,
40 TCG_REG_EDX,
41 TCG_REG_ECX,
42 TCG_REG_EBX,
43 TCG_REG_ESI,
44 TCG_REG_EDI,
45 TCG_REG_EBP,
48 static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
49 static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };
51 static uint8_t *tb_ret_addr;
53 static void patch_reloc(uint8_t *code_ptr, int type,
54 tcg_target_long value, tcg_target_long addend)
56 value += addend;
57 switch(type) {
58 case R_386_32:
59 *(uint32_t *)code_ptr = value;
60 break;
61 case R_386_PC32:
62 *(uint32_t *)code_ptr = value - (long)code_ptr;
63 break;
64 case R_386_PC8:
65 value -= (long)code_ptr;
66 if (value != (int8_t)value) {
67 tcg_abort();
69 *(uint8_t *)code_ptr = value;
70 break;
71 default:
72 tcg_abort();
76 /* maximum number of register used for input function arguments */
77 static inline int tcg_target_get_call_iarg_regs_count(int flags)
79 flags &= TCG_CALL_TYPE_MASK;
80 switch(flags) {
81 case TCG_CALL_TYPE_STD:
82 return 0;
83 case TCG_CALL_TYPE_REGPARM_1:
84 case TCG_CALL_TYPE_REGPARM_2:
85 case TCG_CALL_TYPE_REGPARM:
86 return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
87 default:
88 tcg_abort();
92 /* parse target specific constraints */
93 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
95 const char *ct_str;
97 ct_str = *pct_str;
98 switch(ct_str[0]) {
99 case 'a':
100 ct->ct |= TCG_CT_REG;
101 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
102 break;
103 case 'b':
104 ct->ct |= TCG_CT_REG;
105 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
106 break;
107 case 'c':
108 ct->ct |= TCG_CT_REG;
109 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
110 break;
111 case 'd':
112 ct->ct |= TCG_CT_REG;
113 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
114 break;
115 case 'S':
116 ct->ct |= TCG_CT_REG;
117 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
118 break;
119 case 'D':
120 ct->ct |= TCG_CT_REG;
121 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
122 break;
123 case 'q':
124 ct->ct |= TCG_CT_REG;
125 tcg_regset_set32(ct->u.regs, 0, 0xf);
126 break;
127 case 'r':
128 ct->ct |= TCG_CT_REG;
129 tcg_regset_set32(ct->u.regs, 0, 0xff);
130 break;
132 /* qemu_ld/st address constraint */
133 case 'L':
134 ct->ct |= TCG_CT_REG;
135 tcg_regset_set32(ct->u.regs, 0, 0xff);
136 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
137 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
138 break;
139 default:
140 return -1;
142 ct_str++;
143 *pct_str = ct_str;
144 return 0;
147 /* test if a constant matches the constraint */
148 static inline int tcg_target_const_match(tcg_target_long val,
149 const TCGArgConstraint *arg_ct)
151 int ct;
152 ct = arg_ct->ct;
153 if (ct & TCG_CT_CONST)
154 return 1;
155 else
156 return 0;
/* ModRM "reg" field values selecting the ALU op in the 0x80/0x81/0x83
   immediate-group and the 0x00..0x38 register-form opcodes. */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* ModRM "reg" field values for the shift/rotate group (0xc1/0xd1/0xd3). */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Condition-code nibbles for Jcc/SETcc; JCC_JMP marks an unconditional jump. */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

#define P_EXT   0x100 /* 0x0f opcode prefix */
194 static const uint8_t tcg_cond_to_jcc[10] = {
195 [TCG_COND_EQ] = JCC_JE,
196 [TCG_COND_NE] = JCC_JNE,
197 [TCG_COND_LT] = JCC_JL,
198 [TCG_COND_GE] = JCC_JGE,
199 [TCG_COND_LE] = JCC_JLE,
200 [TCG_COND_GT] = JCC_JG,
201 [TCG_COND_LTU] = JCC_JB,
202 [TCG_COND_GEU] = JCC_JAE,
203 [TCG_COND_LEU] = JCC_JBE,
204 [TCG_COND_GTU] = JCC_JA,
207 static inline void tcg_out_opc(TCGContext *s, int opc)
209 if (opc & P_EXT)
210 tcg_out8(s, 0x0f);
211 tcg_out8(s, opc);
214 static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
216 tcg_out_opc(s, opc);
217 tcg_out8(s, 0xc0 | (r << 3) | rm);
220 /* rm == -1 means no register index */
221 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
222 int32_t offset)
224 tcg_out_opc(s, opc);
225 if (rm == -1) {
226 tcg_out8(s, 0x05 | (r << 3));
227 tcg_out32(s, offset);
228 } else if (offset == 0 && rm != TCG_REG_EBP) {
229 if (rm == TCG_REG_ESP) {
230 tcg_out8(s, 0x04 | (r << 3));
231 tcg_out8(s, 0x24);
232 } else {
233 tcg_out8(s, 0x00 | (r << 3) | rm);
235 } else if ((int8_t)offset == offset) {
236 if (rm == TCG_REG_ESP) {
237 tcg_out8(s, 0x44 | (r << 3));
238 tcg_out8(s, 0x24);
239 } else {
240 tcg_out8(s, 0x40 | (r << 3) | rm);
242 tcg_out8(s, offset);
243 } else {
244 if (rm == TCG_REG_ESP) {
245 tcg_out8(s, 0x84 | (r << 3));
246 tcg_out8(s, 0x24);
247 } else {
248 tcg_out8(s, 0x80 | (r << 3) | rm);
250 tcg_out32(s, offset);
254 static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
256 if (arg != ret)
257 tcg_out_modrm(s, 0x8b, ret, arg);
260 static inline void tcg_out_movi(TCGContext *s, TCGType type,
261 int ret, int32_t arg)
263 if (arg == 0) {
264 /* xor r0,r0 */
265 tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret);
266 } else {
267 tcg_out8(s, 0xb8 + ret);
268 tcg_out32(s, arg);
272 static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
273 int arg1, tcg_target_long arg2)
275 /* movl */
276 tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2);
279 static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
280 int arg1, tcg_target_long arg2)
282 /* movl */
283 tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2);
286 static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val, int cf)
288 if (!cf && ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1))) {
289 /* inc */
290 tcg_out_opc(s, 0x40 + r0);
291 } else if (!cf && ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1))) {
292 /* dec */
293 tcg_out_opc(s, 0x48 + r0);
294 } else if (val == (int8_t)val) {
295 tcg_out_modrm(s, 0x83, c, r0);
296 tcg_out8(s, val);
297 } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
298 /* movzbl */
299 tcg_out_modrm(s, 0xb6 | P_EXT, r0, r0);
300 } else if (c == ARITH_AND && val == 0xffffu) {
301 /* movzwl */
302 tcg_out_modrm(s, 0xb7 | P_EXT, r0, r0);
303 } else {
304 tcg_out_modrm(s, 0x81, c, r0);
305 tcg_out32(s, val);
309 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
311 if (val != 0)
312 tgen_arithi(s, ARITH_ADD, reg, val, 0);
315 /* Use SMALL != 0 to force a short forward branch. */
316 static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
318 int32_t val, val1;
319 TCGLabel *l = &s->labels[label_index];
321 if (l->has_value) {
322 val = l->u.value - (tcg_target_long)s->code_ptr;
323 val1 = val - 2;
324 if ((int8_t)val1 == val1) {
325 if (opc == -1) {
326 tcg_out8(s, 0xeb);
327 } else {
328 tcg_out8(s, 0x70 + opc);
330 tcg_out8(s, val1);
331 } else {
332 if (small) {
333 tcg_abort();
335 if (opc == -1) {
336 tcg_out8(s, 0xe9);
337 tcg_out32(s, val - 5);
338 } else {
339 tcg_out8(s, 0x0f);
340 tcg_out8(s, 0x80 + opc);
341 tcg_out32(s, val - 6);
344 } else if (small) {
345 if (opc == -1) {
346 tcg_out8(s, 0xeb);
347 } else {
348 tcg_out8(s, 0x70 + opc);
350 tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
351 s->code_ptr += 1;
352 } else {
353 if (opc == -1) {
354 tcg_out8(s, 0xe9);
355 } else {
356 tcg_out8(s, 0x0f);
357 tcg_out8(s, 0x80 + opc);
359 tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
360 s->code_ptr += 4;
364 static void tcg_out_brcond(TCGContext *s, int cond,
365 TCGArg arg1, TCGArg arg2, int const_arg2,
366 int label_index, int small)
368 if (const_arg2) {
369 if (arg2 == 0) {
370 /* test r, r */
371 tcg_out_modrm(s, 0x85, arg1, arg1);
372 } else {
373 tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
375 } else {
376 tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1);
378 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
381 /* XXX: we implement it at the target level to avoid having to
382 handle cross basic blocks temporaries */
383 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
384 const int *const_args, int small)
386 int label_next;
387 label_next = gen_new_label();
388 switch(args[4]) {
389 case TCG_COND_EQ:
390 tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
391 label_next, 1);
392 tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
393 args[5], small);
394 break;
395 case TCG_COND_NE:
396 tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
397 args[5], small);
398 tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
399 args[5], small);
400 break;
401 case TCG_COND_LT:
402 tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
403 args[5], small);
404 tcg_out_jxx(s, JCC_JNE, label_next, 1);
405 tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
406 args[5], small);
407 break;
408 case TCG_COND_LE:
409 tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
410 args[5], small);
411 tcg_out_jxx(s, JCC_JNE, label_next, 1);
412 tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
413 args[5], small);
414 break;
415 case TCG_COND_GT:
416 tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
417 args[5], small);
418 tcg_out_jxx(s, JCC_JNE, label_next, 1);
419 tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
420 args[5], small);
421 break;
422 case TCG_COND_GE:
423 tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
424 args[5], small);
425 tcg_out_jxx(s, JCC_JNE, label_next, 1);
426 tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
427 args[5], small);
428 break;
429 case TCG_COND_LTU:
430 tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
431 args[5], small);
432 tcg_out_jxx(s, JCC_JNE, label_next, 1);
433 tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
434 args[5], small);
435 break;
436 case TCG_COND_LEU:
437 tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
438 args[5], small);
439 tcg_out_jxx(s, JCC_JNE, label_next, 1);
440 tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
441 args[5], small);
442 break;
443 case TCG_COND_GTU:
444 tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
445 args[5], small);
446 tcg_out_jxx(s, JCC_JNE, label_next, 1);
447 tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
448 args[5], small);
449 break;
450 case TCG_COND_GEU:
451 tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
452 args[5], small);
453 tcg_out_jxx(s, JCC_JNE, label_next, 1);
454 tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
455 args[5], small);
456 break;
457 default:
458 tcg_abort();
460 tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* Slow-path load helpers, indexed by access size (log2 of bytes). */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Slow-path store helpers, indexed by access size (log2 of bytes). */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#ifndef CONFIG_USER_ONLY
#define GUEST_BASE 0
#endif
486 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
487 EAX. It will be useful once fixed registers globals are less
488 common. */
489 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
490 int opc)
492 int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
493 #if defined(CONFIG_SOFTMMU)
494 uint8_t *label1_ptr, *label2_ptr;
495 #endif
496 #if TARGET_LONG_BITS == 64
497 #if defined(CONFIG_SOFTMMU)
498 uint8_t *label3_ptr;
499 #endif
500 int addr_reg2;
501 #endif
503 data_reg = *args++;
504 if (opc == 3)
505 data_reg2 = *args++;
506 else
507 data_reg2 = 0;
508 addr_reg = *args++;
509 #if TARGET_LONG_BITS == 64
510 addr_reg2 = *args++;
511 #endif
512 mem_index = *args;
513 s_bits = opc & 3;
515 r0 = TCG_REG_EAX;
516 r1 = TCG_REG_EDX;
518 #if defined(CONFIG_SOFTMMU)
519 tcg_out_mov(s, r1, addr_reg);
521 tcg_out_mov(s, r0, addr_reg);
523 tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
524 tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
526 tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
527 tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
529 tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
530 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
532 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
533 tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
534 tcg_out8(s, (5 << 3) | r1);
535 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));
537 /* cmp 0(r1), r0 */
538 tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);
540 tcg_out_mov(s, r0, addr_reg);
542 #if TARGET_LONG_BITS == 32
543 /* je label1 */
544 tcg_out8(s, 0x70 + JCC_JE);
545 label1_ptr = s->code_ptr;
546 s->code_ptr++;
547 #else
548 /* jne label3 */
549 tcg_out8(s, 0x70 + JCC_JNE);
550 label3_ptr = s->code_ptr;
551 s->code_ptr++;
553 /* cmp 4(r1), addr_reg2 */
554 tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);
556 /* je label1 */
557 tcg_out8(s, 0x70 + JCC_JE);
558 label1_ptr = s->code_ptr;
559 s->code_ptr++;
561 /* label3: */
562 *label3_ptr = s->code_ptr - label3_ptr - 1;
563 #endif
565 /* XXX: move that code at the end of the TB */
566 #if TARGET_LONG_BITS == 32
567 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
568 #else
569 tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
570 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
571 #endif
572 tcg_out8(s, 0xe8);
573 tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
574 (tcg_target_long)s->code_ptr - 4);
576 switch(opc) {
577 case 0 | 4:
578 /* movsbl */
579 tcg_out_modrm(s, 0xbe | P_EXT, data_reg, TCG_REG_EAX);
580 break;
581 case 1 | 4:
582 /* movswl */
583 tcg_out_modrm(s, 0xbf | P_EXT, data_reg, TCG_REG_EAX);
584 break;
585 case 0:
586 /* movzbl */
587 tcg_out_modrm(s, 0xb6 | P_EXT, data_reg, TCG_REG_EAX);
588 break;
589 case 1:
590 /* movzwl */
591 tcg_out_modrm(s, 0xb7 | P_EXT, data_reg, TCG_REG_EAX);
592 break;
593 case 2:
594 default:
595 tcg_out_mov(s, data_reg, TCG_REG_EAX);
596 break;
597 case 3:
598 if (data_reg == TCG_REG_EDX) {
599 tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
600 tcg_out_mov(s, data_reg2, TCG_REG_EAX);
601 } else {
602 tcg_out_mov(s, data_reg, TCG_REG_EAX);
603 tcg_out_mov(s, data_reg2, TCG_REG_EDX);
605 break;
608 /* jmp label2 */
609 tcg_out8(s, 0xeb);
610 label2_ptr = s->code_ptr;
611 s->code_ptr++;
613 /* label1: */
614 *label1_ptr = s->code_ptr - label1_ptr - 1;
616 /* add x(r1), r0 */
617 tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
618 offsetof(CPUTLBEntry, addr_read));
619 #else
620 r0 = addr_reg;
621 #endif
623 #ifdef TARGET_WORDS_BIGENDIAN
624 bswap = 1;
625 #else
626 bswap = 0;
627 #endif
628 switch(opc) {
629 case 0:
630 /* movzbl */
631 tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, GUEST_BASE);
632 break;
633 case 0 | 4:
634 /* movsbl */
635 tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, GUEST_BASE);
636 break;
637 case 1:
638 /* movzwl */
639 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, GUEST_BASE);
640 if (bswap) {
641 /* rolw $8, data_reg */
642 tcg_out8(s, 0x66);
643 tcg_out_modrm(s, 0xc1, 0, data_reg);
644 tcg_out8(s, 8);
646 break;
647 case 1 | 4:
648 /* movswl */
649 tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, GUEST_BASE);
650 if (bswap) {
651 /* rolw $8, data_reg */
652 tcg_out8(s, 0x66);
653 tcg_out_modrm(s, 0xc1, 0, data_reg);
654 tcg_out8(s, 8);
656 /* movswl data_reg, data_reg */
657 tcg_out_modrm(s, 0xbf | P_EXT, data_reg, data_reg);
659 break;
660 case 2:
661 /* movl (r0), data_reg */
662 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE);
663 if (bswap) {
664 /* bswap */
665 tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
667 break;
668 case 3:
669 /* XXX: could be nicer */
670 if (r0 == data_reg) {
671 r1 = TCG_REG_EDX;
672 if (r1 == data_reg)
673 r1 = TCG_REG_EAX;
674 tcg_out_mov(s, r1, r0);
675 r0 = r1;
677 if (!bswap) {
678 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE);
679 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE + 4);
680 } else {
681 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE + 4);
682 tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
684 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE);
685 /* bswap */
686 tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT);
688 break;
689 default:
690 tcg_abort();
693 #if defined(CONFIG_SOFTMMU)
694 /* label2: */
695 *label2_ptr = s->code_ptr - label2_ptr - 1;
696 #endif
700 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
701 int opc)
703 int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
704 #if defined(CONFIG_SOFTMMU)
705 uint8_t *label1_ptr, *label2_ptr;
706 #endif
707 #if TARGET_LONG_BITS == 64
708 #if defined(CONFIG_SOFTMMU)
709 uint8_t *label3_ptr;
710 #endif
711 int addr_reg2;
712 #endif
714 data_reg = *args++;
715 if (opc == 3)
716 data_reg2 = *args++;
717 else
718 data_reg2 = 0;
719 addr_reg = *args++;
720 #if TARGET_LONG_BITS == 64
721 addr_reg2 = *args++;
722 #endif
723 mem_index = *args;
725 s_bits = opc;
727 r0 = TCG_REG_EAX;
728 r1 = TCG_REG_EDX;
730 #if defined(CONFIG_SOFTMMU)
731 tcg_out_mov(s, r1, addr_reg);
733 tcg_out_mov(s, r0, addr_reg);
735 tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
736 tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
738 tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
739 tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
741 tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
742 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
744 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
745 tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
746 tcg_out8(s, (5 << 3) | r1);
747 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));
749 /* cmp 0(r1), r0 */
750 tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);
752 tcg_out_mov(s, r0, addr_reg);
754 #if TARGET_LONG_BITS == 32
755 /* je label1 */
756 tcg_out8(s, 0x70 + JCC_JE);
757 label1_ptr = s->code_ptr;
758 s->code_ptr++;
759 #else
760 /* jne label3 */
761 tcg_out8(s, 0x70 + JCC_JNE);
762 label3_ptr = s->code_ptr;
763 s->code_ptr++;
765 /* cmp 4(r1), addr_reg2 */
766 tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);
768 /* je label1 */
769 tcg_out8(s, 0x70 + JCC_JE);
770 label1_ptr = s->code_ptr;
771 s->code_ptr++;
773 /* label3: */
774 *label3_ptr = s->code_ptr - label3_ptr - 1;
775 #endif
777 /* XXX: move that code at the end of the TB */
778 #if TARGET_LONG_BITS == 32
779 if (opc == 3) {
780 tcg_out_mov(s, TCG_REG_EDX, data_reg);
781 tcg_out_mov(s, TCG_REG_ECX, data_reg2);
782 tcg_out8(s, 0x6a); /* push Ib */
783 tcg_out8(s, mem_index);
784 tcg_out8(s, 0xe8);
785 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
786 (tcg_target_long)s->code_ptr - 4);
787 tcg_out_addi(s, TCG_REG_ESP, 4);
788 } else {
789 switch(opc) {
790 case 0:
791 /* movzbl */
792 tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_EDX, data_reg);
793 break;
794 case 1:
795 /* movzwl */
796 tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_EDX, data_reg);
797 break;
798 case 2:
799 tcg_out_mov(s, TCG_REG_EDX, data_reg);
800 break;
802 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
803 tcg_out8(s, 0xe8);
804 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
805 (tcg_target_long)s->code_ptr - 4);
807 #else
808 if (opc == 3) {
809 tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
810 tcg_out8(s, 0x6a); /* push Ib */
811 tcg_out8(s, mem_index);
812 tcg_out_opc(s, 0x50 + data_reg2); /* push */
813 tcg_out_opc(s, 0x50 + data_reg); /* push */
814 tcg_out8(s, 0xe8);
815 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
816 (tcg_target_long)s->code_ptr - 4);
817 tcg_out_addi(s, TCG_REG_ESP, 12);
818 } else {
819 tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
820 switch(opc) {
821 case 0:
822 /* movzbl */
823 tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_ECX, data_reg);
824 break;
825 case 1:
826 /* movzwl */
827 tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_ECX, data_reg);
828 break;
829 case 2:
830 tcg_out_mov(s, TCG_REG_ECX, data_reg);
831 break;
833 tcg_out8(s, 0x6a); /* push Ib */
834 tcg_out8(s, mem_index);
835 tcg_out8(s, 0xe8);
836 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
837 (tcg_target_long)s->code_ptr - 4);
838 tcg_out_addi(s, TCG_REG_ESP, 4);
840 #endif
842 /* jmp label2 */
843 tcg_out8(s, 0xeb);
844 label2_ptr = s->code_ptr;
845 s->code_ptr++;
847 /* label1: */
848 *label1_ptr = s->code_ptr - label1_ptr - 1;
850 /* add x(r1), r0 */
851 tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
852 offsetof(CPUTLBEntry, addr_write));
853 #else
854 r0 = addr_reg;
855 #endif
857 #ifdef TARGET_WORDS_BIGENDIAN
858 bswap = 1;
859 #else
860 bswap = 0;
861 #endif
862 switch(opc) {
863 case 0:
864 /* movb */
865 tcg_out_modrm_offset(s, 0x88, data_reg, r0, GUEST_BASE);
866 break;
867 case 1:
868 if (bswap) {
869 tcg_out_mov(s, r1, data_reg);
870 tcg_out8(s, 0x66); /* rolw $8, %ecx */
871 tcg_out_modrm(s, 0xc1, 0, r1);
872 tcg_out8(s, 8);
873 data_reg = r1;
875 /* movw */
876 tcg_out8(s, 0x66);
877 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE);
878 break;
879 case 2:
880 if (bswap) {
881 tcg_out_mov(s, r1, data_reg);
882 /* bswap data_reg */
883 tcg_out_opc(s, (0xc8 + r1) | P_EXT);
884 data_reg = r1;
886 /* movl */
887 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE);
888 break;
889 case 3:
890 if (bswap) {
891 tcg_out_mov(s, r1, data_reg2);
892 /* bswap data_reg */
893 tcg_out_opc(s, (0xc8 + r1) | P_EXT);
894 tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE);
895 tcg_out_mov(s, r1, data_reg);
896 /* bswap data_reg */
897 tcg_out_opc(s, (0xc8 + r1) | P_EXT);
898 tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE + 4);
899 } else {
900 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE);
901 tcg_out_modrm_offset(s, 0x89, data_reg2, r0, GUEST_BASE + 4);
903 break;
904 default:
905 tcg_abort();
908 #if defined(CONFIG_SOFTMMU)
909 /* label2: */
910 *label2_ptr = s->code_ptr - label2_ptr - 1;
911 #endif
914 static inline void tcg_out_op(TCGContext *s, int opc,
915 const TCGArg *args, const int *const_args)
917 int c;
919 switch(opc) {
920 case INDEX_op_exit_tb:
921 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
922 tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
923 tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
924 break;
925 case INDEX_op_goto_tb:
926 if (s->tb_jmp_offset) {
927 /* direct jump method */
928 tcg_out8(s, 0xe9); /* jmp im */
929 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
930 tcg_out32(s, 0);
931 } else {
932 /* indirect jump method */
933 /* jmp Ev */
934 tcg_out_modrm_offset(s, 0xff, 4, -1,
935 (tcg_target_long)(s->tb_next + args[0]));
937 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
938 break;
939 case INDEX_op_call:
940 if (const_args[0]) {
941 tcg_out8(s, 0xe8);
942 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
943 } else {
944 tcg_out_modrm(s, 0xff, 2, args[0]);
946 break;
947 case INDEX_op_jmp:
948 if (const_args[0]) {
949 tcg_out8(s, 0xe9);
950 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
951 } else {
952 tcg_out_modrm(s, 0xff, 4, args[0]);
954 break;
955 case INDEX_op_br:
956 tcg_out_jxx(s, JCC_JMP, args[0], 0);
957 break;
958 case INDEX_op_movi_i32:
959 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
960 break;
961 case INDEX_op_ld8u_i32:
962 /* movzbl */
963 tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
964 break;
965 case INDEX_op_ld8s_i32:
966 /* movsbl */
967 tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
968 break;
969 case INDEX_op_ld16u_i32:
970 /* movzwl */
971 tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
972 break;
973 case INDEX_op_ld16s_i32:
974 /* movswl */
975 tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
976 break;
977 case INDEX_op_ld_i32:
978 /* movl */
979 tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
980 break;
981 case INDEX_op_st8_i32:
982 /* movb */
983 tcg_out_modrm_offset(s, 0x88, args[0], args[1], args[2]);
984 break;
985 case INDEX_op_st16_i32:
986 /* movw */
987 tcg_out8(s, 0x66);
988 tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
989 break;
990 case INDEX_op_st_i32:
991 /* movl */
992 tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
993 break;
994 case INDEX_op_sub_i32:
995 c = ARITH_SUB;
996 goto gen_arith;
997 case INDEX_op_and_i32:
998 c = ARITH_AND;
999 goto gen_arith;
1000 case INDEX_op_or_i32:
1001 c = ARITH_OR;
1002 goto gen_arith;
1003 case INDEX_op_xor_i32:
1004 c = ARITH_XOR;
1005 goto gen_arith;
1006 case INDEX_op_add_i32:
1007 c = ARITH_ADD;
1008 gen_arith:
1009 if (const_args[2]) {
1010 tgen_arithi(s, c, args[0], args[2], 0);
1011 } else {
1012 tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
1014 break;
1015 case INDEX_op_mul_i32:
1016 if (const_args[2]) {
1017 int32_t val;
1018 val = args[2];
1019 if (val == (int8_t)val) {
1020 tcg_out_modrm(s, 0x6b, args[0], args[0]);
1021 tcg_out8(s, val);
1022 } else {
1023 tcg_out_modrm(s, 0x69, args[0], args[0]);
1024 tcg_out32(s, val);
1026 } else {
1027 tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
1029 break;
1030 case INDEX_op_mulu2_i32:
1031 tcg_out_modrm(s, 0xf7, 4, args[3]);
1032 break;
1033 case INDEX_op_div2_i32:
1034 tcg_out_modrm(s, 0xf7, 7, args[4]);
1035 break;
1036 case INDEX_op_divu2_i32:
1037 tcg_out_modrm(s, 0xf7, 6, args[4]);
1038 break;
1039 case INDEX_op_shl_i32:
1040 c = SHIFT_SHL;
1041 gen_shift32:
1042 if (const_args[2]) {
1043 if (args[2] == 1) {
1044 tcg_out_modrm(s, 0xd1, c, args[0]);
1045 } else {
1046 tcg_out_modrm(s, 0xc1, c, args[0]);
1047 tcg_out8(s, args[2]);
1049 } else {
1050 tcg_out_modrm(s, 0xd3, c, args[0]);
1052 break;
1053 case INDEX_op_shr_i32:
1054 c = SHIFT_SHR;
1055 goto gen_shift32;
1056 case INDEX_op_sar_i32:
1057 c = SHIFT_SAR;
1058 goto gen_shift32;
1059 case INDEX_op_rotl_i32:
1060 c = SHIFT_ROL;
1061 goto gen_shift32;
1062 case INDEX_op_rotr_i32:
1063 c = SHIFT_ROR;
1064 goto gen_shift32;
1066 case INDEX_op_add2_i32:
1067 if (const_args[4])
1068 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
1069 else
1070 tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]);
1071 if (const_args[5])
1072 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
1073 else
1074 tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]);
1075 break;
1076 case INDEX_op_sub2_i32:
1077 if (const_args[4])
1078 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
1079 else
1080 tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]);
1081 if (const_args[5])
1082 tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
1083 else
1084 tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]);
1085 break;
1086 case INDEX_op_brcond_i32:
1087 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
1088 args[3], 0);
1089 break;
1090 case INDEX_op_brcond2_i32:
1091 tcg_out_brcond2(s, args, const_args, 0);
1092 break;
1094 case INDEX_op_bswap16_i32:
1095 tcg_out8(s, 0x66);
1096 tcg_out_modrm(s, 0xc1, SHIFT_ROL, args[0]);
1097 tcg_out8(s, 8);
1098 break;
1099 case INDEX_op_bswap32_i32:
1100 tcg_out_opc(s, (0xc8 + args[0]) | P_EXT);
1101 break;
1103 case INDEX_op_neg_i32:
1104 tcg_out_modrm(s, 0xf7, 3, args[0]);
1105 break;
1107 case INDEX_op_not_i32:
1108 tcg_out_modrm(s, 0xf7, 2, args[0]);
1109 break;
1111 case INDEX_op_ext8s_i32:
1112 tcg_out_modrm(s, 0xbe | P_EXT, args[0], args[1]);
1113 break;
1114 case INDEX_op_ext16s_i32:
1115 tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]);
1116 break;
1117 case INDEX_op_ext8u_i32:
1118 tcg_out_modrm(s, 0xb6 | P_EXT, args[0], args[1]);
1119 break;
1120 case INDEX_op_ext16u_i32:
1121 tcg_out_modrm(s, 0xb7 | P_EXT, args[0], args[1]);
1122 break;
1124 case INDEX_op_qemu_ld8u:
1125 tcg_out_qemu_ld(s, args, 0);
1126 break;
1127 case INDEX_op_qemu_ld8s:
1128 tcg_out_qemu_ld(s, args, 0 | 4);
1129 break;
1130 case INDEX_op_qemu_ld16u:
1131 tcg_out_qemu_ld(s, args, 1);
1132 break;
1133 case INDEX_op_qemu_ld16s:
1134 tcg_out_qemu_ld(s, args, 1 | 4);
1135 break;
1136 case INDEX_op_qemu_ld32u:
1137 tcg_out_qemu_ld(s, args, 2);
1138 break;
1139 case INDEX_op_qemu_ld64:
1140 tcg_out_qemu_ld(s, args, 3);
1141 break;
1143 case INDEX_op_qemu_st8:
1144 tcg_out_qemu_st(s, args, 0);
1145 break;
1146 case INDEX_op_qemu_st16:
1147 tcg_out_qemu_st(s, args, 1);
1148 break;
1149 case INDEX_op_qemu_st32:
1150 tcg_out_qemu_st(s, args, 2);
1151 break;
1152 case INDEX_op_qemu_st64:
1153 tcg_out_qemu_st(s, args, 3);
1154 break;
1156 default:
1157 tcg_abort();
1161 static const TCGTargetOpDef x86_op_defs[] = {
1162 { INDEX_op_exit_tb, { } },
1163 { INDEX_op_goto_tb, { } },
1164 { INDEX_op_call, { "ri" } },
1165 { INDEX_op_jmp, { "ri" } },
1166 { INDEX_op_br, { } },
1167 { INDEX_op_mov_i32, { "r", "r" } },
1168 { INDEX_op_movi_i32, { "r" } },
1169 { INDEX_op_ld8u_i32, { "r", "r" } },
1170 { INDEX_op_ld8s_i32, { "r", "r" } },
1171 { INDEX_op_ld16u_i32, { "r", "r" } },
1172 { INDEX_op_ld16s_i32, { "r", "r" } },
1173 { INDEX_op_ld_i32, { "r", "r" } },
1174 { INDEX_op_st8_i32, { "q", "r" } },
1175 { INDEX_op_st16_i32, { "r", "r" } },
1176 { INDEX_op_st_i32, { "r", "r" } },
1178 { INDEX_op_add_i32, { "r", "0", "ri" } },
1179 { INDEX_op_sub_i32, { "r", "0", "ri" } },
1180 { INDEX_op_mul_i32, { "r", "0", "ri" } },
1181 { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
1182 { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
1183 { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
1184 { INDEX_op_and_i32, { "r", "0", "ri" } },
1185 { INDEX_op_or_i32, { "r", "0", "ri" } },
1186 { INDEX_op_xor_i32, { "r", "0", "ri" } },
1188 { INDEX_op_shl_i32, { "r", "0", "ci" } },
1189 { INDEX_op_shr_i32, { "r", "0", "ci" } },
1190 { INDEX_op_sar_i32, { "r", "0", "ci" } },
1191 { INDEX_op_rotl_i32, { "r", "0", "ci" } },
1192 { INDEX_op_rotr_i32, { "r", "0", "ci" } },
1194 { INDEX_op_brcond_i32, { "r", "ri" } },
1196 { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
1197 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
1198 { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
1200 { INDEX_op_bswap16_i32, { "r", "0" } },
1201 { INDEX_op_bswap32_i32, { "r", "0" } },
1203 { INDEX_op_neg_i32, { "r", "0" } },
1205 { INDEX_op_not_i32, { "r", "0" } },
1207 { INDEX_op_ext8s_i32, { "r", "q" } },
1208 { INDEX_op_ext16s_i32, { "r", "r" } },
1209 { INDEX_op_ext8u_i32, { "r", "q"} },
1210 { INDEX_op_ext16u_i32, { "r", "r"} },
1212 #if TARGET_LONG_BITS == 32
1213 { INDEX_op_qemu_ld8u, { "r", "L" } },
1214 { INDEX_op_qemu_ld8s, { "r", "L" } },
1215 { INDEX_op_qemu_ld16u, { "r", "L" } },
1216 { INDEX_op_qemu_ld16s, { "r", "L" } },
1217 { INDEX_op_qemu_ld32u, { "r", "L" } },
1218 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
1220 { INDEX_op_qemu_st8, { "cb", "L" } },
1221 { INDEX_op_qemu_st16, { "L", "L" } },
1222 { INDEX_op_qemu_st32, { "L", "L" } },
1223 { INDEX_op_qemu_st64, { "L", "L", "L" } },
1224 #else
1225 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
1226 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
1227 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
1228 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
1229 { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
1230 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
1232 { INDEX_op_qemu_st8, { "cb", "L", "L" } },
1233 { INDEX_op_qemu_st16, { "L", "L", "L" } },
1234 { INDEX_op_qemu_st32, { "L", "L", "L" } },
1235 { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
1236 #endif
1237 { -1 },
1240 static int tcg_target_callee_save_regs[] = {
1241 /* TCG_REG_EBP, */ /* currently used for the global env, so no
1242 need to save */
1243 TCG_REG_EBX,
1244 TCG_REG_ESI,
1245 TCG_REG_EDI,
1248 static inline void tcg_out_push(TCGContext *s, int reg)
1250 tcg_out_opc(s, 0x50 + reg);
1253 static inline void tcg_out_pop(TCGContext *s, int reg)
1255 tcg_out_opc(s, 0x58 + reg);
1258 /* Generate global QEMU prologue and epilogue code */
1259 void tcg_target_qemu_prologue(TCGContext *s)
1261 int i, frame_size, push_size, stack_addend;
1263 /* TB prologue */
1264 /* save all callee saved registers */
1265 for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1266 tcg_out_push(s, tcg_target_callee_save_regs[i]);
1268 /* reserve some stack space */
1269 push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
1270 frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
1271 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
1272 ~(TCG_TARGET_STACK_ALIGN - 1);
1273 stack_addend = frame_size - push_size;
1274 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
1276 tcg_out_modrm(s, 0xff, 4, TCG_REG_EAX); /* jmp *%eax */
1278 /* TB epilogue */
1279 tb_ret_addr = s->code_ptr;
1280 tcg_out_addi(s, TCG_REG_ESP, stack_addend);
1281 for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
1282 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
1284 tcg_out8(s, 0xc3); /* ret */
1287 void tcg_target_init(TCGContext *s)
1289 /* fail safe */
1290 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
1291 tcg_abort();
1293 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
1294 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1295 (1 << TCG_REG_EAX) |
1296 (1 << TCG_REG_EDX) |
1297 (1 << TCG_REG_ECX));
1299 tcg_regset_clear(s->reserved_regs);
1300 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);
1302 tcg_add_target_add_op_defs(x86_op_defs);