tcg-i386: Tidy ext8s and ext16s operations.
[qemu.git] / tcg / i386 / tcg-target.c
blob949e974e6534277dba34ccf393992631f229ad58
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%eax",
28 "%ecx",
29 "%edx",
30 "%ebx",
31 "%esp",
32 "%ebp",
33 "%esi",
34 "%edi",
36 #endif
38 static const int tcg_target_reg_alloc_order[] = {
39 TCG_REG_EBX,
40 TCG_REG_ESI,
41 TCG_REG_EDI,
42 TCG_REG_EBP,
43 TCG_REG_ECX,
44 TCG_REG_EDX,
45 TCG_REG_EAX,
48 static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
49 static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };
51 static uint8_t *tb_ret_addr;
53 static void patch_reloc(uint8_t *code_ptr, int type,
54 tcg_target_long value, tcg_target_long addend)
56 value += addend;
57 switch(type) {
58 case R_386_32:
59 *(uint32_t *)code_ptr = value;
60 break;
61 case R_386_PC32:
62 *(uint32_t *)code_ptr = value - (long)code_ptr;
63 break;
64 case R_386_PC8:
65 value -= (long)code_ptr;
66 if (value != (int8_t)value) {
67 tcg_abort();
69 *(uint8_t *)code_ptr = value;
70 break;
71 default:
72 tcg_abort();
76 /* maximum number of register used for input function arguments */
77 static inline int tcg_target_get_call_iarg_regs_count(int flags)
79 flags &= TCG_CALL_TYPE_MASK;
80 switch(flags) {
81 case TCG_CALL_TYPE_STD:
82 return 0;
83 case TCG_CALL_TYPE_REGPARM_1:
84 case TCG_CALL_TYPE_REGPARM_2:
85 case TCG_CALL_TYPE_REGPARM:
86 return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
87 default:
88 tcg_abort();
92 /* parse target specific constraints */
93 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
95 const char *ct_str;
97 ct_str = *pct_str;
98 switch(ct_str[0]) {
99 case 'a':
100 ct->ct |= TCG_CT_REG;
101 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
102 break;
103 case 'b':
104 ct->ct |= TCG_CT_REG;
105 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
106 break;
107 case 'c':
108 ct->ct |= TCG_CT_REG;
109 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
110 break;
111 case 'd':
112 ct->ct |= TCG_CT_REG;
113 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
114 break;
115 case 'S':
116 ct->ct |= TCG_CT_REG;
117 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
118 break;
119 case 'D':
120 ct->ct |= TCG_CT_REG;
121 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
122 break;
123 case 'q':
124 ct->ct |= TCG_CT_REG;
125 tcg_regset_set32(ct->u.regs, 0, 0xf);
126 break;
127 case 'r':
128 ct->ct |= TCG_CT_REG;
129 tcg_regset_set32(ct->u.regs, 0, 0xff);
130 break;
132 /* qemu_ld/st address constraint */
133 case 'L':
134 ct->ct |= TCG_CT_REG;
135 tcg_regset_set32(ct->u.regs, 0, 0xff);
136 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
137 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
138 break;
139 default:
140 return -1;
142 ct_str++;
143 *pct_str = ct_str;
144 return 0;
147 /* test if a constant matches the constraint */
148 static inline int tcg_target_const_match(tcg_target_long val,
149 const TCGArgConstraint *arg_ct)
151 int ct;
152 ct = arg_ct->ct;
153 if (ct & TCG_CT_CONST)
154 return 1;
155 else
156 return 0;
159 #define P_EXT 0x100 /* 0x0f opcode prefix */
161 #define OPC_BSWAP (0xc8 | P_EXT)
162 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
163 #define OPC_JCC_short (0x70) /* ... plus condition code */
164 #define OPC_JMP_long (0xe9)
165 #define OPC_JMP_short (0xeb)
166 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
167 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
168 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
169 #define OPC_MOVSBL (0xbe | P_EXT)
170 #define OPC_MOVSWL (0xbf | P_EXT)
171 #define OPC_MOVZBL (0xb6 | P_EXT)
172 #define OPC_MOVZWL (0xb7 | P_EXT)
173 #define OPC_SHIFT_1 (0xd1)
174 #define OPC_SHIFT_Ib (0xc1)
175 #define OPC_SHIFT_cl (0xd3)
177 /* Group 1 opcode extensions for 0x80-0x83. */
178 #define ARITH_ADD 0
179 #define ARITH_OR 1
180 #define ARITH_ADC 2
181 #define ARITH_SBB 3
182 #define ARITH_AND 4
183 #define ARITH_SUB 5
184 #define ARITH_XOR 6
185 #define ARITH_CMP 7
187 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
188 #define SHIFT_ROL 0
189 #define SHIFT_ROR 1
190 #define SHIFT_SHL 4
191 #define SHIFT_SHR 5
192 #define SHIFT_SAR 7
194 /* Group 5 opcode extensions for 0xff. */
195 #define EXT_JMPN_Ev 4
197 /* Condition codes to be added to OPC_JCC_{long,short}. */
198 #define JCC_JMP (-1)
199 #define JCC_JO 0x0
200 #define JCC_JNO 0x1
201 #define JCC_JB 0x2
202 #define JCC_JAE 0x3
203 #define JCC_JE 0x4
204 #define JCC_JNE 0x5
205 #define JCC_JBE 0x6
206 #define JCC_JA 0x7
207 #define JCC_JS 0x8
208 #define JCC_JNS 0x9
209 #define JCC_JP 0xa
210 #define JCC_JNP 0xb
211 #define JCC_JL 0xc
212 #define JCC_JGE 0xd
213 #define JCC_JLE 0xe
214 #define JCC_JG 0xf
216 static const uint8_t tcg_cond_to_jcc[10] = {
217 [TCG_COND_EQ] = JCC_JE,
218 [TCG_COND_NE] = JCC_JNE,
219 [TCG_COND_LT] = JCC_JL,
220 [TCG_COND_GE] = JCC_JGE,
221 [TCG_COND_LE] = JCC_JLE,
222 [TCG_COND_GT] = JCC_JG,
223 [TCG_COND_LTU] = JCC_JB,
224 [TCG_COND_GEU] = JCC_JAE,
225 [TCG_COND_LEU] = JCC_JBE,
226 [TCG_COND_GTU] = JCC_JA,
229 static inline void tcg_out_opc(TCGContext *s, int opc)
231 if (opc & P_EXT)
232 tcg_out8(s, 0x0f);
233 tcg_out8(s, opc);
236 static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
238 tcg_out_opc(s, opc);
239 tcg_out8(s, 0xc0 | (r << 3) | rm);
242 /* rm == -1 means no register index */
243 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
244 int32_t offset)
246 tcg_out_opc(s, opc);
247 if (rm == -1) {
248 tcg_out8(s, 0x05 | (r << 3));
249 tcg_out32(s, offset);
250 } else if (offset == 0 && rm != TCG_REG_EBP) {
251 if (rm == TCG_REG_ESP) {
252 tcg_out8(s, 0x04 | (r << 3));
253 tcg_out8(s, 0x24);
254 } else {
255 tcg_out8(s, 0x00 | (r << 3) | rm);
257 } else if ((int8_t)offset == offset) {
258 if (rm == TCG_REG_ESP) {
259 tcg_out8(s, 0x44 | (r << 3));
260 tcg_out8(s, 0x24);
261 } else {
262 tcg_out8(s, 0x40 | (r << 3) | rm);
264 tcg_out8(s, offset);
265 } else {
266 if (rm == TCG_REG_ESP) {
267 tcg_out8(s, 0x84 | (r << 3));
268 tcg_out8(s, 0x24);
269 } else {
270 tcg_out8(s, 0x80 | (r << 3) | rm);
272 tcg_out32(s, offset);
276 static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
278 if (arg != ret) {
279 tcg_out_modrm(s, OPC_MOVL_GvEv, ret, arg);
283 static inline void tcg_out_movi(TCGContext *s, TCGType type,
284 int ret, int32_t arg)
286 if (arg == 0) {
287 /* xor r0,r0 */
288 tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret);
289 } else {
290 tcg_out8(s, 0xb8 + ret);
291 tcg_out32(s, arg);
295 static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
296 int arg1, tcg_target_long arg2)
298 tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
301 static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
302 int arg1, tcg_target_long arg2)
304 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
307 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
309 if (count == 1) {
310 tcg_out_modrm(s, OPC_SHIFT_1, subopc, reg);
311 } else {
312 tcg_out_modrm(s, OPC_SHIFT_Ib, subopc, reg);
313 tcg_out8(s, count);
317 static inline void tcg_out_bswap32(TCGContext *s, int reg)
319 tcg_out_opc(s, OPC_BSWAP + reg);
322 static inline void tcg_out_rolw_8(TCGContext *s, int reg)
324 tcg_out8(s, 0x66);
325 tcg_out_shifti(s, SHIFT_ROL, reg, 8);
328 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
330 /* movzbl */
331 assert(src < 4);
332 tcg_out_modrm(s, OPC_MOVZBL, dest, src);
335 static void tcg_out_ext8s(TCGContext *s, int dest, int src)
337 /* movsbl */
338 assert(src < 4);
339 tcg_out_modrm(s, OPC_MOVSBL, dest, src);
342 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
344 /* movzwl */
345 tcg_out_modrm(s, OPC_MOVZWL, dest, src);
348 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src)
350 /* movswl */
351 tcg_out_modrm(s, OPC_MOVSWL, dest, src);
354 static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val, int cf)
356 if (!cf && ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1))) {
357 /* inc */
358 tcg_out_opc(s, 0x40 + r0);
359 } else if (!cf && ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1))) {
360 /* dec */
361 tcg_out_opc(s, 0x48 + r0);
362 } else if (val == (int8_t)val) {
363 tcg_out_modrm(s, 0x83, c, r0);
364 tcg_out8(s, val);
365 } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
366 tcg_out_ext8u(s, r0, r0);
367 } else if (c == ARITH_AND && val == 0xffffu) {
368 tcg_out_ext16u(s, r0, r0);
369 } else {
370 tcg_out_modrm(s, 0x81, c, r0);
371 tcg_out32(s, val);
375 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
377 if (val != 0)
378 tgen_arithi(s, ARITH_ADD, reg, val, 0);
381 /* Use SMALL != 0 to force a short forward branch. */
382 static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
384 int32_t val, val1;
385 TCGLabel *l = &s->labels[label_index];
387 if (l->has_value) {
388 val = l->u.value - (tcg_target_long)s->code_ptr;
389 val1 = val - 2;
390 if ((int8_t)val1 == val1) {
391 if (opc == -1) {
392 tcg_out8(s, OPC_JMP_short);
393 } else {
394 tcg_out8(s, OPC_JCC_short + opc);
396 tcg_out8(s, val1);
397 } else {
398 if (small) {
399 tcg_abort();
401 if (opc == -1) {
402 tcg_out8(s, OPC_JMP_long);
403 tcg_out32(s, val - 5);
404 } else {
405 tcg_out_opc(s, OPC_JCC_long + opc);
406 tcg_out32(s, val - 6);
409 } else if (small) {
410 if (opc == -1) {
411 tcg_out8(s, OPC_JMP_short);
412 } else {
413 tcg_out8(s, OPC_JCC_short + opc);
415 tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
416 s->code_ptr += 1;
417 } else {
418 if (opc == -1) {
419 tcg_out8(s, OPC_JMP_long);
420 } else {
421 tcg_out_opc(s, OPC_JCC_long + opc);
423 tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
424 s->code_ptr += 4;
428 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
429 int const_arg2)
431 if (const_arg2) {
432 if (arg2 == 0) {
433 /* test r, r */
434 tcg_out_modrm(s, 0x85, arg1, arg1);
435 } else {
436 tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
438 } else {
439 tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1);
443 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
444 TCGArg arg1, TCGArg arg2, int const_arg2,
445 int label_index, int small)
447 tcg_out_cmp(s, arg1, arg2, const_arg2);
448 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
451 /* XXX: we implement it at the target level to avoid having to
452 handle cross basic blocks temporaries */
453 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
454 const int *const_args, int small)
456 int label_next;
457 label_next = gen_new_label();
458 switch(args[4]) {
459 case TCG_COND_EQ:
460 tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
461 label_next, 1);
462 tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
463 args[5], small);
464 break;
465 case TCG_COND_NE:
466 tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
467 args[5], small);
468 tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
469 args[5], small);
470 break;
471 case TCG_COND_LT:
472 tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
473 args[5], small);
474 tcg_out_jxx(s, JCC_JNE, label_next, 1);
475 tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
476 args[5], small);
477 break;
478 case TCG_COND_LE:
479 tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
480 args[5], small);
481 tcg_out_jxx(s, JCC_JNE, label_next, 1);
482 tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
483 args[5], small);
484 break;
485 case TCG_COND_GT:
486 tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
487 args[5], small);
488 tcg_out_jxx(s, JCC_JNE, label_next, 1);
489 tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
490 args[5], small);
491 break;
492 case TCG_COND_GE:
493 tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
494 args[5], small);
495 tcg_out_jxx(s, JCC_JNE, label_next, 1);
496 tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
497 args[5], small);
498 break;
499 case TCG_COND_LTU:
500 tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
501 args[5], small);
502 tcg_out_jxx(s, JCC_JNE, label_next, 1);
503 tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
504 args[5], small);
505 break;
506 case TCG_COND_LEU:
507 tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
508 args[5], small);
509 tcg_out_jxx(s, JCC_JNE, label_next, 1);
510 tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
511 args[5], small);
512 break;
513 case TCG_COND_GTU:
514 tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
515 args[5], small);
516 tcg_out_jxx(s, JCC_JNE, label_next, 1);
517 tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
518 args[5], small);
519 break;
520 case TCG_COND_GEU:
521 tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
522 args[5], small);
523 tcg_out_jxx(s, JCC_JNE, label_next, 1);
524 tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
525 args[5], small);
526 break;
527 default:
528 tcg_abort();
530 tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
533 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg dest,
534 TCGArg arg1, TCGArg arg2, int const_arg2)
536 tcg_out_cmp(s, arg1, arg2, const_arg2);
537 /* setcc */
538 tcg_out_modrm(s, 0x90 | tcg_cond_to_jcc[cond] | P_EXT, 0, dest);
539 tgen_arithi(s, ARITH_AND, dest, 0xff, 0);
542 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
543 const int *const_args)
545 TCGArg new_args[6];
546 int label_true, label_over;
548 memcpy(new_args, args+1, 5*sizeof(TCGArg));
550 if (args[0] == args[1] || args[0] == args[2]
551 || (!const_args[3] && args[0] == args[3])
552 || (!const_args[4] && args[0] == args[4])) {
553 /* When the destination overlaps with one of the argument
554 registers, don't do anything tricky. */
555 label_true = gen_new_label();
556 label_over = gen_new_label();
558 new_args[5] = label_true;
559 tcg_out_brcond2(s, new_args, const_args+1, 1);
561 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
562 tcg_out_jxx(s, JCC_JMP, label_over, 1);
563 tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr);
565 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
566 tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
567 } else {
568 /* When the destination does not overlap one of the arguments,
569 clear the destination first, jump if cond false, and emit an
570 increment in the true case. This results in smaller code. */
572 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
574 label_over = gen_new_label();
575 new_args[4] = tcg_invert_cond(new_args[4]);
576 new_args[5] = label_over;
577 tcg_out_brcond2(s, new_args, const_args+1, 1);
579 tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
580 tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* Slow-path helpers for guest loads, indexed by log2 of the access size. */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Slow-path helpers for guest stores, indexed by log2 of the access size. */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#ifndef CONFIG_USER_ONLY
/* System emulation always maps the guest at offset 0. */
#define GUEST_BASE 0
#endif
607 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
608 EAX. It will be useful once fixed registers globals are less
609 common. */
610 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
611 int opc)
613 int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
614 #if defined(CONFIG_SOFTMMU)
615 uint8_t *label1_ptr, *label2_ptr;
616 #endif
617 #if TARGET_LONG_BITS == 64
618 #if defined(CONFIG_SOFTMMU)
619 uint8_t *label3_ptr;
620 #endif
621 int addr_reg2;
622 #endif
624 data_reg = *args++;
625 if (opc == 3)
626 data_reg2 = *args++;
627 else
628 data_reg2 = 0;
629 addr_reg = *args++;
630 #if TARGET_LONG_BITS == 64
631 addr_reg2 = *args++;
632 #endif
633 mem_index = *args;
634 s_bits = opc & 3;
636 r0 = TCG_REG_EAX;
637 r1 = TCG_REG_EDX;
639 #if defined(CONFIG_SOFTMMU)
640 tcg_out_mov(s, r1, addr_reg);
642 tcg_out_mov(s, r0, addr_reg);
644 tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
646 tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
647 tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
649 tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
650 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
652 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
653 tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
654 tcg_out8(s, (5 << 3) | r1);
655 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));
657 /* cmp 0(r1), r0 */
658 tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);
660 tcg_out_mov(s, r0, addr_reg);
662 #if TARGET_LONG_BITS == 32
663 /* je label1 */
664 tcg_out8(s, OPC_JCC_short + JCC_JE);
665 label1_ptr = s->code_ptr;
666 s->code_ptr++;
667 #else
668 /* jne label3 */
669 tcg_out8(s, OPC_JCC_short + JCC_JNE);
670 label3_ptr = s->code_ptr;
671 s->code_ptr++;
673 /* cmp 4(r1), addr_reg2 */
674 tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);
676 /* je label1 */
677 tcg_out8(s, OPC_JCC_short + JCC_JE);
678 label1_ptr = s->code_ptr;
679 s->code_ptr++;
681 /* label3: */
682 *label3_ptr = s->code_ptr - label3_ptr - 1;
683 #endif
685 /* XXX: move that code at the end of the TB */
686 #if TARGET_LONG_BITS == 32
687 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
688 #else
689 tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
690 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
691 #endif
692 tcg_out8(s, 0xe8);
693 tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
694 (tcg_target_long)s->code_ptr - 4);
696 switch(opc) {
697 case 0 | 4:
698 tcg_out_ext8s(s, data_reg, TCG_REG_EAX);
699 break;
700 case 1 | 4:
701 tcg_out_ext16s(s, data_reg, TCG_REG_EAX);
702 break;
703 case 0:
704 tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
705 break;
706 case 1:
707 tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
708 break;
709 case 2:
710 default:
711 tcg_out_mov(s, data_reg, TCG_REG_EAX);
712 break;
713 case 3:
714 if (data_reg == TCG_REG_EDX) {
715 tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
716 tcg_out_mov(s, data_reg2, TCG_REG_EAX);
717 } else {
718 tcg_out_mov(s, data_reg, TCG_REG_EAX);
719 tcg_out_mov(s, data_reg2, TCG_REG_EDX);
721 break;
724 /* jmp label2 */
725 tcg_out8(s, OPC_JMP_short);
726 label2_ptr = s->code_ptr;
727 s->code_ptr++;
729 /* label1: */
730 *label1_ptr = s->code_ptr - label1_ptr - 1;
732 /* add x(r1), r0 */
733 tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
734 offsetof(CPUTLBEntry, addr_read));
735 #else
736 r0 = addr_reg;
737 #endif
739 #ifdef TARGET_WORDS_BIGENDIAN
740 bswap = 1;
741 #else
742 bswap = 0;
743 #endif
744 switch(opc) {
745 case 0:
746 /* movzbl */
747 tcg_out_modrm_offset(s, OPC_MOVZBL, data_reg, r0, GUEST_BASE);
748 break;
749 case 0 | 4:
750 /* movsbl */
751 tcg_out_modrm_offset(s, OPC_MOVSBL, data_reg, r0, GUEST_BASE);
752 break;
753 case 1:
754 /* movzwl */
755 tcg_out_modrm_offset(s, OPC_MOVZWL, data_reg, r0, GUEST_BASE);
756 if (bswap) {
757 tcg_out_rolw_8(s, data_reg);
759 break;
760 case 1 | 4:
761 /* movswl */
762 tcg_out_modrm_offset(s, OPC_MOVSWL, data_reg, r0, GUEST_BASE);
763 if (bswap) {
764 tcg_out_rolw_8(s, data_reg);
766 /* movswl data_reg, data_reg */
767 tcg_out_modrm(s, OPC_MOVSWL, data_reg, data_reg);
769 break;
770 case 2:
771 tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
772 if (bswap) {
773 tcg_out_bswap32(s, data_reg);
775 break;
776 case 3:
777 if (bswap) {
778 int t = data_reg;
779 data_reg = data_reg2;
780 data_reg2 = t;
782 if (r0 != data_reg) {
783 tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
784 tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
785 } else {
786 tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
787 tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
789 if (bswap) {
790 tcg_out_bswap32(s, data_reg);
791 tcg_out_bswap32(s, data_reg2);
793 break;
794 default:
795 tcg_abort();
798 #if defined(CONFIG_SOFTMMU)
799 /* label2: */
800 *label2_ptr = s->code_ptr - label2_ptr - 1;
801 #endif
805 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
806 int opc)
808 int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
809 #if defined(CONFIG_SOFTMMU)
810 uint8_t *label1_ptr, *label2_ptr;
811 #endif
812 #if TARGET_LONG_BITS == 64
813 #if defined(CONFIG_SOFTMMU)
814 uint8_t *label3_ptr;
815 #endif
816 int addr_reg2;
817 #endif
819 data_reg = *args++;
820 if (opc == 3)
821 data_reg2 = *args++;
822 else
823 data_reg2 = 0;
824 addr_reg = *args++;
825 #if TARGET_LONG_BITS == 64
826 addr_reg2 = *args++;
827 #endif
828 mem_index = *args;
830 s_bits = opc;
832 r0 = TCG_REG_EAX;
833 r1 = TCG_REG_EDX;
835 #if defined(CONFIG_SOFTMMU)
836 tcg_out_mov(s, r1, addr_reg);
838 tcg_out_mov(s, r0, addr_reg);
840 tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
842 tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
843 tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
845 tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
846 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
848 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
849 tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
850 tcg_out8(s, (5 << 3) | r1);
851 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));
853 /* cmp 0(r1), r0 */
854 tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);
856 tcg_out_mov(s, r0, addr_reg);
858 #if TARGET_LONG_BITS == 32
859 /* je label1 */
860 tcg_out8(s, OPC_JCC_short + JCC_JE);
861 label1_ptr = s->code_ptr;
862 s->code_ptr++;
863 #else
864 /* jne label3 */
865 tcg_out8(s, OPC_JCC_short + JCC_JNE);
866 label3_ptr = s->code_ptr;
867 s->code_ptr++;
869 /* cmp 4(r1), addr_reg2 */
870 tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);
872 /* je label1 */
873 tcg_out8(s, OPC_JCC_short + JCC_JE);
874 label1_ptr = s->code_ptr;
875 s->code_ptr++;
877 /* label3: */
878 *label3_ptr = s->code_ptr - label3_ptr - 1;
879 #endif
881 /* XXX: move that code at the end of the TB */
882 #if TARGET_LONG_BITS == 32
883 if (opc == 3) {
884 tcg_out_mov(s, TCG_REG_EDX, data_reg);
885 tcg_out_mov(s, TCG_REG_ECX, data_reg2);
886 tcg_out8(s, 0x6a); /* push Ib */
887 tcg_out8(s, mem_index);
888 tcg_out8(s, 0xe8);
889 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
890 (tcg_target_long)s->code_ptr - 4);
891 tcg_out_addi(s, TCG_REG_ESP, 4);
892 } else {
893 switch(opc) {
894 case 0:
895 tcg_out_ext8u(s, TCG_REG_EDX, data_reg);
896 break;
897 case 1:
898 tcg_out_ext16u(s, TCG_REG_EDX, data_reg);
899 break;
900 case 2:
901 tcg_out_mov(s, TCG_REG_EDX, data_reg);
902 break;
904 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
905 tcg_out8(s, 0xe8);
906 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
907 (tcg_target_long)s->code_ptr - 4);
909 #else
910 if (opc == 3) {
911 tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
912 tcg_out8(s, 0x6a); /* push Ib */
913 tcg_out8(s, mem_index);
914 tcg_out_opc(s, 0x50 + data_reg2); /* push */
915 tcg_out_opc(s, 0x50 + data_reg); /* push */
916 tcg_out8(s, 0xe8);
917 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
918 (tcg_target_long)s->code_ptr - 4);
919 tcg_out_addi(s, TCG_REG_ESP, 12);
920 } else {
921 tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
922 switch(opc) {
923 case 0:
924 tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
925 break;
926 case 1:
927 tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
928 break;
929 case 2:
930 tcg_out_mov(s, TCG_REG_ECX, data_reg);
931 break;
933 tcg_out8(s, 0x6a); /* push Ib */
934 tcg_out8(s, mem_index);
935 tcg_out8(s, 0xe8);
936 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
937 (tcg_target_long)s->code_ptr - 4);
938 tcg_out_addi(s, TCG_REG_ESP, 4);
940 #endif
942 /* jmp label2 */
943 tcg_out8(s, OPC_JMP_short);
944 label2_ptr = s->code_ptr;
945 s->code_ptr++;
947 /* label1: */
948 *label1_ptr = s->code_ptr - label1_ptr - 1;
950 /* add x(r1), r0 */
951 tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
952 offsetof(CPUTLBEntry, addr_write));
953 #else
954 r0 = addr_reg;
955 #endif
957 #ifdef TARGET_WORDS_BIGENDIAN
958 bswap = 1;
959 #else
960 bswap = 0;
961 #endif
962 switch(opc) {
963 case 0:
964 tcg_out_modrm_offset(s, OPC_MOVB_EvGv, data_reg, r0, GUEST_BASE);
965 break;
966 case 1:
967 if (bswap) {
968 tcg_out_mov(s, r1, data_reg);
969 tcg_out_rolw_8(s, r1);
970 data_reg = r1;
972 /* movw */
973 tcg_out8(s, 0x66);
974 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, data_reg, r0, GUEST_BASE);
975 break;
976 case 2:
977 if (bswap) {
978 tcg_out_mov(s, r1, data_reg);
979 tcg_out_bswap32(s, r1);
980 data_reg = r1;
982 tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
983 break;
984 case 3:
985 if (bswap) {
986 tcg_out_mov(s, r1, data_reg2);
987 tcg_out_bswap32(s, r1);
988 tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE);
989 tcg_out_mov(s, r1, data_reg);
990 tcg_out_bswap32(s, r1);
991 tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE + 4);
992 } else {
993 tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
994 tcg_out_st(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
996 break;
997 default:
998 tcg_abort();
1001 #if defined(CONFIG_SOFTMMU)
1002 /* label2: */
1003 *label2_ptr = s->code_ptr - label2_ptr - 1;
1004 #endif
1007 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1008 const TCGArg *args, const int *const_args)
1010 int c;
1012 switch(opc) {
1013 case INDEX_op_exit_tb:
1014 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
1015 tcg_out8(s, OPC_JMP_long); /* jmp tb_ret_addr */
1016 tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
1017 break;
1018 case INDEX_op_goto_tb:
1019 if (s->tb_jmp_offset) {
1020 /* direct jump method */
1021 tcg_out8(s, OPC_JMP_long); /* jmp im */
1022 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1023 tcg_out32(s, 0);
1024 } else {
1025 /* indirect jump method */
1026 tcg_out_modrm_offset(s, 0xff, EXT_JMPN_Ev, -1,
1027 (tcg_target_long)(s->tb_next + args[0]));
1029 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1030 break;
1031 case INDEX_op_call:
1032 if (const_args[0]) {
1033 tcg_out8(s, 0xe8);
1034 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
1035 } else {
1036 tcg_out_modrm(s, 0xff, 2, args[0]);
1038 break;
1039 case INDEX_op_jmp:
1040 if (const_args[0]) {
1041 tcg_out8(s, OPC_JMP_long);
1042 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
1043 } else {
1044 /* jmp *reg */
1045 tcg_out_modrm(s, 0xff, EXT_JMPN_Ev, args[0]);
1047 break;
1048 case INDEX_op_br:
1049 tcg_out_jxx(s, JCC_JMP, args[0], 0);
1050 break;
1051 case INDEX_op_movi_i32:
1052 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1053 break;
1054 case INDEX_op_ld8u_i32:
1055 /* movzbl */
1056 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
1057 break;
1058 case INDEX_op_ld8s_i32:
1059 /* movsbl */
1060 tcg_out_modrm_offset(s, OPC_MOVSBL, args[0], args[1], args[2]);
1061 break;
1062 case INDEX_op_ld16u_i32:
1063 /* movzwl */
1064 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
1065 break;
1066 case INDEX_op_ld16s_i32:
1067 /* movswl */
1068 tcg_out_modrm_offset(s, OPC_MOVSWL, args[0], args[1], args[2]);
1069 break;
1070 case INDEX_op_ld_i32:
1071 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1072 break;
1073 case INDEX_op_st8_i32:
1074 /* movb */
1075 tcg_out_modrm_offset(s, OPC_MOVB_EvGv, args[0], args[1], args[2]);
1076 break;
1077 case INDEX_op_st16_i32:
1078 /* movw */
1079 tcg_out8(s, 0x66);
1080 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, args[0], args[1], args[2]);
1081 break;
1082 case INDEX_op_st_i32:
1083 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1084 break;
1085 case INDEX_op_sub_i32:
1086 c = ARITH_SUB;
1087 goto gen_arith;
1088 case INDEX_op_and_i32:
1089 c = ARITH_AND;
1090 goto gen_arith;
1091 case INDEX_op_or_i32:
1092 c = ARITH_OR;
1093 goto gen_arith;
1094 case INDEX_op_xor_i32:
1095 c = ARITH_XOR;
1096 goto gen_arith;
1097 case INDEX_op_add_i32:
1098 c = ARITH_ADD;
1099 gen_arith:
1100 if (const_args[2]) {
1101 tgen_arithi(s, c, args[0], args[2], 0);
1102 } else {
1103 tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
1105 break;
1106 case INDEX_op_mul_i32:
1107 if (const_args[2]) {
1108 int32_t val;
1109 val = args[2];
1110 if (val == (int8_t)val) {
1111 tcg_out_modrm(s, 0x6b, args[0], args[0]);
1112 tcg_out8(s, val);
1113 } else {
1114 tcg_out_modrm(s, 0x69, args[0], args[0]);
1115 tcg_out32(s, val);
1117 } else {
1118 tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
1120 break;
1121 case INDEX_op_mulu2_i32:
1122 tcg_out_modrm(s, 0xf7, 4, args[3]);
1123 break;
1124 case INDEX_op_div2_i32:
1125 tcg_out_modrm(s, 0xf7, 7, args[4]);
1126 break;
1127 case INDEX_op_divu2_i32:
1128 tcg_out_modrm(s, 0xf7, 6, args[4]);
1129 break;
1130 case INDEX_op_shl_i32:
1131 c = SHIFT_SHL;
1132 gen_shift32:
1133 if (const_args[2]) {
1134 tcg_out_shifti(s, c, args[0], args[2]);
1135 } else {
1136 tcg_out_modrm(s, OPC_SHIFT_cl, c, args[0]);
1138 break;
1139 case INDEX_op_shr_i32:
1140 c = SHIFT_SHR;
1141 goto gen_shift32;
1142 case INDEX_op_sar_i32:
1143 c = SHIFT_SAR;
1144 goto gen_shift32;
1145 case INDEX_op_rotl_i32:
1146 c = SHIFT_ROL;
1147 goto gen_shift32;
1148 case INDEX_op_rotr_i32:
1149 c = SHIFT_ROR;
1150 goto gen_shift32;
1152 case INDEX_op_add2_i32:
1153 if (const_args[4])
1154 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
1155 else
1156 tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]);
1157 if (const_args[5])
1158 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
1159 else
1160 tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]);
1161 break;
1162 case INDEX_op_sub2_i32:
1163 if (const_args[4])
1164 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
1165 else
1166 tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]);
1167 if (const_args[5])
1168 tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
1169 else
1170 tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]);
1171 break;
1172 case INDEX_op_brcond_i32:
1173 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
1174 args[3], 0);
1175 break;
1176 case INDEX_op_brcond2_i32:
1177 tcg_out_brcond2(s, args, const_args, 0);
1178 break;
1180 case INDEX_op_bswap16_i32:
1181 tcg_out_rolw_8(s, args[0]);
1182 break;
1183 case INDEX_op_bswap32_i32:
1184 tcg_out_bswap32(s, args[0]);
1185 break;
1187 case INDEX_op_neg_i32:
1188 tcg_out_modrm(s, 0xf7, 3, args[0]);
1189 break;
1191 case INDEX_op_not_i32:
1192 tcg_out_modrm(s, 0xf7, 2, args[0]);
1193 break;
1195 case INDEX_op_ext8s_i32:
1196 tcg_out_ext8s(s, args[0], args[1]);
1197 break;
1198 case INDEX_op_ext16s_i32:
1199 tcg_out_ext16s(s, args[0], args[1]);
1200 break;
1201 case INDEX_op_ext8u_i32:
1202 tcg_out_ext8u(s, args[0], args[1]);
1203 break;
1204 case INDEX_op_ext16u_i32:
1205 tcg_out_ext16u(s, args[0], args[1]);
1206 break;
1208 case INDEX_op_setcond_i32:
1209 tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1210 break;
1211 case INDEX_op_setcond2_i32:
1212 tcg_out_setcond2(s, args, const_args);
1213 break;
1215 case INDEX_op_qemu_ld8u:
1216 tcg_out_qemu_ld(s, args, 0);
1217 break;
1218 case INDEX_op_qemu_ld8s:
1219 tcg_out_qemu_ld(s, args, 0 | 4);
1220 break;
1221 case INDEX_op_qemu_ld16u:
1222 tcg_out_qemu_ld(s, args, 1);
1223 break;
1224 case INDEX_op_qemu_ld16s:
1225 tcg_out_qemu_ld(s, args, 1 | 4);
1226 break;
1227 case INDEX_op_qemu_ld32:
1228 tcg_out_qemu_ld(s, args, 2);
1229 break;
1230 case INDEX_op_qemu_ld64:
1231 tcg_out_qemu_ld(s, args, 3);
1232 break;
1234 case INDEX_op_qemu_st8:
1235 tcg_out_qemu_st(s, args, 0);
1236 break;
1237 case INDEX_op_qemu_st16:
1238 tcg_out_qemu_st(s, args, 1);
1239 break;
1240 case INDEX_op_qemu_st32:
1241 tcg_out_qemu_st(s, args, 2);
1242 break;
1243 case INDEX_op_qemu_st64:
1244 tcg_out_qemu_st(s, args, 3);
1245 break;
1247 default:
1248 tcg_abort();
/* Operand-constraint table for every TCG opcode this backend accepts.
   Constraint letters are decoded by target_parse_constraint:
     "r"  — any of the eight GP registers
     "q"  — presumably a byte-addressable register (%eax/%ebx/%ecx/%edx),
            needed by byte stores and setcc — confirm against
            target_parse_constraint
     "a"/"d" — fixed %eax / %edx (mul/div implicit operands)
     "L"  — register that survives the qemu_ld/st slow-path call —
            verify which registers 'L' excludes in target_parse_constraint
     "c"/"b" — NOTE(review): look like fixed %ecx / %ebx — confirm
     "0".."5" — operand must alias the numbered output operand
            (x86 two-address form: destination doubles as first source)
     "i"  — immediate allowed
   The table is terminated by the { -1 } sentinel.  */
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "q", "r" } },   /* byte store needs %al-class reg */
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* Two-address arithmetic: output "r" aliased with input "0".  */
    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    /* Variable shift count must live in %cl, hence "ci".  */
    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    /* ext8s/ext8u read a byte register; ext16* can use any register
       (movswl/movzwl take any source) — see tcg_out_ext16s/u.  */
    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },  /* setcc writes a byte reg */
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },

    /* With a 64-bit guest address, qemu_ld/st take one extra "L"
       operand for the high half of the address.  */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};
1334 static int tcg_target_callee_save_regs[] = {
1335 /* TCG_REG_EBP, */ /* currently used for the global env, so no
1336 need to save */
1337 TCG_REG_EBX,
1338 TCG_REG_ESI,
1339 TCG_REG_EDI,
/* Emit a one-byte "push %reg": opcode 0x50 plus the register encoding.  */
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, 0x50 + reg);
}
/* Emit a one-byte "pop %reg": opcode 0x58 plus the register encoding.  */
static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, 0x58 + reg);
}
1352 /* Generate global QEMU prologue and epilogue code */
1353 void tcg_target_qemu_prologue(TCGContext *s)
1355 int i, frame_size, push_size, stack_addend;
1357 /* TB prologue */
1358 /* save all callee saved registers */
1359 for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1360 tcg_out_push(s, tcg_target_callee_save_regs[i]);
1362 /* reserve some stack space */
1363 push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
1364 frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
1365 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
1366 ~(TCG_TARGET_STACK_ALIGN - 1);
1367 stack_addend = frame_size - push_size;
1368 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
1370 tcg_out_modrm(s, 0xff, EXT_JMPN_Ev, TCG_REG_EAX); /* jmp *%eax */
1372 /* TB epilogue */
1373 tb_ret_addr = s->code_ptr;
1374 tcg_out_addi(s, TCG_REG_ESP, stack_addend);
1375 for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
1376 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
1378 tcg_out8(s, 0xc3); /* ret */
/* One-time initialization of the i386 backend: register availability,
   call-clobber set, reserved registers, and the opcode constraint table.  */
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe: the TLB addressing code assumes entries are exactly
       (1 << CPU_TLB_ENTRY_BITS) bytes — abort early if that drifts.  */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    /* All eight GP registers (mask 0xff) can hold 32-bit values.  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);

    /* %eax, %edx, %ecx are caller-saved in the i386 calling convention
       and thus clobbered across calls.  */
    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);

    /* %esp is the stack pointer; never hand it to the allocator.  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);

    tcg_add_target_add_op_defs(x86_op_defs);
}