tcg-i386: Tidy push/pop.
[qemu/aliguori-queue.git] / tcg / i386 / tcg-target.c
blob5557f8cc8cd4d193a14c5f5551039d1a06a3c295
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
#ifndef NDEBUG
/* Register names indexed by TCG register number; used only for
   debug dumps, hence compiled out in NDEBUG builds.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax",
    "%ecx",
    "%edx",
    "%ebx",
    "%esp",
    "%ebp",
    "%esi",
    "%edi",
};
#endif
/* Register allocation preference order.  NOTE(review): the callee-saved
   registers (EBX/ESI/EDI/EBP) come first, presumably so allocated values
   survive helper calls — confirm against the register allocator.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
};

/* Registers carrying integer arguments under the regparm convention
   (see tcg_target_get_call_iarg_regs_count below).  */
static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
/* Registers carrying a 32-bit (or 64-bit pair) return value.  */
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };

/* Target of the "jmp tb_ret_addr" emitted for INDEX_op_exit_tb;
   initialized elsewhere (prologue generation not in this view).  */
static uint8_t *tb_ret_addr;
/* Back-patch a relocation recorded by tcg_out_reloc once the target
   address VALUE is known.  ADDEND is the addend stored at reloc time
   (e.g. -4 for rel32 fields, to account for the instruction end).  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_32:
        /* Absolute 32-bit address.  */
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        /* PC-relative 32-bit displacement.  */
        *(uint32_t *)code_ptr = value - (long)code_ptr;
        break;
    case R_386_PC8:
        /* PC-relative 8-bit displacement; abort if it does not fit.  */
        value -= (long)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        *(uint8_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}
/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        /* Plain cdecl: all arguments go on the stack.  */
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        /* regparm(n): 1, 2 or 3 register arguments.  Relies on the
           REGPARM_* enumerators being consecutive.  */
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}
92 /* parse target specific constraints */
93 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
95 const char *ct_str;
97 ct_str = *pct_str;
98 switch(ct_str[0]) {
99 case 'a':
100 ct->ct |= TCG_CT_REG;
101 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
102 break;
103 case 'b':
104 ct->ct |= TCG_CT_REG;
105 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
106 break;
107 case 'c':
108 ct->ct |= TCG_CT_REG;
109 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
110 break;
111 case 'd':
112 ct->ct |= TCG_CT_REG;
113 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
114 break;
115 case 'S':
116 ct->ct |= TCG_CT_REG;
117 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
118 break;
119 case 'D':
120 ct->ct |= TCG_CT_REG;
121 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
122 break;
123 case 'q':
124 ct->ct |= TCG_CT_REG;
125 tcg_regset_set32(ct->u.regs, 0, 0xf);
126 break;
127 case 'r':
128 ct->ct |= TCG_CT_REG;
129 tcg_regset_set32(ct->u.regs, 0, 0xff);
130 break;
132 /* qemu_ld/st address constraint */
133 case 'L':
134 ct->ct |= TCG_CT_REG;
135 tcg_regset_set32(ct->u.regs, 0, 0xff);
136 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
137 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
138 break;
139 default:
140 return -1;
142 ct_str++;
143 *pct_str = ct_str;
144 return 0;
147 /* test if a constant matches the constraint */
148 static inline int tcg_target_const_match(tcg_target_long val,
149 const TCGArgConstraint *arg_ct)
151 int ct;
152 ct = arg_ct->ct;
153 if (ct & TCG_CT_CONST)
154 return 1;
155 else
156 return 0;
#define P_EXT   0x100 /* 0x0f opcode prefix */

/* Raw x86 opcode bytes used by the emitters below.  Names follow the
   Intel manual's operand notation (Ev = r/m32, Gv = r32, Iz/Ib =
   immediate, Iv = imm32).  */
#define OPC_ARITH_EvIz	(0x81)
#define OPC_ARITH_EvIb	(0x83)
#define OPC_ARITH_GvEv	(0x03)		/* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv	(OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP	(0xc8 | P_EXT)
#define OPC_CMP_GvEv	(OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32	(0x48)
#define OPC_INC_r32	(0x40)
#define OPC_JCC_long	(0x80 | P_EXT)	/* ... plus condition code */
#define OPC_JCC_short	(0x70)		/* ... plus condition code */
#define OPC_JMP_long	(0xe9)
#define OPC_JMP_short	(0xeb)
#define OPC_MOVB_EvGv	(0x88)		/* stores, more or less */
#define OPC_MOVL_EvGv	(0x89)		/* stores, more or less */
#define OPC_MOVL_GvEv	(0x8b)		/* loads, more or less */
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL	(0xbe | P_EXT)
#define OPC_MOVSWL	(0xbf | P_EXT)
#define OPC_MOVZBL	(0xb6 | P_EXT)
#define OPC_MOVZWL	(0xb7 | P_EXT)
#define OPC_POP_r32	(0x58)
#define OPC_PUSH_r32	(0x50)
#define OPC_PUSH_Iv	(0x68)
#define OPC_PUSH_Ib	(0x6a)
#define OPC_SHIFT_1	(0xd1)
#define OPC_SHIFT_Ib	(0xc1)
#define OPC_SHIFT_cl	(0xd3)
#define OPC_TESTL	(0x85)

/* Group 1 opcode extensions for 0x80-0x83.  These go in the /r field
   of the ModRM byte to select the arithmetic operation.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 5 opcode extensions for 0xff. */
#define EXT_JMPN_Ev	4

/* Condition codes to be added to OPC_JCC_{long,short}. */
#define JCC_JMP (-1)    /* pseudo-cc meaning "unconditional jump" */
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

/* Map TCG comparison conditions onto x86 condition codes: signed
   comparisons use JL/JGE/JLE/JG, unsigned use JB/JAE/JBE/JA.  */
static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
242 static inline void tcg_out_opc(TCGContext *s, int opc)
244 if (opc & P_EXT)
245 tcg_out8(s, 0x0f);
246 tcg_out8(s, opc);
/* Emit opcode plus a register-direct ModRM byte (mod = 11b):
   R is the reg field, RM the r/m field.  */
static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}
255 /* rm == -1 means no register index */
256 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
257 int32_t offset)
259 tcg_out_opc(s, opc);
260 if (rm == -1) {
261 tcg_out8(s, 0x05 | (r << 3));
262 tcg_out32(s, offset);
263 } else if (offset == 0 && rm != TCG_REG_EBP) {
264 if (rm == TCG_REG_ESP) {
265 tcg_out8(s, 0x04 | (r << 3));
266 tcg_out8(s, 0x24);
267 } else {
268 tcg_out8(s, 0x00 | (r << 3) | rm);
270 } else if ((int8_t)offset == offset) {
271 if (rm == TCG_REG_ESP) {
272 tcg_out8(s, 0x44 | (r << 3));
273 tcg_out8(s, 0x24);
274 } else {
275 tcg_out8(s, 0x40 | (r << 3) | rm);
277 tcg_out8(s, offset);
278 } else {
279 if (rm == TCG_REG_ESP) {
280 tcg_out8(s, 0x84 | (r << 3));
281 tcg_out8(s, 0x24);
282 } else {
283 tcg_out8(s, 0x80 | (r << 3) | rm);
285 tcg_out32(s, offset);
/* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* The ARITH_* code selects the opcode byte, not the ModRM /r field.  */
    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3), dest, src);
}
/* Register-to-register move; omitted entirely when source == dest.  */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    if (arg != ret) {
        tcg_out_modrm(s, OPC_MOVL_GvEv, ret, arg);
    }
}
302 static inline void tcg_out_movi(TCGContext *s, TCGType type,
303 int ret, int32_t arg)
305 if (arg == 0) {
306 tgen_arithr(s, ARITH_XOR, ret, ret);
307 } else {
308 tcg_out8(s, OPC_MOVL_Iv + ret);
309 tcg_out32(s, arg);
313 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
315 if (val == (int8_t)val) {
316 tcg_out_opc(s, OPC_PUSH_Ib);
317 tcg_out8(s, val);
318 } else {
319 tcg_out_opc(s, OPC_PUSH_Iv);
320 tcg_out32(s, val);
/* push %reg — single-byte opcode with the register number folded in.  */
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + reg);
}
/* pop %reg — single-byte opcode with the register number folded in.  */
static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + reg);
}
/* Load 32 bits from arg1+arg2 into RET (TYPE is unused on i386).  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
}
/* Store 32 bits of ARG to arg1+arg2 (TYPE is unused on i386).  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
}
346 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
348 if (count == 1) {
349 tcg_out_modrm(s, OPC_SHIFT_1, subopc, reg);
350 } else {
351 tcg_out_modrm(s, OPC_SHIFT_Ib, subopc, reg);
352 tcg_out8(s, count);
/* bswap %reg — byte-swap a 32-bit register in place.  */
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + reg);
}
/* rolw $8, %reg — swap the two low bytes (16-bit byteswap).  */
static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out8(s, 0x66);                    /* operand-size prefix: 16-bit op */
    tcg_out_shifti(s, SHIFT_ROL, reg, 8);
}
/* Zero-extend the low byte of SRC into DEST.  */
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl; only EAX..EBX (regs 0-3) have byte subregisters */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVZBL, dest, src);
}
/* Sign-extend the low byte of SRC into DEST.  */
static void tcg_out_ext8s(TCGContext *s, int dest, int src)
{
    /* movsbl; only EAX..EBX (regs 0-3) have byte subregisters */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVSBL, dest, src);
}
/* Zero-extend the low 16 bits of SRC into DEST.  */
static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}
/* Sign-extend the low 16 bits of SRC into DEST.  */
static inline void tcg_out_ext16s(TCGContext *s, int dest, int src)
{
    /* movswl */
    tcg_out_modrm(s, OPC_MOVSWL, dest, src);
}
/* Generate r0 op= val, choosing the shortest encoding.  CF != 0 means
   the caller needs the carry flag afterwards (add2/sub2), which rules
   out inc/dec since those do not write CF.  */
static inline void tgen_arithi(TCGContext *s, int c, int r0,
                               int32_t val, int cf)
{
    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        /* add/sub of +/-1 -> inc or dec (XOR picks the inverse op
           when the immediate is negative).  */
        int opc = ((c == ARITH_ADD) ^ (val < 0) ? OPC_INC_r32 : OPC_DEC_r32);
        tcg_out_opc(s, opc + r0);
    } else if (val == (int8_t)val) {
        /* Sign-extended 8-bit immediate form.  */
        tcg_out_modrm(s, OPC_ARITH_EvIb, c, r0);
        tcg_out8(s, val);
    } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
        /* and $0xff -> movzbl; needs a byte subregister (EAX..EBX).  */
        tcg_out_ext8u(s, r0, r0);
    } else if (c == ARITH_AND && val == 0xffffu) {
        /* and $0xffff -> movzwl.  */
        tcg_out_ext16u(s, r0, r0);
    } else {
        /* Generic form with a full 32-bit immediate.  */
        tcg_out_modrm(s, OPC_ARITH_EvIz, c, r0);
        tcg_out32(s, val);
    }
}
/* Add a constant to a register; adding zero emits nothing.  */
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0)
        tgen_arithi(s, ARITH_ADD, reg, val, 0);
}
/* Use SMALL != 0 to force a short forward branch. */
/* Emit a jump to LABEL_INDEX.  OPC is a JCC_* condition code, or -1
   (JCC_JMP) for an unconditional jump.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        /* Backward branch: destination known, pick the smallest
           encoding that reaches it.  */
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;     /* displacement measured from end of 2-byte insn */
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                /* Caller promised a short branch would suffice.  */
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);      /* jmp rel32 is 5 bytes */
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc);
                tcg_out32(s, val - 6);      /* 0f 8x rel32 is 6 bytes */
            }
        }
    } else if (small) {
        /* Forward short branch: record an 8-bit PC-relative reloc.  */
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        /* Forward long branch: record a 32-bit PC-relative reloc.  */
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
468 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
469 int const_arg2)
471 if (const_arg2) {
472 if (arg2 == 0) {
473 /* test r, r */
474 tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
475 } else {
476 tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
478 } else {
479 tgen_arithr(s, ARITH_CMP, arg1, arg2);
/* Compare ARG1 with ARG2 and branch to LABEL_INDEX if COND holds.
   SMALL forces the short branch encoding (see tcg_out_jxx).  */
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
/* 64-bit compare-and-branch from 32-bit pieces.
   args[0]/args[1]: low/high words of operand 1,
   args[2]/args[3]: low/high words of operand 2 (constant per const_args),
   args[4]: condition, args[5]: branch target label.
   The general scheme: decide on the high words with the signed/unsigned
   condition, fall through to an unsigned comparison of the low words
   when the high words are equal.  */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        /* Equal only if both halves are equal.  */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       label_next, 1);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_NE:
        /* Not equal if either half differs.  */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       args[5], small);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
/* Set DEST to 0/1 according to COND comparing ARG1 and ARG2.  */
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg dest,
                            TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    /* setcc writes only the low byte, so zero-extend afterwards.  */
    tcg_out_modrm(s, 0x90 | tcg_cond_to_jcc[cond] | P_EXT, 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
/* 64-bit setcond built from brcond2.  args[0] is the destination; the
   remaining five args (copied into new_args) have the brcond2 layout,
   with new_args[5] repointed at a local label.  */
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky. */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case. This results in smaller code. */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        /* Branch over the increment when the inverted condition holds.  */
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    }
}
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* Softmmu slow-path load helpers, indexed by log2 of the access size.  */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Softmmu slow-path store helpers, indexed by log2 of the access size.  */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* GUEST_BASE is only meaningful for user-mode emulation; force it to
   zero for system emulation so the direct-access path adds nothing.  */
#ifndef CONFIG_USER_ONLY
#define GUEST_BASE 0
#endif
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common. */
/* Emit a guest memory load.  OPC encodes log2(size) in bits 0-1 and
   sign-extension in bit 2.  args: data register(s), address
   register(s), then the softmmu mem_index.  With CONFIG_SOFTMMU this
   emits a TLB lookup fast path plus a helper-call slow path.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;    /* high word of a 64-bit load */
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;           /* log2 of the access size */

    /* Scratch registers; the 'L' constraint keeps them free.  */
    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    /* r1 = TLB index bits of the address.  */
    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page part of the address (plus alignment bits to check).  */
    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* For 64-bit guest addresses, also compare the high word.  */
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: call the helper.  Address is already in EAX (regparm).  */
    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out8(s, 0xe8);      /* call rel32 */
    tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    /* Move/extend the helper result (EAX, or EDX:EAX for 64-bit)
       into the destination register(s).  */
    switch(opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        /* Careful not to clobber EDX before it is copied out.  */
        if (data_reg == TCG_REG_EDX) {
            tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit — add the host/guest address delta.  */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* Direct load through r0 (host address), with byteswap for
       big-endian guests.  */
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, OPC_MOVZBL, data_reg, r0, GUEST_BASE);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, OPC_MOVSBL, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, OPC_MOVZWL, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_rolw_8(s, data_reg);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, OPC_MOVSWL, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_rolw_8(s, data_reg);

            /* movswl data_reg, data_reg — redo the sign extension
               after the byteswap */
            tcg_out_modrm(s, OPC_MOVSWL, data_reg, data_reg);
        }
        break;
    case 2:
        tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
        }
        break;
    case 3:
        /* 64-bit load: two 32-bit loads; swap the halves for bswap.  */
        if (bswap) {
            int t = data_reg;
            data_reg = data_reg2;
            data_reg2 = t;
        }
        /* Order the loads so the address in r0 is not overwritten
           before the second load uses it.  */
        if (r0 != data_reg) {
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        } else {
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        }
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
            tcg_out_bswap32(s, data_reg2);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: slow path rejoins here.  */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
/* Emit a guest memory store.  OPC is log2 of the access size.  args:
   data register(s), address register(s), then the softmmu mem_index.
   Structure mirrors tcg_out_qemu_ld: TLB fast path plus helper-call
   slow path under CONFIG_SOFTMMU.  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;    /* high word of a 64-bit store */
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;

    s_bits = opc;

    /* Scratch registers; the 'L' constraint keeps them free.  */
    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    /* r1 = TLB index bits of the address.  */
    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page part of the address (plus alignment bits to check).  */
    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* For 64-bit guest addresses, also compare the high word.  */
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: call the store helper.  Arguments beyond the three
       regparm registers are pushed on the stack and popped (via
       tcg_out_addi on ESP) after the call.  */
    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out_pushi(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    } else {
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_EDX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out_pushi(s, mem_index);
        tcg_out_push(s, data_reg2);
        tcg_out_push(s, data_reg);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 12);
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out_pushi(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    }
#endif

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit — add the host/guest address delta.  */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* Direct store through r0 (host address); byteswapped values are
       staged in r1 so the source registers are preserved.  */
    switch(opc) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_rolw_8(s, r1);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            data_reg = r1;
        }
        tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        break;
    case 3:
        if (bswap) {
            /* Swapped halves also swap positions in memory.  */
            tcg_out_mov(s, r1, data_reg2);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE);
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE + 4);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_st(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: slow path rejoins here.  */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
1038 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1039 const TCGArg *args, const int *const_args)
1041 int c;
1043 switch(opc) {
1044 case INDEX_op_exit_tb:
1045 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
1046 tcg_out8(s, OPC_JMP_long); /* jmp tb_ret_addr */
1047 tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
1048 break;
1049 case INDEX_op_goto_tb:
1050 if (s->tb_jmp_offset) {
1051 /* direct jump method */
1052 tcg_out8(s, OPC_JMP_long); /* jmp im */
1053 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1054 tcg_out32(s, 0);
1055 } else {
1056 /* indirect jump method */
1057 tcg_out_modrm_offset(s, 0xff, EXT_JMPN_Ev, -1,
1058 (tcg_target_long)(s->tb_next + args[0]));
1060 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1061 break;
1062 case INDEX_op_call:
1063 if (const_args[0]) {
1064 tcg_out8(s, 0xe8);
1065 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
1066 } else {
1067 tcg_out_modrm(s, 0xff, 2, args[0]);
1069 break;
1070 case INDEX_op_jmp:
1071 if (const_args[0]) {
1072 tcg_out8(s, OPC_JMP_long);
1073 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
1074 } else {
1075 /* jmp *reg */
1076 tcg_out_modrm(s, 0xff, EXT_JMPN_Ev, args[0]);
1078 break;
1079 case INDEX_op_br:
1080 tcg_out_jxx(s, JCC_JMP, args[0], 0);
1081 break;
1082 case INDEX_op_movi_i32:
1083 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1084 break;
1085 case INDEX_op_ld8u_i32:
1086 /* movzbl */
1087 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
1088 break;
1089 case INDEX_op_ld8s_i32:
1090 /* movsbl */
1091 tcg_out_modrm_offset(s, OPC_MOVSBL, args[0], args[1], args[2]);
1092 break;
1093 case INDEX_op_ld16u_i32:
1094 /* movzwl */
1095 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
1096 break;
1097 case INDEX_op_ld16s_i32:
1098 /* movswl */
1099 tcg_out_modrm_offset(s, OPC_MOVSWL, args[0], args[1], args[2]);
1100 break;
1101 case INDEX_op_ld_i32:
1102 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1103 break;
1104 case INDEX_op_st8_i32:
1105 /* movb */
1106 tcg_out_modrm_offset(s, OPC_MOVB_EvGv, args[0], args[1], args[2]);
1107 break;
1108 case INDEX_op_st16_i32:
1109 /* movw */
1110 tcg_out8(s, 0x66);
1111 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, args[0], args[1], args[2]);
1112 break;
1113 case INDEX_op_st_i32:
1114 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1115 break;
1116 case INDEX_op_sub_i32:
1117 c = ARITH_SUB;
1118 goto gen_arith;
    /* 32-bit ALU ops.  AND/OR/XOR/ADD share one tail via gen_arith;
       "c" selects the x86 ARITH_* opcode-extension for the operation.  */
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
    gen_arith:
        /* x86 two-address form: args[0] is both destination and first
           source (enforced by the "0" constraint in x86_op_defs);
           args[2] may be an immediate when const_args[2] is set.  */
        if (const_args[2]) {
            tgen_arithi(s, c, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c, args[0], args[2]);
        }
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                /* imul r32, r/m32, imm8 (opcode 0x6b) — shorter encoding
                   for immediates that fit in a sign-extended byte.  */
                tcg_out_modrm(s, 0x6b, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                /* imul r32, r/m32, imm32 (opcode 0x69) */
                tcg_out_modrm(s, 0x69, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            /* imul r32, r/m32 (0x0f 0xaf; P_EXT adds the 0x0f prefix) */
            tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
        }
        break;
    case INDEX_op_mulu2_i32:
        /* mul r/m32 (0xf7 /4): EDX:EAX = EAX * r/m32; the a/d output
           constraints pin the result registers.  */
        tcg_out_modrm(s, 0xf7, 4, args[3]);
        break;
    case INDEX_op_div2_i32:
        /* idiv r/m32 (0xf7 /7): signed divide of EDX:EAX; quotient in
           EAX, remainder in EDX.  */
        tcg_out_modrm(s, 0xf7, 7, args[4]);
        break;
    case INDEX_op_divu2_i32:
        /* div r/m32 (0xf7 /6): unsigned divide of EDX:EAX.  */
        tcg_out_modrm(s, 0xf7, 6, args[4]);
        break;
    /* Shifts and rotates share gen_shift32.  The count is either an
       immediate, or — per the "ci" constraint — already in ECX, so the
       register form can use the CL-count encoding directly.  */
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            tcg_out_shifti(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;
    case INDEX_op_rotl_i32:
        c = SHIFT_ROL;
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = SHIFT_ROR;
        goto gen_shift32;

    /* 64-bit add/sub built from a low-part op followed by the
       carry-using op on the high part.  The final argument 1 to
       tgen_arithi (vs 0 in gen_arith above) presumably forces emission
       even for a zero immediate so the carry flag is consumed —
       TODO(review): confirm against tgen_arithi's definition.  */
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
        }
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], 0);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;

    case INDEX_op_bswap16_i32:
        /* 16-bit byte swap via rolw $8 (see tcg_out_rolw_8's name) */
        tcg_out_rolw_8(s, args[0]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0]);
        break;

    case INDEX_op_neg_i32:
        /* neg r/m32 (0xf7 /3) */
        tcg_out_modrm(s, 0xf7, 3, args[0]);
        break;

    case INDEX_op_not_i32:
        /* not r/m32 (0xf7 /2) */
        tcg_out_modrm(s, 0xf7, 2, args[0]);
        break;

    /* Sign/zero extensions from 8 and 16 bits.  */
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_ext8u(s, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, args[0], args[1]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;

    /* Guest memory accesses.  Third argument encodes the size as
       log2(bytes); bit 2 (| 4) marks a sign-extending load.  */
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        /* Any opcode not listed here is a front-end/back-end mismatch.  */
        tcg_abort();
    }
}
/* Operand constraints for every opcode this backend implements.
   NOTE(review): the constraint letters are decoded by the target's
   constraint parser, which is outside this excerpt.  The usual i386
   meanings — "r" = any GP register, "q" = byte-addressable register
   (EAX/EBX/ECX/EDX), "a"/"d" = EAX/EDX exactly, "c" = ECX, "L" =
   register safe across a qemu_ld/st slow path, "i" = immediate
   allowed, a digit = must alias that numbered output — should be
   confirmed against the parser.  */
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    /* byte stores need a byte-addressable source register, hence "q" */
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* two-address ALU ops: first input must alias the output ("0") */
    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    /* variable shift counts must live in ECX ("c") for the CL form */
    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },

    /* With 32-bit guest addresses the address is one operand; with
       64-bit guest addresses it is an operand pair.  */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },   /* end-of-table sentinel */
};
1369 static int tcg_target_callee_save_regs[] = {
1370 /* TCG_REG_EBP, */ /* currently used for the global env, so no
1371 need to save */
1372 TCG_REG_EBX,
1373 TCG_REG_ESI,
1374 TCG_REG_EDI,
/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* TB prologue */
    /* save all callee saved registers */
    for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }
    /* reserve some stack space */
    /* push_size: the 4-byte return address plus 4 bytes per register
       pushed above.  */
    push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
    /* round the whole frame up to the target stack alignment, then
       only subtract what the pushes have not already consumed */
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);

    /* NOTE(review): the TB entry point is presumably passed in EAX by
       the dispatcher (regparm convention) — confirm against the
       tcg_qemu_tb_exec definition.  */
    tcg_out_modrm(s, 0xff, EXT_JMPN_Ev, TCG_REG_EAX); /* jmp *%eax */

    /* TB epilogue: every TB that returns jumps here (tb_ret_addr).  */
    tb_ret_addr = s->code_ptr;
    tcg_out_addi(s, TCG_REG_ESP, stack_addend);
    /* pop callee-saved registers in reverse order of the pushes */
    for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out8(s, 0xc3); /* ret */
}
/* One-time initialization of the i386 backend: sanity checks, the
   allocatable/clobbered/reserved register sets, and the opcode
   constraint table.  */
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    /* NOTE(review): presumably the emitted softmmu TLB-lookup code
       hard-codes CPU_TLB_ENTRY_BITS as log2(sizeof(CPUTLBEntry)) —
       abort at startup if that ever stops holding.  */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    /* all eight 32-bit GP registers (mask 0xff) are available;
       ESP is excluded again below via reserved_regs */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);

    /* EAX/EDX/ECX are caller-saved under the i386 C ABI */
    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);

    /* never hand the stack pointer to the register allocator */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);

    tcg_add_target_add_op_defs(x86_op_defs);
}