/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax",
    "%ecx",
    "%edx",
    "%ebx",
    "%esp",
    "%ebp",
    "%esi",
    "%edi",
};

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
};

static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };

static uint8_t *tb_ret_addr;
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_386_32:
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        *(uint32_t *)code_ptr = value - (long)code_ptr;
        break;
    case R_386_PC8:
        value -= (long)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        *(uint8_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    flags &= TCG_CALL_TYPE_MASK;
    switch (flags) {
    case TCG_CALL_TYPE_STD:
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
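/* Illustrative note (not part of the original file): these letters are what
   the constraint strings in x86_op_defs[] below are built from.  For example
   "q" restricts an operand to EAX/ECX/EDX/EBX (the registers with byte
   subregisters), while "L" allows any register except EAX and EDX, which the
   softmmu slow path clobbers.  A hedged sketch of parsing one operand:

       TCGArgConstraint ct = { 0 };
       const char *str = "q";
       target_parse_constraint(&ct, &str);   // sets TCG_CT_REG, regs = 0xf

   The actual caller lives in the common TCG code, not in this file.  */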
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else
        return 0;
}
#define P_EXT      0x100        /* 0x0f opcode prefix */
#define P_DATA16   0x200        /* 0x66 opcode prefix */

#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv   (0x88)          /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)          /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)          /* loads, more or less */
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_POP_r32     (0x58)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT)  /* ... plus condition code */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_TESTL       (0x85)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT  2
#define EXT3_NEG  3
#define EXT3_MUL  4
#define EXT3_IMUL 5
#define EXT3_DIV  6
#define EXT3_IDIV 7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_CALLN_Ev 2
#define EXT5_JMPN_Ev  4
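/* Example (added for illustration, not in the original source): tcg_out_opc()
   below strips the P_* flags and emits the matching prefix bytes first, so
       tcg_out_opc(s, OPC_BSWAP + reg)       emits  0x0f, 0xc8+reg   (bswap)
       tcg_out_modrm(s, OPC_MOVZWL, r, rm)   emits  0x0f, 0xb7, modrm
   and an opcode carrying P_DATA16 gets a leading 0x66 operand-size prefix,
   turning the 32-bit operation into its 16-bit form.  */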
/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
static inline void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}

static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a -1 value.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, int32_t offset)
{
    int mod, len;

    if (index == -1 && rm == -1) {
        /* Absolute address.  */
        tcg_out_opc(s, opc);
        tcg_out8(s, (r << 3) | 5);
        tcg_out32(s, offset);
        return;
    }

    tcg_out_opc(s, opc);

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm == -1) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && rm != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index == -1 && rm != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out8(s, mod | (r << 3) | rm);
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  */
        if (index == -1) {
            index = 4;
        } else {
            assert(index != TCG_REG_ESP);
        }

        tcg_out8(s, mod | (r << 3) | 4);
        tcg_out8(s, (shift << 6) | (index << 3) | rm);
    }

    /* Emit the immediate addend.  */
    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}
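/* Example (illustration only, not in the original source): emitting
       movl 0x10(%ebx,%esi,2), %eax
   corresponds to
       tcg_out_modrm_sib_offset(s, OPC_MOVL_GvEv, TCG_REG_EAX,
                                TCG_REG_EBX, TCG_REG_ESI, 1, 0x10);
   which produces the bytes 0x8b 0x44 0x73 0x10: opcode, ModRM with mod=01
   and rm=100 (SIB follows), SIB with scale=2/index=%esi/base=%ebx, and a
   one-byte displacement.  */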
/* rm == -1 means no register index */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        tcg_target_long offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3), dest, src);
}
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    if (arg != ret) {
        tcg_out_modrm(s, OPC_MOVL_GvEv, ret, arg);
    }
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, int32_t arg)
{
    if (arg == 0) {
        /* xor r0, r0 */
        tgen_arithr(s, ARITH_XOR, ret, ret);
    } else {
        tcg_out8(s, OPC_MOVL_Iv + ret);
        tcg_out32(s, arg);
    }
}
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib);
        tcg_out8(s, val);
    } else {
        tcg_out_opc(s, OPC_PUSH_Iv);
        tcg_out32(s, val);
    }
}
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + reg);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + reg);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
}
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 | ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib | ext, subopc, reg);
        tcg_out8(s, count);
    }
}
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + reg);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL | P_DATA16, reg, 8);
}
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVZBL, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSBL, dest, src);
}

static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSWL, dest, src);
}
static inline void tgen_arithi(TCGContext *s, int c, int r0,
                               int32_t val, int cf)
{
    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int opc = ((c == ARITH_ADD) ^ (val < 0) ? OPC_INC_r32 : OPC_DEC_r32);
        tcg_out_opc(s, opc + r0);
    } else if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb, c, r0);
        tcg_out8(s, val);
    } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
        tcg_out_ext8u(s, r0, r0);
    } else if (c == ARITH_AND && val == 0xffffu) {
        tcg_out_ext16u(s, r0, r0);
    } else {
        tcg_out_modrm(s, OPC_ARITH_EvIz, c, r0);
        tcg_out32(s, val);
    }
}
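/* Example (illustration only, not in the original source):
       tgen_arithi(s, ARITH_ADD, TCG_REG_EBX, 4, 0)
   fits the signed-8-bit immediate path and emits 0x83 0xc3 0x04
   (addl $4, %ebx); a large immediate such as 0x12345678 falls through to the
   0x81 /0 imm32 form, and +/-1 (when the flags result is not needed) uses the
   one-byte inc/dec encodings instead.  */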
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD, reg, val, 0);
    }
}
/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
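/* Example (illustration only): a branch to an already-emitted label whose
   displacement fits in a signed byte uses the two-byte form, e.g. 0x74 disp8
   for "je"; otherwise the six-byte 0x0f 0x84 disp32 form is used.  The "- 5"
   and "- 6" above subtract the length of the jump instruction itself, because
   the displacement is relative to the address of the following instruction.  */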
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP, arg1, arg2);
    }
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
/* XXX: we implement it at the target level to avoid having to
   handle cross-basic-block temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch (args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       label_next, 1);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       args[5], small);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
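/* Illustration (not in the original source): for a 64-bit signed "less than"
   the sequence generated above is, in pseudo-C,

       if ((int32_t)ah < (int32_t)bh)  goto taken;        // signed, high words
       if (ah != bh)                   goto label_next;   // already decided
       if (al < bl)                    goto taken;        // unsigned, low words
       label_next: ;

   i.e. the high words decide the comparison unless they are equal, in which
   case the low words are compared unsigned.  */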
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg dest,
                            TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    }
}
static void tcg_out_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_out_opc(s, OPC_CALL_Jz);
    tcg_out32(s, dest - (tcg_target_long)s->code_ptr - 4);
}
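/* Note (added for clarity): the displacement of the 0xe8 call is relative to
   the end of the instruction.  At this point s->code_ptr still points at the
   start of the 4-byte displacement field, hence the extra "- 4".  */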
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    switch (sizeop) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, OPC_MOVZBL, datalo, base, ofs);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, OPC_MOVSBL, datalo, base, ofs);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, OPC_MOVZWL, datalo, base, ofs);
        if (bswap) {
            tcg_out_rolw_8(s, datalo);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, OPC_MOVSWL, datalo, base, ofs);
        if (bswap) {
            tcg_out_rolw_8(s, datalo);
            tcg_out_modrm(s, OPC_MOVSWL, datalo, datalo);
        }
        break;
    case 2:
        tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
    case 3:
        if (bswap) {
            int t = datalo;
            datalo = datahi;
            datahi = t;
        }
        if (base != datalo) {
            tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
            tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
        } else {
            tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
            tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo);
            tcg_out_bswap32(s, datahi);
        }
        break;
    default:
        tcg_abort();
    }
}
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  It will be useful once fixed-register globals are less
   common.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3) {
        data_reg2 = *args++;
    } else {
        data_reg2 = 0;
    }
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA, r1, TCG_AREG0, r1, 0,
                             offsetof(CPUState,
                                      tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);

    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX);
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));

    tcg_out_qemu_ld_direct(s, data_reg, data_reg2, r0, 0, opc);

    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#else
    tcg_out_qemu_ld_direct(s, data_reg, data_reg2, addr_reg, GUEST_BASE, opc);
#endif
}
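/* Sketch of the softmmu path generated above (added for illustration; r0/r1
   are EAX/EDX, kept free by the "L" constraint):

       r1 = addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
       r1 &= (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS
       r1 = &env->tlb_table[mem_index][0].addr_read + r1
       r0 = addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1))
       if (r0 == *(uint32_t *)r1)
           load through addr + tlb entry addend          // TLB hit
       else
           call qemu_ld_helpers[s_bits](addr, mem_index) // miss/unaligned
*/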
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that EDX is definitely free here.  */
    int scratch = TCG_REG_EDX;

    switch (sizeop) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, datalo, base, ofs);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
                             datalo, base, ofs);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            datalo = scratch;
        }
        tcg_out_st(s, TCG_TYPE_I32, datalo, base, ofs);
        break;
    case 3:
        if (bswap) {
            tcg_out_mov(s, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_st(s, TCG_TYPE_I32, scratch, base, ofs);
            tcg_out_mov(s, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_st(s, TCG_TYPE_I32, scratch, base, ofs + 4);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, datalo, base, ofs);
            tcg_out_st(s, TCG_TYPE_I32, datahi, base, ofs + 4);
        }
        break;
    default:
        tcg_abort();
    }
}
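/* Note (added for clarity): on a big-endian target the value to be stored is
   first copied into the scratch register (EDX, guaranteed free by the "L"
   constraint), byte-swapped there, and then written out, so the guest
   register that holds the data is never modified.  */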
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits;
#if defined(CONFIG_SOFTMMU)
    int stack_adjust;
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3) {
        data_reg2 = *args++;
    } else {
        data_reg2 = 0;
    }
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA, r1, TCG_AREG0, r1, 0,
                             offsetof(CPUState,
                                      tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    } else {
        switch (opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_EDX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        stack_adjust = 0;
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out_pushi(s, mem_index);
        tcg_out_push(s, data_reg2);
        tcg_out_push(s, data_reg);
        stack_adjust = 12;
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch (opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    }
#endif

    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);

    if (stack_adjust == 4) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_ESP, stack_adjust);
    }

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));

    tcg_out_qemu_st_direct(s, data_reg, data_reg2, r0, 0, opc);

    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#else
    tcg_out_qemu_st_direct(s, data_reg, data_reg2, addr_reg, GUEST_BASE, opc);
#endif
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
        tcg_out8(s, OPC_JMP_long); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
        } else {
            tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            tcg_out8(s, OPC_JMP_long);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0], 0);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_modrm_offset(s, OPC_MOVSBL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_modrm_offset(s, OPC_MOVSWL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
                             args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        /* For 3-operand addition, use LEA.  */
        if (args[0] != args[1]) {
            TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;

            if (const_args[2]) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD, a0, a1);
                break;
            }

            tcg_out_modrm_sib_offset(s, OPC_LEA, a0, a1, a2, 0, c3);
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c, args[0], args[2]);
        }
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv, args[0], args[2]);
        }
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
        break;
    case INDEX_op_div2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_IDIV, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_DIV, args[4]);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            tcg_out_shifti(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;
    case INDEX_op_rotl_i32:
        c = SHIFT_ROL;
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = SHIFT_ROR;
        goto gen_shift32;

    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
        }
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], 0);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_rolw_8(s, args[0]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0]);
        break;

    case INDEX_op_neg_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, args[0]);
        break;

    case INDEX_op_not_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NOT, args[0]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_ext8u(s, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, args[0], args[1]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    /*  TCG_REG_EBP, */ /* currently used for the global env, so no
                           need to save */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
};

/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* save all callee saved registers */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }
    /* reserve some stack space */
    push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);

    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_EAX); /* jmp *%eax */

    tb_ret_addr = s->code_ptr;
    tcg_out_addi(s, TCG_REG_ESP, stack_addend);
    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET);
}
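/* Illustration (not in the original source): the generated prologue is roughly

       push %ebx; push %esi; push %edi     // callee-saved registers
       sub  $stack_addend, %esp            // static call-args area, aligned
       jmp  *%eax                          // enter the translated block

   and tb_ret_addr points at the matching epilogue:

       add  $stack_addend, %esp
       pop  %edi; pop %esi; pop %ebx
       ret
*/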
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);

    tcg_add_target_add_op_defs(x86_op_defs);
}