2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "tcg-be-ldst.h"
28 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
29 #if TCG_TARGET_REG_BITS == 64
30 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
31 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
33 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
38 static const int tcg_target_reg_alloc_order
[] = {
39 #if TCG_TARGET_REG_BITS == 64
66 static const int tcg_target_call_iarg_regs
[] = {
67 #if TCG_TARGET_REG_BITS == 64
80 /* 32 bit mode uses stack based calling convention (GCC default). */
84 static const int tcg_target_call_oarg_regs
[] = {
86 #if TCG_TARGET_REG_BITS == 32
91 /* Constants we accept. */
92 #define TCG_CT_CONST_S32 0x100
93 #define TCG_CT_CONST_U32 0x200
94 #define TCG_CT_CONST_I32 0x400
96 /* Registers used with L constraint, which are the first argument
97 registers on x86_64, and two random call clobbered registers on
99 #if TCG_TARGET_REG_BITS == 64
100 # define TCG_REG_L0 tcg_target_call_iarg_regs[0]
101 # define TCG_REG_L1 tcg_target_call_iarg_regs[1]
103 # define TCG_REG_L0 TCG_REG_EAX
104 # define TCG_REG_L1 TCG_REG_EDX
107 /* The host compiler should supply <cpuid.h> to enable runtime features
108 detection, as we're not going to go so far as our own inline assembly.
109 If not available, default values will be assumed. */
110 #if defined(CONFIG_CPUID_H)
114 /* For 32-bit, we are going to attempt to determine at runtime whether cmov
116 #if TCG_TARGET_REG_BITS == 64
118 #elif defined(CONFIG_CPUID_H) && defined(bit_CMOV)
119 static bool have_cmov
;
124 /* If bit_MOVBE is defined in cpuid.h (added in GCC version 4.6), we are
125 going to attempt to determine at runtime whether movbe is available. */
126 #if defined(CONFIG_CPUID_H) && defined(bit_MOVBE)
127 static bool have_movbe
;
129 # define have_movbe 0
132 /* We need this symbol in tcg-target.h, and we can't properly conditionalize
133 it there. Therefore we always define the variable. */
136 #if defined(CONFIG_CPUID_H) && defined(bit_BMI2)
137 static bool have_bmi2
;
142 static tcg_insn_unit
*tb_ret_addr
;
144 static void patch_reloc(tcg_insn_unit
*code_ptr
, int type
,
145 intptr_t value
, intptr_t addend
)
150 value
-= (uintptr_t)code_ptr
;
151 if (value
!= (int32_t)value
) {
154 tcg_patch32(code_ptr
, value
);
157 value
-= (uintptr_t)code_ptr
;
158 if (value
!= (int8_t)value
) {
161 tcg_patch8(code_ptr
, value
);
168 /* parse target specific constraints */
169 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
)
176 ct
->ct
|= TCG_CT_REG
;
177 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_EAX
);
180 ct
->ct
|= TCG_CT_REG
;
181 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_EBX
);
185 ct
->ct
|= TCG_CT_REG
;
186 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_ECX
);
189 ct
->ct
|= TCG_CT_REG
;
190 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_EDX
);
193 ct
->ct
|= TCG_CT_REG
;
194 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_ESI
);
197 ct
->ct
|= TCG_CT_REG
;
198 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_EDI
);
201 ct
->ct
|= TCG_CT_REG
;
202 if (TCG_TARGET_REG_BITS
== 64) {
203 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
205 tcg_regset_set32(ct
->u
.regs
, 0, 0xf);
209 ct
->ct
|= TCG_CT_REG
;
210 tcg_regset_set32(ct
->u
.regs
, 0, 0xf);
214 ct
->ct
|= TCG_CT_REG
;
215 if (TCG_TARGET_REG_BITS
== 64) {
216 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
218 tcg_regset_set32(ct
->u
.regs
, 0, 0xff);
222 /* With SHRX et al, we need not use ECX as shift count register. */
229 /* qemu_ld/st address constraint */
231 ct
->ct
|= TCG_CT_REG
;
232 if (TCG_TARGET_REG_BITS
== 64) {
233 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
235 tcg_regset_set32(ct
->u
.regs
, 0, 0xff);
237 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_L0
);
238 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_L1
);
242 ct
->ct
|= TCG_CT_CONST_S32
;
245 ct
->ct
|= TCG_CT_CONST_U32
;
248 ct
->ct
|= TCG_CT_CONST_I32
;
259 /* test if a constant matches the constraint */
260 static inline int tcg_target_const_match(tcg_target_long val
, TCGType type
,
261 const TCGArgConstraint
*arg_ct
)
264 if (ct
& TCG_CT_CONST
) {
267 if ((ct
& TCG_CT_CONST_S32
) && val
== (int32_t)val
) {
270 if ((ct
& TCG_CT_CONST_U32
) && val
== (uint32_t)val
) {
273 if ((ct
& TCG_CT_CONST_I32
) && ~val
== (int32_t)~val
) {
279 #if TCG_TARGET_REG_BITS == 64
280 # define LOWREGMASK(x) ((x) & 7)
282 # define LOWREGMASK(x) (x)
285 #define P_EXT 0x100 /* 0x0f opcode prefix */
286 #define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
287 #define P_DATA16 0x400 /* 0x66 opcode prefix */
288 #if TCG_TARGET_REG_BITS == 64
289 # define P_ADDR32 0x800 /* 0x67 opcode prefix */
290 # define P_REXW 0x1000 /* Set REX.W = 1 */
291 # define P_REXB_R 0x2000 /* REG field as byte register */
292 # define P_REXB_RM 0x4000 /* R/M field as byte register */
293 # define P_GS 0x8000 /* gs segment override */
301 #define P_SIMDF3 0x10000 /* 0xf3 opcode prefix */
302 #define P_SIMDF2 0x20000 /* 0xf2 opcode prefix */
304 #define OPC_ARITH_EvIz (0x81)
305 #define OPC_ARITH_EvIb (0x83)
306 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
307 #define OPC_ANDN (0xf2 | P_EXT38)
308 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
309 #define OPC_BSWAP (0xc8 | P_EXT)
310 #define OPC_CALL_Jz (0xe8)
311 #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
312 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3))
313 #define OPC_DEC_r32 (0x48)
314 #define OPC_IMUL_GvEv (0xaf | P_EXT)
315 #define OPC_IMUL_GvEvIb (0x6b)
316 #define OPC_IMUL_GvEvIz (0x69)
317 #define OPC_INC_r32 (0x40)
318 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
319 #define OPC_JCC_short (0x70) /* ... plus condition code */
320 #define OPC_JMP_long (0xe9)
321 #define OPC_JMP_short (0xeb)
322 #define OPC_LEA (0x8d)
323 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
324 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
325 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
326 #define OPC_MOVB_EvIz (0xc6)
327 #define OPC_MOVL_EvIz (0xc7)
328 #define OPC_MOVL_Iv (0xb8)
329 #define OPC_MOVBE_GyMy (0xf0 | P_EXT38)
330 #define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
331 #define OPC_MOVSBL (0xbe | P_EXT)
332 #define OPC_MOVSWL (0xbf | P_EXT)
333 #define OPC_MOVSLQ (0x63 | P_REXW)
334 #define OPC_MOVZBL (0xb6 | P_EXT)
335 #define OPC_MOVZWL (0xb7 | P_EXT)
336 #define OPC_POP_r32 (0x58)
337 #define OPC_PUSH_r32 (0x50)
338 #define OPC_PUSH_Iv (0x68)
339 #define OPC_PUSH_Ib (0x6a)
340 #define OPC_RET (0xc3)
341 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
342 #define OPC_SHIFT_1 (0xd1)
343 #define OPC_SHIFT_Ib (0xc1)
344 #define OPC_SHIFT_cl (0xd3)
345 #define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3)
346 #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
347 #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
348 #define OPC_TESTL (0x85)
349 #define OPC_XCHG_ax_r32 (0x90)
351 #define OPC_GRP3_Ev (0xf7)
352 #define OPC_GRP5 (0xff)
354 /* Group 1 opcode extensions for 0x80-0x83.
355 These are also used as modifiers for OPC_ARITH. */
365 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
372 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
380 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
381 #define EXT5_INC_Ev 0
382 #define EXT5_DEC_Ev 1
383 #define EXT5_CALLN_Ev 2
384 #define EXT5_JMPN_Ev 4
386 /* Condition codes to be added to OPC_JCC_{long,short}. */
405 static const uint8_t tcg_cond_to_jcc
[] = {
406 [TCG_COND_EQ
] = JCC_JE
,
407 [TCG_COND_NE
] = JCC_JNE
,
408 [TCG_COND_LT
] = JCC_JL
,
409 [TCG_COND_GE
] = JCC_JGE
,
410 [TCG_COND_LE
] = JCC_JLE
,
411 [TCG_COND_GT
] = JCC_JG
,
412 [TCG_COND_LTU
] = JCC_JB
,
413 [TCG_COND_GEU
] = JCC_JAE
,
414 [TCG_COND_LEU
] = JCC_JBE
,
415 [TCG_COND_GTU
] = JCC_JA
,
418 #if TCG_TARGET_REG_BITS == 64
419 static void tcg_out_opc(TCGContext
*s
, int opc
, int r
, int rm
, int x
)
426 if (opc
& P_DATA16
) {
427 /* We should never be asking for both 16 and 64-bit operation. */
428 assert((opc
& P_REXW
) == 0);
431 if (opc
& P_ADDR32
) {
436 rex
|= (opc
& P_REXW
) ? 0x8 : 0x0; /* REX.W */
437 rex
|= (r
& 8) >> 1; /* REX.R */
438 rex
|= (x
& 8) >> 2; /* REX.X */
439 rex
|= (rm
& 8) >> 3; /* REX.B */
441 /* P_REXB_{R,RM} indicates that the given register is the low byte.
442 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
443 as otherwise the encoding indicates %[abcd]h. Note that the values
444 that are ORed in merely indicate that the REX byte must be present;
445 those bits get discarded in output. */
446 rex
|= opc
& (r
>= 4 ? P_REXB_R
: 0);
447 rex
|= opc
& (rm
>= 4 ? P_REXB_RM
: 0);
450 tcg_out8(s
, (uint8_t)(rex
| 0x40));
453 if (opc
& (P_EXT
| P_EXT38
)) {
463 static void tcg_out_opc(TCGContext
*s
, int opc
)
465 if (opc
& P_DATA16
) {
468 if (opc
& (P_EXT
| P_EXT38
)) {
476 /* Discard the register arguments to tcg_out_opc early, so as not to penalize
477 the 32-bit compilation paths. This method works with all versions of gcc,
478 whereas relying on optimization may not be able to exclude them. */
479 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
482 static void tcg_out_modrm(TCGContext
*s
, int opc
, int r
, int rm
)
484 tcg_out_opc(s
, opc
, r
, rm
, 0);
485 tcg_out8(s
, 0xc0 | (LOWREGMASK(r
) << 3) | LOWREGMASK(rm
));
488 static void tcg_out_vex_modrm(TCGContext
*s
, int opc
, int r
, int v
, int rm
)
492 if ((opc
& (P_REXW
| P_EXT
| P_EXT38
)) || (rm
& 8)) {
493 /* Three byte VEX prefix. */
499 } else if (opc
& P_EXT
) {
504 tmp
|= 0x40; /* VEX.X */
505 tmp
|= (r
& 8 ? 0 : 0x80); /* VEX.R */
506 tmp
|= (rm
& 8 ? 0 : 0x20); /* VEX.B */
509 tmp
= (opc
& P_REXW
? 0x80 : 0); /* VEX.W */
511 /* Two byte VEX prefix. */
514 tmp
= (r
& 8 ? 0 : 0x80); /* VEX.R */
517 if (opc
& P_DATA16
) {
519 } else if (opc
& P_SIMDF3
) {
521 } else if (opc
& P_SIMDF2
) {
524 tmp
|= (~v
& 15) << 3; /* VEX.vvvv */
527 tcg_out8(s
, 0xc0 | (LOWREGMASK(r
) << 3) | LOWREGMASK(rm
));
530 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
531 We handle either RM and INDEX missing with a negative value. In 64-bit
532 mode for absolute addresses, ~RM is the size of the immediate operand
533 that will follow the instruction. */
535 static void tcg_out_modrm_sib_offset(TCGContext
*s
, int opc
, int r
, int rm
,
536 int index
, int shift
, intptr_t offset
)
540 if (index
< 0 && rm
< 0) {
541 if (TCG_TARGET_REG_BITS
== 64) {
542 /* Try for a rip-relative addressing mode. This has replaced
543 the 32-bit-mode absolute addressing encoding. */
544 intptr_t pc
= (intptr_t)s
->code_ptr
+ 5 + ~rm
;
545 intptr_t disp
= offset
- pc
;
546 if (disp
== (int32_t)disp
) {
547 tcg_out_opc(s
, opc
, r
, 0, 0);
548 tcg_out8(s
, (LOWREGMASK(r
) << 3) | 5);
553 /* Try for an absolute address encoding. This requires the
554 use of the MODRM+SIB encoding and is therefore larger than
555 rip-relative addressing. */
556 if (offset
== (int32_t)offset
) {
557 tcg_out_opc(s
, opc
, r
, 0, 0);
558 tcg_out8(s
, (LOWREGMASK(r
) << 3) | 4);
559 tcg_out8(s
, (4 << 3) | 5);
560 tcg_out32(s
, offset
);
564 /* ??? The memory isn't directly addressable. */
567 /* Absolute address. */
568 tcg_out_opc(s
, opc
, r
, 0, 0);
569 tcg_out8(s
, (r
<< 3) | 5);
570 tcg_out32(s
, offset
);
575 /* Find the length of the immediate addend. Note that the encoding
576 that would be used for (%ebp) indicates absolute addressing. */
578 mod
= 0, len
= 4, rm
= 5;
579 } else if (offset
== 0 && LOWREGMASK(rm
) != TCG_REG_EBP
) {
581 } else if (offset
== (int8_t)offset
) {
587 /* Use a single byte MODRM format if possible. Note that the encoding
588 that would be used for %esp is the escape to the two byte form. */
589 if (index
< 0 && LOWREGMASK(rm
) != TCG_REG_ESP
) {
590 /* Single byte MODRM format. */
591 tcg_out_opc(s
, opc
, r
, rm
, 0);
592 tcg_out8(s
, mod
| (LOWREGMASK(r
) << 3) | LOWREGMASK(rm
));
594 /* Two byte MODRM+SIB format. */
596 /* Note that the encoding that would place %esp into the index
597 field indicates no index register. In 64-bit mode, the REX.X
598 bit counts, so %r12 can be used as the index. */
602 assert(index
!= TCG_REG_ESP
);
605 tcg_out_opc(s
, opc
, r
, rm
, index
);
606 tcg_out8(s
, mod
| (LOWREGMASK(r
) << 3) | 4);
607 tcg_out8(s
, (shift
<< 6) | (LOWREGMASK(index
) << 3) | LOWREGMASK(rm
));
612 } else if (len
== 4) {
613 tcg_out32(s
, offset
);
617 /* A simplification of the above with no index or shift. */
618 static inline void tcg_out_modrm_offset(TCGContext
*s
, int opc
, int r
,
619 int rm
, intptr_t offset
)
621 tcg_out_modrm_sib_offset(s
, opc
, r
, rm
, -1, 0, offset
);
624 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
625 static inline void tgen_arithr(TCGContext
*s
, int subop
, int dest
, int src
)
627 /* Propagate an opcode prefix, such as P_REXW. */
628 int ext
= subop
& ~0x7;
631 tcg_out_modrm(s
, OPC_ARITH_GvEv
+ (subop
<< 3) + ext
, dest
, src
);
634 static inline void tcg_out_mov(TCGContext
*s
, TCGType type
,
635 TCGReg ret
, TCGReg arg
)
638 int opc
= OPC_MOVL_GvEv
+ (type
== TCG_TYPE_I64
? P_REXW
: 0);
639 tcg_out_modrm(s
, opc
, ret
, arg
);
643 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
644 TCGReg ret
, tcg_target_long arg
)
646 tcg_target_long diff
;
649 tgen_arithr(s
, ARITH_XOR
, ret
, ret
);
652 if (arg
== (uint32_t)arg
|| type
== TCG_TYPE_I32
) {
653 tcg_out_opc(s
, OPC_MOVL_Iv
+ LOWREGMASK(ret
), 0, ret
, 0);
657 if (arg
== (int32_t)arg
) {
658 tcg_out_modrm(s
, OPC_MOVL_EvIz
+ P_REXW
, 0, ret
);
663 /* Try a 7 byte pc-relative lea before the 10 byte movq. */
664 diff
= arg
- ((uintptr_t)s
->code_ptr
+ 7);
665 if (diff
== (int32_t)diff
) {
666 tcg_out_opc(s
, OPC_LEA
| P_REXW
, ret
, 0, 0);
667 tcg_out8(s
, (LOWREGMASK(ret
) << 3) | 5);
672 tcg_out_opc(s
, OPC_MOVL_Iv
+ P_REXW
+ LOWREGMASK(ret
), 0, ret
, 0);
676 static inline void tcg_out_pushi(TCGContext
*s
, tcg_target_long val
)
678 if (val
== (int8_t)val
) {
679 tcg_out_opc(s
, OPC_PUSH_Ib
, 0, 0, 0);
681 } else if (val
== (int32_t)val
) {
682 tcg_out_opc(s
, OPC_PUSH_Iv
, 0, 0, 0);
689 static inline void tcg_out_push(TCGContext
*s
, int reg
)
691 tcg_out_opc(s
, OPC_PUSH_r32
+ LOWREGMASK(reg
), 0, reg
, 0);
694 static inline void tcg_out_pop(TCGContext
*s
, int reg
)
696 tcg_out_opc(s
, OPC_POP_r32
+ LOWREGMASK(reg
), 0, reg
, 0);
699 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg ret
,
700 TCGReg arg1
, intptr_t arg2
)
702 int opc
= OPC_MOVL_GvEv
+ (type
== TCG_TYPE_I64
? P_REXW
: 0);
703 tcg_out_modrm_offset(s
, opc
, ret
, arg1
, arg2
);
706 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
,
707 TCGReg arg1
, intptr_t arg2
)
709 int opc
= OPC_MOVL_EvGv
+ (type
== TCG_TYPE_I64
? P_REXW
: 0);
710 tcg_out_modrm_offset(s
, opc
, arg
, arg1
, arg2
);
713 static inline void tcg_out_sti(TCGContext
*s
, TCGType type
, TCGReg base
,
714 tcg_target_long ofs
, tcg_target_long val
)
716 int opc
= OPC_MOVL_EvIz
+ (type
== TCG_TYPE_I64
? P_REXW
: 0);
717 tcg_out_modrm_offset(s
, opc
, 0, base
, ofs
);
721 static void tcg_out_shifti(TCGContext
*s
, int subopc
, int reg
, int count
)
723 /* Propagate an opcode prefix, such as P_DATA16. */
724 int ext
= subopc
& ~0x7;
728 tcg_out_modrm(s
, OPC_SHIFT_1
+ ext
, subopc
, reg
);
730 tcg_out_modrm(s
, OPC_SHIFT_Ib
+ ext
, subopc
, reg
);
735 static inline void tcg_out_bswap32(TCGContext
*s
, int reg
)
737 tcg_out_opc(s
, OPC_BSWAP
+ LOWREGMASK(reg
), 0, reg
, 0);
740 static inline void tcg_out_rolw_8(TCGContext
*s
, int reg
)
742 tcg_out_shifti(s
, SHIFT_ROL
+ P_DATA16
, reg
, 8);
745 static inline void tcg_out_ext8u(TCGContext
*s
, int dest
, int src
)
748 assert(src
< 4 || TCG_TARGET_REG_BITS
== 64);
749 tcg_out_modrm(s
, OPC_MOVZBL
+ P_REXB_RM
, dest
, src
);
752 static void tcg_out_ext8s(TCGContext
*s
, int dest
, int src
, int rexw
)
755 assert(src
< 4 || TCG_TARGET_REG_BITS
== 64);
756 tcg_out_modrm(s
, OPC_MOVSBL
+ P_REXB_RM
+ rexw
, dest
, src
);
759 static inline void tcg_out_ext16u(TCGContext
*s
, int dest
, int src
)
762 tcg_out_modrm(s
, OPC_MOVZWL
, dest
, src
);
765 static inline void tcg_out_ext16s(TCGContext
*s
, int dest
, int src
, int rexw
)
768 tcg_out_modrm(s
, OPC_MOVSWL
+ rexw
, dest
, src
);
771 static inline void tcg_out_ext32u(TCGContext
*s
, int dest
, int src
)
773 /* 32-bit mov zero extends. */
774 tcg_out_modrm(s
, OPC_MOVL_GvEv
, dest
, src
);
777 static inline void tcg_out_ext32s(TCGContext
*s
, int dest
, int src
)
779 tcg_out_modrm(s
, OPC_MOVSLQ
, dest
, src
);
782 static inline void tcg_out_bswap64(TCGContext
*s
, int reg
)
784 tcg_out_opc(s
, OPC_BSWAP
+ P_REXW
+ LOWREGMASK(reg
), 0, reg
, 0);
787 static void tgen_arithi(TCGContext
*s
, int c
, int r0
,
788 tcg_target_long val
, int cf
)
792 if (TCG_TARGET_REG_BITS
== 64) {
797 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
798 partial flags update stalls on Pentium4 and are not recommended
799 by current Intel optimization manuals. */
800 if (!cf
&& (c
== ARITH_ADD
|| c
== ARITH_SUB
) && (val
== 1 || val
== -1)) {
801 int is_inc
= (c
== ARITH_ADD
) ^ (val
< 0);
802 if (TCG_TARGET_REG_BITS
== 64) {
803 /* The single-byte increment encodings are re-tasked as the
804 REX prefixes. Use the MODRM encoding. */
805 tcg_out_modrm(s
, OPC_GRP5
+ rexw
,
806 (is_inc
? EXT5_INC_Ev
: EXT5_DEC_Ev
), r0
);
808 tcg_out8(s
, (is_inc
? OPC_INC_r32
: OPC_DEC_r32
) + r0
);
813 if (c
== ARITH_AND
) {
814 if (TCG_TARGET_REG_BITS
== 64) {
815 if (val
== 0xffffffffu
) {
816 tcg_out_ext32u(s
, r0
, r0
);
819 if (val
== (uint32_t)val
) {
820 /* AND with no high bits set can use a 32-bit operation. */
824 if (val
== 0xffu
&& (r0
< 4 || TCG_TARGET_REG_BITS
== 64)) {
825 tcg_out_ext8u(s
, r0
, r0
);
828 if (val
== 0xffffu
) {
829 tcg_out_ext16u(s
, r0
, r0
);
834 if (val
== (int8_t)val
) {
835 tcg_out_modrm(s
, OPC_ARITH_EvIb
+ rexw
, c
, r0
);
839 if (rexw
== 0 || val
== (int32_t)val
) {
840 tcg_out_modrm(s
, OPC_ARITH_EvIz
+ rexw
, c
, r0
);
848 static void tcg_out_addi(TCGContext
*s
, int reg
, tcg_target_long val
)
851 tgen_arithi(s
, ARITH_ADD
+ P_REXW
, reg
, val
, 0);
855 /* Use SMALL != 0 to force a short forward branch. */
856 static void tcg_out_jxx(TCGContext
*s
, int opc
, TCGLabel
*l
, int small
)
861 val
= tcg_pcrel_diff(s
, l
->u
.value_ptr
);
863 if ((int8_t)val1
== val1
) {
865 tcg_out8(s
, OPC_JMP_short
);
867 tcg_out8(s
, OPC_JCC_short
+ opc
);
875 tcg_out8(s
, OPC_JMP_long
);
876 tcg_out32(s
, val
- 5);
878 tcg_out_opc(s
, OPC_JCC_long
+ opc
, 0, 0, 0);
879 tcg_out32(s
, val
- 6);
884 tcg_out8(s
, OPC_JMP_short
);
886 tcg_out8(s
, OPC_JCC_short
+ opc
);
888 tcg_out_reloc(s
, s
->code_ptr
, R_386_PC8
, l
, -1);
892 tcg_out8(s
, OPC_JMP_long
);
894 tcg_out_opc(s
, OPC_JCC_long
+ opc
, 0, 0, 0);
896 tcg_out_reloc(s
, s
->code_ptr
, R_386_PC32
, l
, -4);
901 static void tcg_out_cmp(TCGContext
*s
, TCGArg arg1
, TCGArg arg2
,
902 int const_arg2
, int rexw
)
907 tcg_out_modrm(s
, OPC_TESTL
+ rexw
, arg1
, arg1
);
909 tgen_arithi(s
, ARITH_CMP
+ rexw
, arg1
, arg2
, 0);
912 tgen_arithr(s
, ARITH_CMP
+ rexw
, arg1
, arg2
);
916 static void tcg_out_brcond32(TCGContext
*s
, TCGCond cond
,
917 TCGArg arg1
, TCGArg arg2
, int const_arg2
,
918 TCGLabel
*label
, int small
)
920 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, 0);
921 tcg_out_jxx(s
, tcg_cond_to_jcc
[cond
], label
, small
);
924 #if TCG_TARGET_REG_BITS == 64
925 static void tcg_out_brcond64(TCGContext
*s
, TCGCond cond
,
926 TCGArg arg1
, TCGArg arg2
, int const_arg2
,
927 TCGLabel
*label
, int small
)
929 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, P_REXW
);
930 tcg_out_jxx(s
, tcg_cond_to_jcc
[cond
], label
, small
);
933 /* XXX: we implement it at the target level to avoid having to
934 handle cross basic blocks temporaries */
935 static void tcg_out_brcond2(TCGContext
*s
, const TCGArg
*args
,
936 const int *const_args
, int small
)
938 TCGLabel
*label_next
= gen_new_label();
939 TCGLabel
*label_this
= arg_label(args
[5]);
943 tcg_out_brcond32(s
, TCG_COND_NE
, args
[0], args
[2], const_args
[2],
945 tcg_out_brcond32(s
, TCG_COND_EQ
, args
[1], args
[3], const_args
[3],
949 tcg_out_brcond32(s
, TCG_COND_NE
, args
[0], args
[2], const_args
[2],
951 tcg_out_brcond32(s
, TCG_COND_NE
, args
[1], args
[3], const_args
[3],
955 tcg_out_brcond32(s
, TCG_COND_LT
, args
[1], args
[3], const_args
[3],
957 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
958 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[0], args
[2], const_args
[2],
962 tcg_out_brcond32(s
, TCG_COND_LT
, args
[1], args
[3], const_args
[3],
964 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
965 tcg_out_brcond32(s
, TCG_COND_LEU
, args
[0], args
[2], const_args
[2],
969 tcg_out_brcond32(s
, TCG_COND_GT
, args
[1], args
[3], const_args
[3],
971 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
972 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[0], args
[2], const_args
[2],
976 tcg_out_brcond32(s
, TCG_COND_GT
, args
[1], args
[3], const_args
[3],
978 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
979 tcg_out_brcond32(s
, TCG_COND_GEU
, args
[0], args
[2], const_args
[2],
983 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[1], args
[3], const_args
[3],
985 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
986 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[0], args
[2], const_args
[2],
990 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[1], args
[3], const_args
[3],
992 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
993 tcg_out_brcond32(s
, TCG_COND_LEU
, args
[0], args
[2], const_args
[2],
997 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[1], args
[3], const_args
[3],
999 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1000 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[0], args
[2], const_args
[2],
1004 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[1], args
[3], const_args
[3],
1006 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1007 tcg_out_brcond32(s
, TCG_COND_GEU
, args
[0], args
[2], const_args
[2],
1013 tcg_out_label(s
, label_next
, s
->code_ptr
);
1017 static void tcg_out_setcond32(TCGContext
*s
, TCGCond cond
, TCGArg dest
,
1018 TCGArg arg1
, TCGArg arg2
, int const_arg2
)
1020 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, 0);
1021 tcg_out_modrm(s
, OPC_SETCC
| tcg_cond_to_jcc
[cond
], 0, dest
);
1022 tcg_out_ext8u(s
, dest
, dest
);
1025 #if TCG_TARGET_REG_BITS == 64
1026 static void tcg_out_setcond64(TCGContext
*s
, TCGCond cond
, TCGArg dest
,
1027 TCGArg arg1
, TCGArg arg2
, int const_arg2
)
1029 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, P_REXW
);
1030 tcg_out_modrm(s
, OPC_SETCC
| tcg_cond_to_jcc
[cond
], 0, dest
);
1031 tcg_out_ext8u(s
, dest
, dest
);
1034 static void tcg_out_setcond2(TCGContext
*s
, const TCGArg
*args
,
1035 const int *const_args
)
1038 TCGLabel
*label_true
, *label_over
;
1040 memcpy(new_args
, args
+1, 5*sizeof(TCGArg
));
1042 if (args
[0] == args
[1] || args
[0] == args
[2]
1043 || (!const_args
[3] && args
[0] == args
[3])
1044 || (!const_args
[4] && args
[0] == args
[4])) {
1045 /* When the destination overlaps with one of the argument
1046 registers, don't do anything tricky. */
1047 label_true
= gen_new_label();
1048 label_over
= gen_new_label();
1050 new_args
[5] = label_arg(label_true
);
1051 tcg_out_brcond2(s
, new_args
, const_args
+1, 1);
1053 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], 0);
1054 tcg_out_jxx(s
, JCC_JMP
, label_over
, 1);
1055 tcg_out_label(s
, label_true
, s
->code_ptr
);
1057 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], 1);
1058 tcg_out_label(s
, label_over
, s
->code_ptr
);
1060 /* When the destination does not overlap one of the arguments,
1061 clear the destination first, jump if cond false, and emit an
1062 increment in the true case. This results in smaller code. */
1064 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], 0);
1066 label_over
= gen_new_label();
1067 new_args
[4] = tcg_invert_cond(new_args
[4]);
1068 new_args
[5] = label_arg(label_over
);
1069 tcg_out_brcond2(s
, new_args
, const_args
+1, 1);
1071 tgen_arithi(s
, ARITH_ADD
, args
[0], 1, 0);
1072 tcg_out_label(s
, label_over
, s
->code_ptr
);
1077 static void tcg_out_movcond32(TCGContext
*s
, TCGCond cond
, TCGArg dest
,
1078 TCGArg c1
, TCGArg c2
, int const_c2
,
1081 tcg_out_cmp(s
, c1
, c2
, const_c2
, 0);
1083 tcg_out_modrm(s
, OPC_CMOVCC
| tcg_cond_to_jcc
[cond
], dest
, v1
);
1085 TCGLabel
*over
= gen_new_label();
1086 tcg_out_jxx(s
, tcg_cond_to_jcc
[tcg_invert_cond(cond
)], over
, 1);
1087 tcg_out_mov(s
, TCG_TYPE_I32
, dest
, v1
);
1088 tcg_out_label(s
, over
, s
->code_ptr
);
1092 #if TCG_TARGET_REG_BITS == 64
1093 static void tcg_out_movcond64(TCGContext
*s
, TCGCond cond
, TCGArg dest
,
1094 TCGArg c1
, TCGArg c2
, int const_c2
,
1097 tcg_out_cmp(s
, c1
, c2
, const_c2
, P_REXW
);
1098 tcg_out_modrm(s
, OPC_CMOVCC
| tcg_cond_to_jcc
[cond
] | P_REXW
, dest
, v1
);
1102 static void tcg_out_branch(TCGContext
*s
, int call
, tcg_insn_unit
*dest
)
1104 intptr_t disp
= tcg_pcrel_diff(s
, dest
) - 5;
1106 if (disp
== (int32_t)disp
) {
1107 tcg_out_opc(s
, call
? OPC_CALL_Jz
: OPC_JMP_long
, 0, 0, 0);
1110 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R10
, (uintptr_t)dest
);
1111 tcg_out_modrm(s
, OPC_GRP5
,
1112 call
? EXT5_CALLN_Ev
: EXT5_JMPN_Ev
, TCG_REG_R10
);
1116 static inline void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*dest
)
1118 tcg_out_branch(s
, 1, dest
);
1121 static void tcg_out_jmp(TCGContext
*s
, tcg_insn_unit
*dest
)
1123 tcg_out_branch(s
, 0, dest
);
1126 #if defined(CONFIG_SOFTMMU)
1127 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1128 * int mmu_idx, uintptr_t ra)
1130 static void * const qemu_ld_helpers
[16] = {
1131 [MO_UB
] = helper_ret_ldub_mmu
,
1132 [MO_LEUW
] = helper_le_lduw_mmu
,
1133 [MO_LEUL
] = helper_le_ldul_mmu
,
1134 [MO_LEQ
] = helper_le_ldq_mmu
,
1135 [MO_BEUW
] = helper_be_lduw_mmu
,
1136 [MO_BEUL
] = helper_be_ldul_mmu
,
1137 [MO_BEQ
] = helper_be_ldq_mmu
,
1140 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1141 * uintxx_t val, int mmu_idx, uintptr_t ra)
1143 static void * const qemu_st_helpers
[16] = {
1144 [MO_UB
] = helper_ret_stb_mmu
,
1145 [MO_LEUW
] = helper_le_stw_mmu
,
1146 [MO_LEUL
] = helper_le_stl_mmu
,
1147 [MO_LEQ
] = helper_le_stq_mmu
,
1148 [MO_BEUW
] = helper_be_stw_mmu
,
1149 [MO_BEUL
] = helper_be_stl_mmu
,
1150 [MO_BEQ
] = helper_be_stq_mmu
,
1153 /* Perform the TLB load and compare.
1156 ADDRLO and ADDRHI contain the low and high part of the address.
1158 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1160 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1161 This should be offsetof addr_read or addr_write.
1164 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1165 positions of the displacements of forward jumps to the TLB miss case.
1167 Second argument register is loaded with the low part of the address.
1168 In the TLB hit case, it has been adjusted as indicated by the TLB
1169 and so is a host address. In the TLB miss case, it continues to
1170 hold a guest address.
1172 First argument register is clobbered. */
1174 static inline void tcg_out_tlb_load(TCGContext
*s
, TCGReg addrlo
, TCGReg addrhi
,
1175 int mem_index
, TCGMemOp opc
,
1176 tcg_insn_unit
**label_ptr
, int which
)
1178 const TCGReg r0
= TCG_REG_L0
;
1179 const TCGReg r1
= TCG_REG_L1
;
1180 TCGType ttype
= TCG_TYPE_I32
;
1181 TCGType tlbtype
= TCG_TYPE_I32
;
1182 int trexw
= 0, hrexw
= 0, tlbrexw
= 0;
1183 int s_mask
= (1 << (opc
& MO_SIZE
)) - 1;
1184 bool aligned
= (opc
& MO_AMASK
) == MO_ALIGN
|| s_mask
== 0;
1186 if (TCG_TARGET_REG_BITS
== 64) {
1187 if (TARGET_LONG_BITS
== 64) {
1188 ttype
= TCG_TYPE_I64
;
1191 if (TCG_TYPE_PTR
== TCG_TYPE_I64
) {
1193 if (TARGET_PAGE_BITS
+ CPU_TLB_BITS
> 32) {
1194 tlbtype
= TCG_TYPE_I64
;
1200 tcg_out_mov(s
, tlbtype
, r0
, addrlo
);
1202 tcg_out_mov(s
, ttype
, r1
, addrlo
);
1204 /* For unaligned access check that we don't cross pages using
1205 the page address of the last byte. */
1206 tcg_out_modrm_offset(s
, OPC_LEA
+ trexw
, r1
, addrlo
, s_mask
);
1209 tcg_out_shifti(s
, SHIFT_SHR
+ tlbrexw
, r0
,
1210 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
1212 tgen_arithi(s
, ARITH_AND
+ trexw
, r1
,
1213 TARGET_PAGE_MASK
| (aligned
? s_mask
: 0), 0);
1214 tgen_arithi(s
, ARITH_AND
+ tlbrexw
, r0
,
1215 (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
, 0);
1217 tcg_out_modrm_sib_offset(s
, OPC_LEA
+ hrexw
, r0
, TCG_AREG0
, r0
, 0,
1218 offsetof(CPUArchState
, tlb_table
[mem_index
][0])
1222 tcg_out_modrm_offset(s
, OPC_CMP_GvEv
+ trexw
, r1
, r0
, 0);
1224 /* Prepare for both the fast path add of the tlb addend, and the slow
1225 path function argument setup. There are two cases worth note:
1226 For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
1227 before the fastpath ADDQ below. For 64-bit guest and x32 host, MOVQ
1228 copies the entire guest address for the slow path, while truncation
1229 for the 32-bit host happens with the fastpath ADDL below. */
1230 tcg_out_mov(s
, ttype
, r1
, addrlo
);
1233 tcg_out_opc(s
, OPC_JCC_long
+ JCC_JNE
, 0, 0, 0);
1234 label_ptr
[0] = s
->code_ptr
;
1237 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1238 /* cmp 4(r0), addrhi */
1239 tcg_out_modrm_offset(s
, OPC_CMP_GvEv
, addrhi
, r0
, 4);
1242 tcg_out_opc(s
, OPC_JCC_long
+ JCC_JNE
, 0, 0, 0);
1243 label_ptr
[1] = s
->code_ptr
;
1249 /* add addend(r0), r1 */
1250 tcg_out_modrm_offset(s
, OPC_ADD_GvEv
+ hrexw
, r1
, r0
,
1251 offsetof(CPUTLBEntry
, addend
) - which
);
1255 * Record the context of a call to the out of line helper code for the slow path
1256 * for a load or store, so that we can later generate the correct helper code
1258 static void add_qemu_ldst_label(TCGContext
*s
, bool is_ld
, TCGMemOpIdx oi
,
1259 TCGReg datalo
, TCGReg datahi
,
1260 TCGReg addrlo
, TCGReg addrhi
,
1261 tcg_insn_unit
*raddr
,
1262 tcg_insn_unit
**label_ptr
)
1264 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1266 label
->is_ld
= is_ld
;
1268 label
->datalo_reg
= datalo
;
1269 label
->datahi_reg
= datahi
;
1270 label
->addrlo_reg
= addrlo
;
1271 label
->addrhi_reg
= addrhi
;
1272 label
->raddr
= raddr
;
1273 label
->label_ptr
[0] = label_ptr
[0];
1274 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1275 label
->label_ptr
[1] = label_ptr
[1];
1280 * Generate code for the slow path for a load at the end of block
1282 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1284 TCGMemOpIdx oi
= l
->oi
;
1285 TCGMemOp opc
= get_memop(oi
);
1287 tcg_insn_unit
**label_ptr
= &l
->label_ptr
[0];
1289 /* resolve label address */
1290 tcg_patch32(label_ptr
[0], s
->code_ptr
- label_ptr
[0] - 4);
1291 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1292 tcg_patch32(label_ptr
[1], s
->code_ptr
- label_ptr
[1] - 4);
1295 if (TCG_TARGET_REG_BITS
== 32) {
1298 tcg_out_st(s
, TCG_TYPE_PTR
, TCG_AREG0
, TCG_REG_ESP
, ofs
);
1301 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrlo_reg
, TCG_REG_ESP
, ofs
);
1304 if (TARGET_LONG_BITS
== 64) {
1305 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrhi_reg
, TCG_REG_ESP
, ofs
);
1309 tcg_out_sti(s
, TCG_TYPE_I32
, TCG_REG_ESP
, ofs
, oi
);
1312 tcg_out_sti(s
, TCG_TYPE_PTR
, TCG_REG_ESP
, ofs
, (uintptr_t)l
->raddr
);
1314 tcg_out_mov(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[0], TCG_AREG0
);
1315 /* The second argument is already loaded with addrlo. */
1316 tcg_out_movi(s
, TCG_TYPE_I32
, tcg_target_call_iarg_regs
[2], oi
);
1317 tcg_out_movi(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[3],
1318 (uintptr_t)l
->raddr
);
1321 tcg_out_call(s
, qemu_ld_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1323 data_reg
= l
->datalo_reg
;
1324 switch (opc
& MO_SSIZE
) {
1326 tcg_out_ext8s(s
, data_reg
, TCG_REG_EAX
, P_REXW
);
1329 tcg_out_ext16s(s
, data_reg
, TCG_REG_EAX
, P_REXW
);
1331 #if TCG_TARGET_REG_BITS == 64
1333 tcg_out_ext32s(s
, data_reg
, TCG_REG_EAX
);
1338 /* Note that the helpers have zero-extended to tcg_target_long. */
1340 tcg_out_mov(s
, TCG_TYPE_I32
, data_reg
, TCG_REG_EAX
);
1343 if (TCG_TARGET_REG_BITS
== 64) {
1344 tcg_out_mov(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_RAX
);
1345 } else if (data_reg
== TCG_REG_EDX
) {
1346 /* xchg %edx, %eax */
1347 tcg_out_opc(s
, OPC_XCHG_ax_r32
+ TCG_REG_EDX
, 0, 0, 0);
1348 tcg_out_mov(s
, TCG_TYPE_I32
, l
->datahi_reg
, TCG_REG_EAX
);
1350 tcg_out_mov(s
, TCG_TYPE_I32
, data_reg
, TCG_REG_EAX
);
1351 tcg_out_mov(s
, TCG_TYPE_I32
, l
->datahi_reg
, TCG_REG_EDX
);
1358 /* Jump to the code corresponding to next IR of qemu_st */
1359 tcg_out_jmp(s
, l
->raddr
);
1363 * Generate code for the slow path for a store at the end of block
1365 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1367 TCGMemOpIdx oi
= l
->oi
;
1368 TCGMemOp opc
= get_memop(oi
);
1369 TCGMemOp s_bits
= opc
& MO_SIZE
;
1370 tcg_insn_unit
**label_ptr
= &l
->label_ptr
[0];
1373 /* resolve label address */
1374 tcg_patch32(label_ptr
[0], s
->code_ptr
- label_ptr
[0] - 4);
1375 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1376 tcg_patch32(label_ptr
[1], s
->code_ptr
- label_ptr
[1] - 4);
1379 if (TCG_TARGET_REG_BITS
== 32) {
1382 tcg_out_st(s
, TCG_TYPE_PTR
, TCG_AREG0
, TCG_REG_ESP
, ofs
);
1385 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrlo_reg
, TCG_REG_ESP
, ofs
);
1388 if (TARGET_LONG_BITS
== 64) {
1389 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrhi_reg
, TCG_REG_ESP
, ofs
);
1393 tcg_out_st(s
, TCG_TYPE_I32
, l
->datalo_reg
, TCG_REG_ESP
, ofs
);
1396 if (s_bits
== MO_64
) {
1397 tcg_out_st(s
, TCG_TYPE_I32
, l
->datahi_reg
, TCG_REG_ESP
, ofs
);
1401 tcg_out_sti(s
, TCG_TYPE_I32
, TCG_REG_ESP
, ofs
, oi
);
1404 retaddr
= TCG_REG_EAX
;
1405 tcg_out_movi(s
, TCG_TYPE_PTR
, retaddr
, (uintptr_t)l
->raddr
);
1406 tcg_out_st(s
, TCG_TYPE_PTR
, retaddr
, TCG_REG_ESP
, ofs
);
1408 tcg_out_mov(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[0], TCG_AREG0
);
1409 /* The second argument is already loaded with addrlo. */
1410 tcg_out_mov(s
, (s_bits
== MO_64
? TCG_TYPE_I64
: TCG_TYPE_I32
),
1411 tcg_target_call_iarg_regs
[2], l
->datalo_reg
);
1412 tcg_out_movi(s
, TCG_TYPE_I32
, tcg_target_call_iarg_regs
[3], oi
);
1414 if (ARRAY_SIZE(tcg_target_call_iarg_regs
) > 4) {
1415 retaddr
= tcg_target_call_iarg_regs
[4];
1416 tcg_out_movi(s
, TCG_TYPE_PTR
, retaddr
, (uintptr_t)l
->raddr
);
1418 retaddr
= TCG_REG_RAX
;
1419 tcg_out_movi(s
, TCG_TYPE_PTR
, retaddr
, (uintptr_t)l
->raddr
);
1420 tcg_out_st(s
, TCG_TYPE_PTR
, retaddr
, TCG_REG_ESP
,
1421 TCG_TARGET_CALL_STACK_OFFSET
);
1425 /* "Tail call" to the helper, with the return address back inline. */
1426 tcg_out_push(s
, retaddr
);
1427 tcg_out_jmp(s
, qemu_st_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1429 #elif defined(__x86_64__) && defined(__linux__)
1430 # include <asm/prctl.h>
1431 # include <sys/prctl.h>
1433 int arch_prctl(int code
, unsigned long addr
);
1435 static int guest_base_flags
;
1436 static inline void setup_guest_base_seg(void)
1438 if (arch_prctl(ARCH_SET_GS
, guest_base
) == 0) {
1439 guest_base_flags
= P_GS
;
1443 # define guest_base_flags 0
1444 static inline void setup_guest_base_seg(void) { }
1445 #endif /* SOFTMMU */
1447 static void tcg_out_qemu_ld_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
1448 TCGReg base
, int index
, intptr_t ofs
,
1449 int seg
, TCGMemOp memop
)
1451 const TCGMemOp real_bswap
= memop
& MO_BSWAP
;
1452 TCGMemOp bswap
= real_bswap
;
1453 int movop
= OPC_MOVL_GvEv
;
1455 if (have_movbe
&& real_bswap
) {
1457 movop
= OPC_MOVBE_GyMy
;
1460 switch (memop
& MO_SSIZE
) {
1462 tcg_out_modrm_sib_offset(s
, OPC_MOVZBL
+ seg
, datalo
,
1463 base
, index
, 0, ofs
);
1466 tcg_out_modrm_sib_offset(s
, OPC_MOVSBL
+ P_REXW
+ seg
, datalo
,
1467 base
, index
, 0, ofs
);
1470 tcg_out_modrm_sib_offset(s
, OPC_MOVZWL
+ seg
, datalo
,
1471 base
, index
, 0, ofs
);
1473 tcg_out_rolw_8(s
, datalo
);
1479 tcg_out_modrm_sib_offset(s
, OPC_MOVBE_GyMy
+ P_DATA16
+ seg
,
1480 datalo
, base
, index
, 0, ofs
);
1482 tcg_out_modrm_sib_offset(s
, OPC_MOVZWL
+ seg
, datalo
,
1483 base
, index
, 0, ofs
);
1484 tcg_out_rolw_8(s
, datalo
);
1486 tcg_out_modrm(s
, OPC_MOVSWL
+ P_REXW
, datalo
, datalo
);
1488 tcg_out_modrm_sib_offset(s
, OPC_MOVSWL
+ P_REXW
+ seg
,
1489 datalo
, base
, index
, 0, ofs
);
1493 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
, base
, index
, 0, ofs
);
1495 tcg_out_bswap32(s
, datalo
);
1498 #if TCG_TARGET_REG_BITS == 64
1501 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
,
1502 base
, index
, 0, ofs
);
1504 tcg_out_bswap32(s
, datalo
);
1506 tcg_out_ext32s(s
, datalo
, datalo
);
1508 tcg_out_modrm_sib_offset(s
, OPC_MOVSLQ
+ seg
, datalo
,
1509 base
, index
, 0, ofs
);
1514 if (TCG_TARGET_REG_BITS
== 64) {
1515 tcg_out_modrm_sib_offset(s
, movop
+ P_REXW
+ seg
, datalo
,
1516 base
, index
, 0, ofs
);
1518 tcg_out_bswap64(s
, datalo
);
1526 if (base
!= datalo
) {
1527 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
,
1528 base
, index
, 0, ofs
);
1529 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datahi
,
1530 base
, index
, 0, ofs
+ 4);
1532 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datahi
,
1533 base
, index
, 0, ofs
+ 4);
1534 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
,
1535 base
, index
, 0, ofs
);
1538 tcg_out_bswap32(s
, datalo
);
1539 tcg_out_bswap32(s
, datahi
);
1548 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
1549 EAX. It will be useful once fixed registers globals are less
1551 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, bool is64
)
1553 TCGReg datalo
, datahi
, addrlo
;
1554 TCGReg addrhi
__attribute__((unused
));
1557 #if defined(CONFIG_SOFTMMU)
1559 tcg_insn_unit
*label_ptr
[2];
1563 datahi
= (TCG_TARGET_REG_BITS
== 32 && is64
? *args
++ : 0);
1565 addrhi
= (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
? *args
++ : 0);
1567 opc
= get_memop(oi
);
1569 #if defined(CONFIG_SOFTMMU)
1570 mem_index
= get_mmuidx(oi
);
1572 tcg_out_tlb_load(s
, addrlo
, addrhi
, mem_index
, opc
,
1573 label_ptr
, offsetof(CPUTLBEntry
, addr_read
));
1576 tcg_out_qemu_ld_direct(s
, datalo
, datahi
, TCG_REG_L1
, -1, 0, 0, opc
);
1578 /* Record the current context of a load into ldst label */
1579 add_qemu_ldst_label(s
, true, oi
, datalo
, datahi
, addrlo
, addrhi
,
1580 s
->code_ptr
, label_ptr
);
1583 int32_t offset
= guest_base
;
1584 TCGReg base
= addrlo
;
1588 /* For a 32-bit guest, the high 32 bits may contain garbage.
1589 We can do this with the ADDR32 prefix if we're not using
1590 a guest base, or when using segmentation. Otherwise we
1591 need to zero-extend manually. */
1592 if (guest_base
== 0 || guest_base_flags
) {
1593 seg
= guest_base_flags
;
1595 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
1598 } else if (TCG_TARGET_REG_BITS
== 64) {
1599 if (TARGET_LONG_BITS
== 32) {
1600 tcg_out_ext32u(s
, TCG_REG_L0
, base
);
1603 if (offset
!= guest_base
) {
1604 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_REG_L1
, guest_base
);
1610 tcg_out_qemu_ld_direct(s
, datalo
, datahi
,
1611 base
, index
, offset
, seg
, opc
);
1616 static void tcg_out_qemu_st_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
1617 TCGReg base
, intptr_t ofs
, int seg
,
1620 /* ??? Ideally we wouldn't need a scratch register. For user-only,
1621 we could perform the bswap twice to restore the original value
1622 instead of moving to the scratch. But as it is, the L constraint
1623 means that TCG_REG_L0 is definitely free here. */
1624 const TCGReg scratch
= TCG_REG_L0
;
1625 const TCGMemOp real_bswap
= memop
& MO_BSWAP
;
1626 TCGMemOp bswap
= real_bswap
;
1627 int movop
= OPC_MOVL_EvGv
;
1629 if (have_movbe
&& real_bswap
) {
1631 movop
= OPC_MOVBE_MyGy
;
1634 switch (memop
& MO_SIZE
) {
1636 /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
1637 Use the scratch register if necessary. */
1638 if (TCG_TARGET_REG_BITS
== 32 && datalo
>= 4) {
1639 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
1642 tcg_out_modrm_offset(s
, OPC_MOVB_EvGv
+ P_REXB_R
+ seg
,
1647 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
1648 tcg_out_rolw_8(s
, scratch
);
1651 tcg_out_modrm_offset(s
, movop
+ P_DATA16
+ seg
, datalo
, base
, ofs
);
1655 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
1656 tcg_out_bswap32(s
, scratch
);
1659 tcg_out_modrm_offset(s
, movop
+ seg
, datalo
, base
, ofs
);
1662 if (TCG_TARGET_REG_BITS
== 64) {
1664 tcg_out_mov(s
, TCG_TYPE_I64
, scratch
, datalo
);
1665 tcg_out_bswap64(s
, scratch
);
1668 tcg_out_modrm_offset(s
, movop
+ P_REXW
+ seg
, datalo
, base
, ofs
);
1670 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datahi
);
1671 tcg_out_bswap32(s
, scratch
);
1672 tcg_out_modrm_offset(s
, OPC_MOVL_EvGv
+ seg
, scratch
, base
, ofs
);
1673 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
1674 tcg_out_bswap32(s
, scratch
);
1675 tcg_out_modrm_offset(s
, OPC_MOVL_EvGv
+ seg
, scratch
, base
, ofs
+4);
1682 tcg_out_modrm_offset(s
, movop
+ seg
, datalo
, base
, ofs
);
1683 tcg_out_modrm_offset(s
, movop
+ seg
, datahi
, base
, ofs
+4);
1691 static void tcg_out_qemu_st(TCGContext
*s
, const TCGArg
*args
, bool is64
)
1693 TCGReg datalo
, datahi
, addrlo
;
1694 TCGReg addrhi
__attribute__((unused
));
1697 #if defined(CONFIG_SOFTMMU)
1699 tcg_insn_unit
*label_ptr
[2];
1703 datahi
= (TCG_TARGET_REG_BITS
== 32 && is64
? *args
++ : 0);
1705 addrhi
= (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
? *args
++ : 0);
1707 opc
= get_memop(oi
);
1709 #if defined(CONFIG_SOFTMMU)
1710 mem_index
= get_mmuidx(oi
);
1712 tcg_out_tlb_load(s
, addrlo
, addrhi
, mem_index
, opc
,
1713 label_ptr
, offsetof(CPUTLBEntry
, addr_write
));
1716 tcg_out_qemu_st_direct(s
, datalo
, datahi
, TCG_REG_L1
, 0, 0, opc
);
1718 /* Record the current context of a store into ldst label */
1719 add_qemu_ldst_label(s
, false, oi
, datalo
, datahi
, addrlo
, addrhi
,
1720 s
->code_ptr
, label_ptr
);
1723 int32_t offset
= guest_base
;
1724 TCGReg base
= addrlo
;
1727 /* See comment in tcg_out_qemu_ld re zero-extension of addrlo. */
1728 if (guest_base
== 0 || guest_base_flags
) {
1729 seg
= guest_base_flags
;
1731 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
1734 } else if (TCG_TARGET_REG_BITS
== 64) {
1735 /* ??? Note that we can't use the same SIB addressing scheme
1736 as for loads, since we require L0 free for bswap. */
1737 if (offset
!= guest_base
) {
1738 if (TARGET_LONG_BITS
== 32) {
1739 tcg_out_ext32u(s
, TCG_REG_L0
, base
);
1742 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_REG_L1
, guest_base
);
1743 tgen_arithr(s
, ARITH_ADD
+ P_REXW
, TCG_REG_L1
, base
);
1746 } else if (TARGET_LONG_BITS
== 32) {
1747 tcg_out_ext32u(s
, TCG_REG_L1
, base
);
1752 tcg_out_qemu_st_direct(s
, datalo
, datahi
, base
, offset
, seg
, opc
);
1757 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1758 const TCGArg
*args
, const int *const_args
)
1760 int c
, vexop
, rexw
= 0;
1762 #if TCG_TARGET_REG_BITS == 64
1763 # define OP_32_64(x) \
1764 case glue(glue(INDEX_op_, x), _i64): \
1765 rexw = P_REXW; /* FALLTHRU */ \
1766 case glue(glue(INDEX_op_, x), _i32)
1768 # define OP_32_64(x) \
1769 case glue(glue(INDEX_op_, x), _i32)
1773 case INDEX_op_exit_tb
:
1774 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_EAX
, args
[0]);
1775 tcg_out_jmp(s
, tb_ret_addr
);
1777 case INDEX_op_goto_tb
:
1778 if (s
->tb_jmp_offset
) {
1779 /* direct jump method */
1780 tcg_out8(s
, OPC_JMP_long
); /* jmp im */
1781 s
->tb_jmp_offset
[args
[0]] = tcg_current_code_size(s
);
1784 /* indirect jump method */
1785 tcg_out_modrm_offset(s
, OPC_GRP5
, EXT5_JMPN_Ev
, -1,
1786 (intptr_t)(s
->tb_next
+ args
[0]));
1788 s
->tb_next_offset
[args
[0]] = tcg_current_code_size(s
);
1791 tcg_out_jxx(s
, JCC_JMP
, arg_label(args
[0]), 0);
1794 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1795 tcg_out_modrm_offset(s
, OPC_MOVZBL
, args
[0], args
[1], args
[2]);
1798 tcg_out_modrm_offset(s
, OPC_MOVSBL
+ rexw
, args
[0], args
[1], args
[2]);
1801 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1802 tcg_out_modrm_offset(s
, OPC_MOVZWL
, args
[0], args
[1], args
[2]);
1805 tcg_out_modrm_offset(s
, OPC_MOVSWL
+ rexw
, args
[0], args
[1], args
[2]);
1807 #if TCG_TARGET_REG_BITS == 64
1808 case INDEX_op_ld32u_i64
:
1810 case INDEX_op_ld_i32
:
1811 tcg_out_ld(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1815 if (const_args
[0]) {
1816 tcg_out_modrm_offset(s
, OPC_MOVB_EvIz
,
1817 0, args
[1], args
[2]);
1818 tcg_out8(s
, args
[0]);
1820 tcg_out_modrm_offset(s
, OPC_MOVB_EvGv
| P_REXB_R
,
1821 args
[0], args
[1], args
[2]);
1825 if (const_args
[0]) {
1826 tcg_out_modrm_offset(s
, OPC_MOVL_EvIz
| P_DATA16
,
1827 0, args
[1], args
[2]);
1828 tcg_out16(s
, args
[0]);
1830 tcg_out_modrm_offset(s
, OPC_MOVL_EvGv
| P_DATA16
,
1831 args
[0], args
[1], args
[2]);
1834 #if TCG_TARGET_REG_BITS == 64
1835 case INDEX_op_st32_i64
:
1837 case INDEX_op_st_i32
:
1838 if (const_args
[0]) {
1839 tcg_out_modrm_offset(s
, OPC_MOVL_EvIz
, 0, args
[1], args
[2]);
1840 tcg_out32(s
, args
[0]);
1842 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1847 /* For 3-operand addition, use LEA. */
1848 if (args
[0] != args
[1]) {
1849 TCGArg a0
= args
[0], a1
= args
[1], a2
= args
[2], c3
= 0;
1851 if (const_args
[2]) {
1853 } else if (a0
== a2
) {
1854 /* Watch out for dest = src + dest, since we've removed
1855 the matching constraint on the add. */
1856 tgen_arithr(s
, ARITH_ADD
+ rexw
, a0
, a1
);
1860 tcg_out_modrm_sib_offset(s
, OPC_LEA
+ rexw
, a0
, a1
, a2
, 0, c3
);
1878 if (const_args
[2]) {
1879 tgen_arithi(s
, c
+ rexw
, args
[0], args
[2], 0);
1881 tgen_arithr(s
, c
+ rexw
, args
[0], args
[2]);
1886 if (const_args
[2]) {
1887 tcg_out_mov(s
, rexw
? TCG_TYPE_I64
: TCG_TYPE_I32
,
1889 tgen_arithi(s
, ARITH_AND
+ rexw
, args
[0], ~args
[2], 0);
1891 tcg_out_vex_modrm(s
, OPC_ANDN
+ rexw
, args
[0], args
[2], args
[1]);
1896 if (const_args
[2]) {
1899 if (val
== (int8_t)val
) {
1900 tcg_out_modrm(s
, OPC_IMUL_GvEvIb
+ rexw
, args
[0], args
[0]);
1903 tcg_out_modrm(s
, OPC_IMUL_GvEvIz
+ rexw
, args
[0], args
[0]);
1907 tcg_out_modrm(s
, OPC_IMUL_GvEv
+ rexw
, args
[0], args
[2]);
1912 tcg_out_modrm(s
, OPC_GRP3_Ev
+ rexw
, EXT3_IDIV
, args
[4]);
1915 tcg_out_modrm(s
, OPC_GRP3_Ev
+ rexw
, EXT3_DIV
, args
[4]);
1921 goto gen_shift_maybe_vex
;
1925 goto gen_shift_maybe_vex
;
1929 goto gen_shift_maybe_vex
;
1936 gen_shift_maybe_vex
:
1937 if (have_bmi2
&& !const_args
[2]) {
1938 tcg_out_vex_modrm(s
, vexop
+ rexw
, args
[0], args
[2], args
[1]);
1943 if (const_args
[2]) {
1944 tcg_out_shifti(s
, c
+ rexw
, args
[0], args
[2]);
1946 tcg_out_modrm(s
, OPC_SHIFT_cl
+ rexw
, c
, args
[0]);
1950 case INDEX_op_brcond_i32
:
1951 tcg_out_brcond32(s
, args
[2], args
[0], args
[1], const_args
[1],
1952 arg_label(args
[3]), 0);
1954 case INDEX_op_setcond_i32
:
1955 tcg_out_setcond32(s
, args
[3], args
[0], args
[1],
1956 args
[2], const_args
[2]);
1958 case INDEX_op_movcond_i32
:
1959 tcg_out_movcond32(s
, args
[5], args
[0], args
[1],
1960 args
[2], const_args
[2], args
[3]);
1964 tcg_out_rolw_8(s
, args
[0]);
1967 tcg_out_bswap32(s
, args
[0]);
1971 tcg_out_modrm(s
, OPC_GRP3_Ev
+ rexw
, EXT3_NEG
, args
[0]);
1974 tcg_out_modrm(s
, OPC_GRP3_Ev
+ rexw
, EXT3_NOT
, args
[0]);
1978 tcg_out_ext8s(s
, args
[0], args
[1], rexw
);
1981 tcg_out_ext16s(s
, args
[0], args
[1], rexw
);
1984 tcg_out_ext8u(s
, args
[0], args
[1]);
1987 tcg_out_ext16u(s
, args
[0], args
[1]);
1990 case INDEX_op_qemu_ld_i32
:
1991 tcg_out_qemu_ld(s
, args
, 0);
1993 case INDEX_op_qemu_ld_i64
:
1994 tcg_out_qemu_ld(s
, args
, 1);
1996 case INDEX_op_qemu_st_i32
:
1997 tcg_out_qemu_st(s
, args
, 0);
1999 case INDEX_op_qemu_st_i64
:
2000 tcg_out_qemu_st(s
, args
, 1);
2004 tcg_out_modrm(s
, OPC_GRP3_Ev
+ rexw
, EXT3_MUL
, args
[3]);
2007 tcg_out_modrm(s
, OPC_GRP3_Ev
+ rexw
, EXT3_IMUL
, args
[3]);
2010 if (const_args
[4]) {
2011 tgen_arithi(s
, ARITH_ADD
+ rexw
, args
[0], args
[4], 1);
2013 tgen_arithr(s
, ARITH_ADD
+ rexw
, args
[0], args
[4]);
2015 if (const_args
[5]) {
2016 tgen_arithi(s
, ARITH_ADC
+ rexw
, args
[1], args
[5], 1);
2018 tgen_arithr(s
, ARITH_ADC
+ rexw
, args
[1], args
[5]);
2022 if (const_args
[4]) {
2023 tgen_arithi(s
, ARITH_SUB
+ rexw
, args
[0], args
[4], 1);
2025 tgen_arithr(s
, ARITH_SUB
+ rexw
, args
[0], args
[4]);
2027 if (const_args
[5]) {
2028 tgen_arithi(s
, ARITH_SBB
+ rexw
, args
[1], args
[5], 1);
2030 tgen_arithr(s
, ARITH_SBB
+ rexw
, args
[1], args
[5]);
2034 #if TCG_TARGET_REG_BITS == 32
2035 case INDEX_op_brcond2_i32
:
2036 tcg_out_brcond2(s
, args
, const_args
, 0);
2038 case INDEX_op_setcond2_i32
:
2039 tcg_out_setcond2(s
, args
, const_args
);
2041 #else /* TCG_TARGET_REG_BITS == 64 */
2042 case INDEX_op_ld32s_i64
:
2043 tcg_out_modrm_offset(s
, OPC_MOVSLQ
, args
[0], args
[1], args
[2]);
2045 case INDEX_op_ld_i64
:
2046 tcg_out_ld(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
2048 case INDEX_op_st_i64
:
2049 if (const_args
[0]) {
2050 tcg_out_modrm_offset(s
, OPC_MOVL_EvIz
| P_REXW
,
2051 0, args
[1], args
[2]);
2052 tcg_out32(s
, args
[0]);
2054 tcg_out_st(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
2058 case INDEX_op_brcond_i64
:
2059 tcg_out_brcond64(s
, args
[2], args
[0], args
[1], const_args
[1],
2060 arg_label(args
[3]), 0);
2062 case INDEX_op_setcond_i64
:
2063 tcg_out_setcond64(s
, args
[3], args
[0], args
[1],
2064 args
[2], const_args
[2]);
2066 case INDEX_op_movcond_i64
:
2067 tcg_out_movcond64(s
, args
[5], args
[0], args
[1],
2068 args
[2], const_args
[2], args
[3]);
2071 case INDEX_op_bswap64_i64
:
2072 tcg_out_bswap64(s
, args
[0]);
2074 case INDEX_op_extu_i32_i64
:
2075 case INDEX_op_ext32u_i64
:
2076 tcg_out_ext32u(s
, args
[0], args
[1]);
2078 case INDEX_op_ext_i32_i64
:
2079 case INDEX_op_ext32s_i64
:
2080 tcg_out_ext32s(s
, args
[0], args
[1]);
2085 if (args
[3] == 0 && args
[4] == 8) {
2086 /* load bits 0..7 */
2087 tcg_out_modrm(s
, OPC_MOVB_EvGv
| P_REXB_R
| P_REXB_RM
,
2089 } else if (args
[3] == 8 && args
[4] == 8) {
2090 /* load bits 8..15 */
2091 tcg_out_modrm(s
, OPC_MOVB_EvGv
, args
[2], args
[0] + 4);
2092 } else if (args
[3] == 0 && args
[4] == 16) {
2093 /* load bits 0..15 */
2094 tcg_out_modrm(s
, OPC_MOVL_EvGv
| P_DATA16
, args
[2], args
[0]);
2100 case INDEX_op_mov_i32
: /* Always emitted via tcg_out_mov. */
2101 case INDEX_op_mov_i64
:
2102 case INDEX_op_movi_i32
: /* Always emitted via tcg_out_movi. */
2103 case INDEX_op_movi_i64
:
2104 case INDEX_op_call
: /* Always emitted via tcg_out_call. */
2112 static const TCGTargetOpDef x86_op_defs
[] = {
2113 { INDEX_op_exit_tb
, { } },
2114 { INDEX_op_goto_tb
, { } },
2115 { INDEX_op_br
, { } },
2116 { INDEX_op_ld8u_i32
, { "r", "r" } },
2117 { INDEX_op_ld8s_i32
, { "r", "r" } },
2118 { INDEX_op_ld16u_i32
, { "r", "r" } },
2119 { INDEX_op_ld16s_i32
, { "r", "r" } },
2120 { INDEX_op_ld_i32
, { "r", "r" } },
2121 { INDEX_op_st8_i32
, { "qi", "r" } },
2122 { INDEX_op_st16_i32
, { "ri", "r" } },
2123 { INDEX_op_st_i32
, { "ri", "r" } },
2125 { INDEX_op_add_i32
, { "r", "r", "ri" } },
2126 { INDEX_op_sub_i32
, { "r", "0", "ri" } },
2127 { INDEX_op_mul_i32
, { "r", "0", "ri" } },
2128 { INDEX_op_div2_i32
, { "a", "d", "0", "1", "r" } },
2129 { INDEX_op_divu2_i32
, { "a", "d", "0", "1", "r" } },
2130 { INDEX_op_and_i32
, { "r", "0", "ri" } },
2131 { INDEX_op_or_i32
, { "r", "0", "ri" } },
2132 { INDEX_op_xor_i32
, { "r", "0", "ri" } },
2133 { INDEX_op_andc_i32
, { "r", "r", "ri" } },
2135 { INDEX_op_shl_i32
, { "r", "0", "Ci" } },
2136 { INDEX_op_shr_i32
, { "r", "0", "Ci" } },
2137 { INDEX_op_sar_i32
, { "r", "0", "Ci" } },
2138 { INDEX_op_rotl_i32
, { "r", "0", "ci" } },
2139 { INDEX_op_rotr_i32
, { "r", "0", "ci" } },
2141 { INDEX_op_brcond_i32
, { "r", "ri" } },
2143 { INDEX_op_bswap16_i32
, { "r", "0" } },
2144 { INDEX_op_bswap32_i32
, { "r", "0" } },
2146 { INDEX_op_neg_i32
, { "r", "0" } },
2148 { INDEX_op_not_i32
, { "r", "0" } },
2150 { INDEX_op_ext8s_i32
, { "r", "q" } },
2151 { INDEX_op_ext16s_i32
, { "r", "r" } },
2152 { INDEX_op_ext8u_i32
, { "r", "q" } },
2153 { INDEX_op_ext16u_i32
, { "r", "r" } },
2155 { INDEX_op_setcond_i32
, { "q", "r", "ri" } },
2157 { INDEX_op_deposit_i32
, { "Q", "0", "Q" } },
2158 { INDEX_op_movcond_i32
, { "r", "r", "ri", "r", "0" } },
2160 { INDEX_op_mulu2_i32
, { "a", "d", "a", "r" } },
2161 { INDEX_op_muls2_i32
, { "a", "d", "a", "r" } },
2162 { INDEX_op_add2_i32
, { "r", "r", "0", "1", "ri", "ri" } },
2163 { INDEX_op_sub2_i32
, { "r", "r", "0", "1", "ri", "ri" } },
2165 #if TCG_TARGET_REG_BITS == 32
2166 { INDEX_op_brcond2_i32
, { "r", "r", "ri", "ri" } },
2167 { INDEX_op_setcond2_i32
, { "r", "r", "r", "ri", "ri" } },
2169 { INDEX_op_ld8u_i64
, { "r", "r" } },
2170 { INDEX_op_ld8s_i64
, { "r", "r" } },
2171 { INDEX_op_ld16u_i64
, { "r", "r" } },
2172 { INDEX_op_ld16s_i64
, { "r", "r" } },
2173 { INDEX_op_ld32u_i64
, { "r", "r" } },
2174 { INDEX_op_ld32s_i64
, { "r", "r" } },
2175 { INDEX_op_ld_i64
, { "r", "r" } },
2176 { INDEX_op_st8_i64
, { "ri", "r" } },
2177 { INDEX_op_st16_i64
, { "ri", "r" } },
2178 { INDEX_op_st32_i64
, { "ri", "r" } },
2179 { INDEX_op_st_i64
, { "re", "r" } },
2181 { INDEX_op_add_i64
, { "r", "r", "re" } },
2182 { INDEX_op_mul_i64
, { "r", "0", "re" } },
2183 { INDEX_op_div2_i64
, { "a", "d", "0", "1", "r" } },
2184 { INDEX_op_divu2_i64
, { "a", "d", "0", "1", "r" } },
2185 { INDEX_op_sub_i64
, { "r", "0", "re" } },
2186 { INDEX_op_and_i64
, { "r", "0", "reZ" } },
2187 { INDEX_op_or_i64
, { "r", "0", "re" } },
2188 { INDEX_op_xor_i64
, { "r", "0", "re" } },
2189 { INDEX_op_andc_i64
, { "r", "r", "rI" } },
2191 { INDEX_op_shl_i64
, { "r", "0", "Ci" } },
2192 { INDEX_op_shr_i64
, { "r", "0", "Ci" } },
2193 { INDEX_op_sar_i64
, { "r", "0", "Ci" } },
2194 { INDEX_op_rotl_i64
, { "r", "0", "ci" } },
2195 { INDEX_op_rotr_i64
, { "r", "0", "ci" } },
2197 { INDEX_op_brcond_i64
, { "r", "re" } },
2198 { INDEX_op_setcond_i64
, { "r", "r", "re" } },
2200 { INDEX_op_bswap16_i64
, { "r", "0" } },
2201 { INDEX_op_bswap32_i64
, { "r", "0" } },
2202 { INDEX_op_bswap64_i64
, { "r", "0" } },
2203 { INDEX_op_neg_i64
, { "r", "0" } },
2204 { INDEX_op_not_i64
, { "r", "0" } },
2206 { INDEX_op_ext8s_i64
, { "r", "r" } },
2207 { INDEX_op_ext16s_i64
, { "r", "r" } },
2208 { INDEX_op_ext32s_i64
, { "r", "r" } },
2209 { INDEX_op_ext8u_i64
, { "r", "r" } },
2210 { INDEX_op_ext16u_i64
, { "r", "r" } },
2211 { INDEX_op_ext32u_i64
, { "r", "r" } },
2213 { INDEX_op_ext_i32_i64
, { "r", "r" } },
2214 { INDEX_op_extu_i32_i64
, { "r", "r" } },
2216 { INDEX_op_deposit_i64
, { "Q", "0", "Q" } },
2217 { INDEX_op_movcond_i64
, { "r", "r", "re", "r", "0" } },
2219 { INDEX_op_mulu2_i64
, { "a", "d", "a", "r" } },
2220 { INDEX_op_muls2_i64
, { "a", "d", "a", "r" } },
2221 { INDEX_op_add2_i64
, { "r", "r", "0", "1", "re", "re" } },
2222 { INDEX_op_sub2_i64
, { "r", "r", "0", "1", "re", "re" } },
2225 #if TCG_TARGET_REG_BITS == 64
2226 { INDEX_op_qemu_ld_i32
, { "r", "L" } },
2227 { INDEX_op_qemu_st_i32
, { "L", "L" } },
2228 { INDEX_op_qemu_ld_i64
, { "r", "L" } },
2229 { INDEX_op_qemu_st_i64
, { "L", "L" } },
2230 #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
2231 { INDEX_op_qemu_ld_i32
, { "r", "L" } },
2232 { INDEX_op_qemu_st_i32
, { "L", "L" } },
2233 { INDEX_op_qemu_ld_i64
, { "r", "r", "L" } },
2234 { INDEX_op_qemu_st_i64
, { "L", "L", "L" } },
2236 { INDEX_op_qemu_ld_i32
, { "r", "L", "L" } },
2237 { INDEX_op_qemu_st_i32
, { "L", "L", "L" } },
2238 { INDEX_op_qemu_ld_i64
, { "r", "r", "L", "L" } },
2239 { INDEX_op_qemu_st_i64
, { "L", "L", "L", "L" } },
2244 static int tcg_target_callee_save_regs
[] = {
2245 #if TCG_TARGET_REG_BITS == 64
2254 TCG_REG_R14
, /* Currently used for the global env. */
2257 TCG_REG_EBP
, /* Currently used for the global env. */
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

/* Bytes pushed by the prologue: saved registers plus the return address.  */
#define PUSH_SIZE \
    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
     * (TCG_TARGET_REG_BITS / 8))

/* Total frame: pushes + outgoing call args + TCG temp buffer, rounded up
   to the stack alignment.  */
#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
2278 /* Generate global QEMU prologue and epilogue code */
2279 static void tcg_target_qemu_prologue(TCGContext
*s
)
2281 int i
, stack_addend
;
2285 /* Reserve some stack space, also for TCG temps. */
2286 stack_addend
= FRAME_SIZE
- PUSH_SIZE
;
2287 tcg_set_frame(s
, TCG_REG_CALL_STACK
, TCG_STATIC_CALL_ARGS_SIZE
,
2288 CPU_TEMP_BUF_NLONGS
* sizeof(long));
2290 /* Save all callee saved registers. */
2291 for (i
= 0; i
< ARRAY_SIZE(tcg_target_callee_save_regs
); i
++) {
2292 tcg_out_push(s
, tcg_target_callee_save_regs
[i
]);
2295 #if TCG_TARGET_REG_BITS == 32
2296 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_AREG0
, TCG_REG_ESP
,
2297 (ARRAY_SIZE(tcg_target_callee_save_regs
) + 1) * 4);
2298 tcg_out_addi(s
, TCG_REG_ESP
, -stack_addend
);
2300 tcg_out_modrm_offset(s
, OPC_GRP5
, EXT5_JMPN_Ev
, TCG_REG_ESP
,
2301 (ARRAY_SIZE(tcg_target_callee_save_regs
) + 2) * 4
2304 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
2305 tcg_out_addi(s
, TCG_REG_ESP
, -stack_addend
);
2307 tcg_out_modrm(s
, OPC_GRP5
, EXT5_JMPN_Ev
, tcg_target_call_iarg_regs
[1]);
2311 tb_ret_addr
= s
->code_ptr
;
2313 tcg_out_addi(s
, TCG_REG_CALL_STACK
, stack_addend
);
2315 for (i
= ARRAY_SIZE(tcg_target_callee_save_regs
) - 1; i
>= 0; i
--) {
2316 tcg_out_pop(s
, tcg_target_callee_save_regs
[i
]);
2318 tcg_out_opc(s
, OPC_RET
, 0, 0, 0);
2320 #if !defined(CONFIG_SOFTMMU)
2321 /* Try to set up a segment register to point to guest_base. */
2323 setup_guest_base_seg();
2328 static void tcg_target_init(TCGContext
*s
)
2330 #ifdef CONFIG_CPUID_H
2331 unsigned a
, b
, c
, d
;
2332 int max
= __get_cpuid_max(0, 0);
2335 __cpuid(1, a
, b
, c
, d
);
2337 /* For 32-bit, 99% certainty that we're running on hardware that
2338 supports cmov, but we still need to check. In case cmov is not
2339 available, we'll use a small forward branch. */
2340 have_cmov
= (d
& bit_CMOV
) != 0;
2343 /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
2344 need to probe for it. */
2345 have_movbe
= (c
& bit_MOVBE
) != 0;
2350 /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
2351 __cpuid_count(7, 0, a
, b
, c
, d
);
2353 have_bmi1
= (b
& bit_BMI
) != 0;
2356 have_bmi2
= (b
& bit_BMI2
) != 0;
2361 if (TCG_TARGET_REG_BITS
== 64) {
2362 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xffff);
2363 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I64
], 0, 0xffff);
2365 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xff);
2368 tcg_regset_clear(tcg_target_call_clobber_regs
);
2369 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_EAX
);
2370 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_EDX
);
2371 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_ECX
);
2372 if (TCG_TARGET_REG_BITS
== 64) {
2373 #if !defined(_WIN64)
2374 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_RDI
);
2375 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_RSI
);
2377 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R8
);
2378 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R9
);
2379 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R10
);
2380 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R11
);
2383 tcg_regset_clear(s
->reserved_regs
);
2384 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2386 tcg_add_target_add_op_defs(x86_op_defs
);
2391 uint8_t fde_def_cfa
[4];
2392 uint8_t fde_reg_ofs
[14];
2395 /* We're expecting a 2 byte uleb128 encoded value. */
2396 QEMU_BUILD_BUG_ON(FRAME_SIZE
>= (1 << 14));
2398 #if !defined(__ELF__)
2399 /* Host machine without ELF. */
2400 #elif TCG_TARGET_REG_BITS == 64
2401 #define ELF_HOST_MACHINE EM_X86_64
2402 static const DebugFrame debug_frame
= {
2403 .h
.cie
.len
= sizeof(DebugFrameCIE
)-4, /* length after .len member */
2406 .h
.cie
.code_align
= 1,
2407 .h
.cie
.data_align
= 0x78, /* sleb128 -8 */
2408 .h
.cie
.return_column
= 16,
2410 /* Total FDE size does not include the "len" member. */
2411 .h
.fde
.len
= sizeof(DebugFrame
) - offsetof(DebugFrame
, h
.fde
.cie_offset
),
2414 12, 7, /* DW_CFA_def_cfa %rsp, ... */
2415 (FRAME_SIZE
& 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2419 0x90, 1, /* DW_CFA_offset, %rip, -8 */
2420 /* The following ordering must match tcg_target_callee_save_regs. */
2421 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
2422 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
2423 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
2424 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
2425 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
2426 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
2430 #define ELF_HOST_MACHINE EM_386
2431 static const DebugFrame debug_frame
= {
2432 .h
.cie
.len
= sizeof(DebugFrameCIE
)-4, /* length after .len member */
2435 .h
.cie
.code_align
= 1,
2436 .h
.cie
.data_align
= 0x7c, /* sleb128 -4 */
2437 .h
.cie
.return_column
= 8,
2439 /* Total FDE size does not include the "len" member. */
2440 .h
.fde
.len
= sizeof(DebugFrame
) - offsetof(DebugFrame
, h
.fde
.cie_offset
),
2443 12, 4, /* DW_CFA_def_cfa %esp, ... */
2444 (FRAME_SIZE
& 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2448 0x88, 1, /* DW_CFA_offset, %eip, -4 */
2449 /* The following ordering must match tcg_target_callee_save_regs. */
2450 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
2451 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
2452 0x86, 4, /* DW_CFA_offset, %esi, -16 */
2453 0x87, 5, /* DW_CFA_offset, %edi, -20 */
2458 #if defined(ELF_HOST_MACHINE)
2459 void tcg_register_jit(void *buf
, size_t buf_size
)
2461 tcg_register_jit_int(buf
, buf_size
, &debug_frame
, sizeof(debug_frame
));