/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

static int x86_64_hregs;
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
};
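
/* The liveness table above drives set_cc_op() below: when the translator
   switches from one CC_OP to another, any of cpu_cc_dst/cpu_cc_src/
   cpu_cc_src2/cpu_cc_srcT that was live for the old CC_OP but is not live
   for the new one is dead and gets discarded.  For example, moving from
   CC_OP_SUBB to CC_OP_LOGICB leaves only CC_DST live, so CC_SRC and
   CC_SRCT are dropped. */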
static void set_cc_op(DisasContext *s, CCOp op)
    if (s->cc_op == op) {

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean. */
        s->cc_op_dirty = false;
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        s->cc_op_dirty = true;

static void gen_update_cc_op(DisasContext *s)
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
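
/* The translator keeps the current CC_OP value in the DisasContext and only
   spills it to the cpu_cc_op global when it is "dirty", i.e. when the value
   known at translation time is not yet reflected in the CPU state.  Code
   that calls helpers which read the flags at run time therefore invokes
   gen_update_cc_op() first. */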
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
    if (reg >= 8 || x86_64_hregs) {

/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
    return ot == MO_16 ? MO_16 : MO_64;

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
    return ot == MO_64 ? MO_64 : MO_32;

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
    return b & 1 ? ot : MO_8;

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        tcg_gen_mov_tl(cpu_regs[reg], t0);
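
/* gen_op_mov_reg_v() above writes a value back to a guest register while
   preserving the bits that the operand size does not cover: an 8-bit write
   deposits into bits 0..7 (or bits 8..15 for AH/CH/DH/BH), a 16-bit write
   deposits into bits 0..15, and a 32-bit write zero-extends, matching the
   x86_64 rule that 32-bit operations clear the upper half of the register. */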
static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
        tcg_gen_mov_tl(t0, cpu_regs[reg]);

static inline void gen_op_movl_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addl_A0_im(int32_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
static inline void gen_op_addq_A0_im(int64_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);

static void gen_add_A0_im(DisasContext *s, int val)
    gen_op_addq_A0_im(val);
    gen_op_addl_A0_im(val);

static inline void gen_op_jmp_v(TCGv dest)
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
static inline void gen_op_movl_A0_seg(int reg)
    tcg_gen_ld32u_tl(cpu_A0, cpu_env,
                     offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));

static inline void gen_op_addq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
        gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
        gen_op_mov_reg_v(idx, d, cpu_T[0]);

static inline void gen_jmp_im(target_ulong pc)
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
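
/* gen_jmp_im() materialises a known EIP value into env->eip through a TCG
   temporary; the translator calls it (typically together with
   gen_update_cc_op) before helpers that can raise exceptions or otherwise
   need the architectural instruction pointer to be up to date. */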
static inline void gen_string_movl_A0_ESI(DisasContext *s)
    override = s->override;
        gen_op_movq_A0_seg(override);
        gen_op_addq_A0_reg_sN(0, R_ESI);
        gen_op_movq_A0_reg(R_ESI);
        if (s->addseg && override < 0)
        gen_op_movl_A0_seg(override);
        gen_op_addl_A0_reg_sN(0, R_ESI);
        gen_op_movl_A0_reg(R_ESI);
        /* 16 address, always override */
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
        gen_op_addl_A0_seg(s, override);

static inline void gen_string_movl_A0_EDI(DisasContext *s)
        gen_op_movq_A0_reg(R_EDI);
        gen_op_movl_A0_seg(R_ES);
        gen_op_addl_A0_reg_sN(0, R_EDI);
        gen_op_movl_A0_reg(R_EDI);
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
        gen_op_addl_A0_seg(s, R_ES);

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
        tcg_gen_ext8s_tl(dst, src);
        tcg_gen_ext8u_tl(dst, src);
        tcg_gen_ext16s_tl(dst, src);
        tcg_gen_ext16u_tl(dst, src);
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_ext32u_tl(dst, src);

static void gen_extu(TCGMemOp ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, false);

static void gen_exts(TCGMemOp ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, true);

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
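
/* The two helpers above copy ECX, mask it to the given address size and
   branch when it is non-zero resp. zero; they provide the loop test for the
   REP/REPZ/REPNZ string translations further down, e.g. gen_jz_ecx_string()
   jumps straight to the next instruction when ECX is already 0. */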
static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
        gen_helper_inb(v, cpu_env, n);
        gen_helper_inw(v, cpu_env, n);
        gen_helper_inl(v, cpu_env, n);

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
        gen_helper_outb(cpu_env, v, n);
        gen_helper_outw(cpu_env, v, n);
        gen_helper_outl(cpu_env, v, n);

static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
    if (s->flags & HF_SVMI_MASK) {
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static void gen_op_update1_cc(void)
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static void gen_op_update2_cc(void)
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static void gen_op_update3_cc(TCGv reg)
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static inline void gen_op_testl_T0_T1_cc(void)
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);

static void gen_op_update_neg_cc(void)
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
    TCGv zero, dst, src1, src2;

    if (s->cc_op == CC_OP_EFLAGS) {
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    zero = tcg_const_tl(0);
    if (dead & USES_CC_DST) {
    if (dead & USES_CC_SRC) {
    if (dead & USES_CC_SRC2) {

    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
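
/* The gen_prepare_* functions below do not emit the test themselves; they
   return a CCPrepare descriptor (condition, register(s), immediate, mask,
   plus the use_reg2/no_setcond flags) describing how the requested flag can
   be recovered from the current CC_OP state.  gen_setcc1(), gen_jcc1() and
   gen_cmovcc1() then turn that description into a setcond, brcond or
   movcond as appropriate. */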
typedef struct CCPrepare {

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SARB ... CC_OP_SARQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
        TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
        TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
        TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
        TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
    int inv, jcc_op, cond;

    jcc_op = (b >> 1) & 7;

    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };

        /* This actually generates good code for JC, JZ and JS.  */
            cc = gen_prepare_eflags_o(s, reg);
            cc = gen_prepare_eflags_c(s, reg);
            cc = gen_prepare_eflags_z(s, reg);
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            cc = gen_prepare_eflags_s(s, reg);
            cc = gen_prepare_eflags_p(s, reg);
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };

        cc.cond = tcg_invert_cond(cc.cond);
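
/* The low bit of 'b' selects the inverted form of the condition (handled by
   the tcg_invert_cond() above) and (b >> 1) & 7 selects the base condition
   in the usual x86 order: O, C/B, Z, BE, S, P, L, LE.  For example "jne"
   arrives here as (JCC_Z << 1) | 1, i.e. the Z test with the inversion bit
   set. */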
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
            tcg_gen_mov_tl(reg, cc.reg);

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
    gen_setcc1(s, JCC_B << 1, reg);

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
    set_cc_op(s, CC_OP_DYNAMIC);
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
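
/* Unlike gen_jcc1_noeob(), gen_jcc1() is used when the translation block is
   about to end: it first spills the known CC_OP with gen_update_cc_op() and
   then switches to CC_OP_DYNAMIC, since after the conditional branch the
   translator can no longer know statically which flag state either
   successor path will see. */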
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_jmp_tb(s, next_eip, 1);

static inline void gen_stos(DisasContext *s, TCGMemOp ot)
    gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
static inline void gen_scas(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_ins(DisasContext *s, TCGMemOp ot)
    if (s->tb->cflags & CF_USE_ICOUNT) {
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T[0], 0);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (s->tb->cflags & CF_USE_ICOUNT) {
static inline void gen_outs(DisasContext *s, TCGMemOp ot)
    if (s->tb->cflags & CF_USE_ICOUNT) {
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (s->tb->cflags & CF_USE_ICOUNT) {

/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
        gen_op_jz_ecx(s->aflag, l2);                                          \
        gen_jmp(s, cur_eip);                                                  \

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
        gen_op_jz_ecx(s->aflag, l2);                                          \
        gen_jmp(s, cur_eip);                                                  \
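
/* The REP/REPZ expansion above translates one iteration at a time: if ECX is
   already zero it jumps straight to the next instruction, otherwise it
   performs the string operation once, decrements ECX (and, for REPZ/REPNZ,
   re-tests ZF via gen_jcc1), and finally jumps back to the current
   instruction so the next iteration runs as a fresh translation block. */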
static void gen_helper_fp_arith_ST0_FT0(int op)
        gen_helper_fadd_ST0_FT0(cpu_env);
        gen_helper_fmul_ST0_FT0(cpu_env);
        gen_helper_fcom_ST0_FT0(cpu_env);
        gen_helper_fcom_ST0_FT0(cpu_env);
        gen_helper_fsub_ST0_FT0(cpu_env);
        gen_helper_fsubr_ST0_FT0(cpu_env);
        gen_helper_fdiv_ST0_FT0(cpu_env);
        gen_helper_fdivr_ST0_FT0(cpu_env);

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
    TCGv_i32 tmp = tcg_const_i32(opreg);
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
        gen_op_mov_v_reg(ot, cpu_T[0], d);
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);

        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);

        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);

        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);

        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);

        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);

        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);

        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);

        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
        gen_op_mov_v_reg(ot, cpu_T[0], d);
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    gen_compute_eflags_c(s1, cpu_cc_src);
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
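
/* INC and DEC leave CF unchanged, so gen_inc() saves the carry computed from
   the previous operation into cpu_cc_src before switching to
   CC_OP_INCB/CC_OP_DECB + ot; the later flag computation then merges the
   preserved carry back into the result flags. */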
1394 static void gen_shift_flags(DisasContext
*s
, TCGMemOp ot
, TCGv result
,
1395 TCGv shm1
, TCGv count
, bool is_right
)
1397 TCGv_i32 z32
, s32
, oldop
;
1400 /* Store the results into the CC variables. If we know that the
1401 variable must be dead, store unconditionally. Otherwise we'll
1402 need to not disrupt the current contents. */
1403 z_tl
= tcg_const_tl(0);
1404 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1405 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1406 result
, cpu_cc_dst
);
1408 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1410 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1411 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1414 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1416 tcg_temp_free(z_tl
);
1418 /* Get the two potential CC_OP values into temporaries. */
1419 tcg_gen_movi_i32(cpu_tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1420 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1423 tcg_gen_movi_i32(cpu_tmp3_i32
, s
->cc_op
);
1424 oldop
= cpu_tmp3_i32
;
1427 /* Conditionally store the CC_OP value. */
1428 z32
= tcg_const_i32(0);
1429 s32
= tcg_temp_new_i32();
1430 tcg_gen_trunc_tl_i32(s32
, count
);
1431 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, cpu_tmp2_i32
, oldop
);
1432 tcg_temp_free_i32(z32
);
1433 tcg_temp_free_i32(s32
);
1435 /* The CC_OP value is no longer predictable. */
1436 set_cc_op(s
, CC_OP_DYNAMIC
);
1439 static void gen_shift_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1440 int is_right
, int is_arith
)
1442 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1445 if (op1
== OR_TMP0
) {
1446 gen_op_ld_v(s
, ot
, cpu_T
[0], cpu_A0
);
1448 gen_op_mov_v_reg(ot
, cpu_T
[0], op1
);
1451 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1452 tcg_gen_subi_tl(cpu_tmp0
, cpu_T
[1], 1);
1456 gen_exts(ot
, cpu_T
[0]);
1457 tcg_gen_sar_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1458 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1460 gen_extu(ot
, cpu_T
[0]);
1461 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1462 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1465 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1466 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1470 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1472 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, cpu_T
[1], is_right
);
1475 static void gen_shift_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1476 int is_right
, int is_arith
)
1478 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1482 gen_op_ld_v(s
, ot
, cpu_T
[0], cpu_A0
);
1484 gen_op_mov_v_reg(ot
, cpu_T
[0], op1
);
1490 gen_exts(ot
, cpu_T
[0]);
1491 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1492 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1494 gen_extu(ot
, cpu_T
[0]);
1495 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1496 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1499 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1500 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1505 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1507 /* update eflags if non zero shift */
1509 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1510 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1511 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1515 static void gen_rot_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
, int is_right
)
1517 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1521 if (op1
== OR_TMP0
) {
1522 gen_op_ld_v(s
, ot
, cpu_T
[0], cpu_A0
);
1524 gen_op_mov_v_reg(ot
, cpu_T
[0], op1
);
1527 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1531 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1532 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
1533 tcg_gen_muli_tl(cpu_T
[0], cpu_T
[0], 0x01010101);
1536 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1537 tcg_gen_deposit_tl(cpu_T
[0], cpu_T
[0], cpu_T
[0], 16, 16);
1540 #ifdef TARGET_X86_64
1542 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1543 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
1545 tcg_gen_rotr_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1547 tcg_gen_rotl_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1549 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1554 tcg_gen_rotr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1556 tcg_gen_rotl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1562 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1564 /* We'll need the flags computed into CC_SRC. */
1565 gen_compute_eflags(s
);
1567 /* The value that was "rotated out" is now present at the other end
1568 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1569 since we've computed the flags into CC_SRC, these variables are
1572 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1573 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1574 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1576 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1577 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1579 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1580 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1582 /* Now conditionally store the new CC_OP value. If the shift count
1583 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1584 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1585 exactly as we computed above. */
1586 t0
= tcg_const_i32(0);
1587 t1
= tcg_temp_new_i32();
1588 tcg_gen_trunc_tl_i32(t1
, cpu_T
[1]);
1589 tcg_gen_movi_i32(cpu_tmp2_i32
, CC_OP_ADCOX
);
1590 tcg_gen_movi_i32(cpu_tmp3_i32
, CC_OP_EFLAGS
);
1591 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1592 cpu_tmp2_i32
, cpu_tmp3_i32
);
1593 tcg_temp_free_i32(t0
);
1594 tcg_temp_free_i32(t1
);
1596 /* The CC_OP value is no longer predictable. */
1597 set_cc_op(s
, CC_OP_DYNAMIC
);
1600 static void gen_rot_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1603 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1607 if (op1
== OR_TMP0
) {
1608 gen_op_ld_v(s
, ot
, cpu_T
[0], cpu_A0
);
1610 gen_op_mov_v_reg(ot
, cpu_T
[0], op1
);
1616 #ifdef TARGET_X86_64
1618 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1620 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1622 tcg_gen_rotli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1624 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1629 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], op2
);
1631 tcg_gen_rotli_tl(cpu_T
[0], cpu_T
[0], op2
);
1642 shift
= mask
+ 1 - shift
;
1644 gen_extu(ot
, cpu_T
[0]);
1645 tcg_gen_shli_tl(cpu_tmp0
, cpu_T
[0], shift
);
1646 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], mask
+ 1 - shift
);
1647 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
1653 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1656 /* Compute the flags into CC_SRC. */
1657 gen_compute_eflags(s
);
1659 /* The value that was "rotated out" is now present at the other end
1660 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1661 since we've computed the flags into CC_SRC, these variables are
1664 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1665 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1666 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1668 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1669 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1671 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1672 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1673 set_cc_op(s
, CC_OP_ADCOX
);
1677 /* XXX: add faster immediate = 1 case */
1678 static void gen_rotc_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1681 gen_compute_eflags(s
);
1682 assert(s
->cc_op
== CC_OP_EFLAGS
);
1686 gen_op_ld_v(s
, ot
, cpu_T
[0], cpu_A0
);
1688 gen_op_mov_v_reg(ot
, cpu_T
[0], op1
);
1693 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1696 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1699 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1701 #ifdef TARGET_X86_64
1703 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1712 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1715 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1718 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1720 #ifdef TARGET_X86_64
1722 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1730 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1733 /* XXX: add faster immediate case */
1734 static void gen_shiftd_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1735 bool is_right
, TCGv count_in
)
1737 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1741 if (op1
== OR_TMP0
) {
1742 gen_op_ld_v(s
, ot
, cpu_T
[0], cpu_A0
);
1744 gen_op_mov_v_reg(ot
, cpu_T
[0], op1
);
1747 count
= tcg_temp_new();
1748 tcg_gen_andi_tl(count
, count_in
, mask
);
1752 /* Note: we implement the Intel behaviour for shift count > 16.
1753 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1754 portion by constructing it as a 32-bit value. */
1756 tcg_gen_deposit_tl(cpu_tmp0
, cpu_T
[0], cpu_T
[1], 16, 16);
1757 tcg_gen_mov_tl(cpu_T
[1], cpu_T
[0]);
1758 tcg_gen_mov_tl(cpu_T
[0], cpu_tmp0
);
1760 tcg_gen_deposit_tl(cpu_T
[1], cpu_T
[0], cpu_T
[1], 16, 16);
1763 #ifdef TARGET_X86_64
1765 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1766 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1768 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1769 tcg_gen_shr_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1770 tcg_gen_shr_i64(cpu_T
[0], cpu_T
[0], count
);
1772 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
1773 tcg_gen_shl_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1774 tcg_gen_shl_i64(cpu_T
[0], cpu_T
[0], count
);
1775 tcg_gen_shri_i64(cpu_tmp0
, cpu_tmp0
, 32);
1776 tcg_gen_shri_i64(cpu_T
[0], cpu_T
[0], 32);
1781 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1783 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1785 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1786 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], count
);
1787 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
1789 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1791 /* Only needed if count > 16, for Intel behaviour. */
1792 tcg_gen_subfi_tl(cpu_tmp4
, 33, count
);
1793 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[1], cpu_tmp4
);
1794 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, cpu_tmp4
);
1797 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1798 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], count
);
1799 tcg_gen_shr_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
1801 tcg_gen_movi_tl(cpu_tmp4
, 0);
1802 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T
[1], count
, cpu_tmp4
,
1803 cpu_tmp4
, cpu_T
[1]);
1804 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1809 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1811 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, count
, is_right
);
1812 tcg_temp_free(count
);
1815 static void gen_shift(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int s
)
1818 gen_op_mov_v_reg(ot
, cpu_T
[1], s
);
1821 gen_rot_rm_T1(s1
, ot
, d
, 0);
1824 gen_rot_rm_T1(s1
, ot
, d
, 1);
1828 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1831 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1834 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1837 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1840 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1845 static void gen_shifti(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int c
)
1849 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1852 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1856 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1859 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1862 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1865 /* currently not optimized */
1866 tcg_gen_movi_tl(cpu_T
[1], c
);
1867 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1872 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1879 int mod
, rm
, code
, override
, must_add_seg
;
1882 override
= s
->override
;
1883 must_add_seg
= s
->addseg
;
1886 mod
= (modrm
>> 6) & 3;
1899 code
= cpu_ldub_code(env
, s
->pc
++);
1900 scale
= (code
>> 6) & 3;
1901 index
= ((code
>> 3) & 7) | REX_X(s
);
1903 index
= -1; /* no index */
1911 if ((base
& 7) == 5) {
1913 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1915 if (CODE64(s
) && !havesib
) {
1916 disp
+= s
->pc
+ s
->rip_offset
;
1923 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
1927 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1932 /* For correct popl handling with esp. */
1933 if (base
== R_ESP
&& s
->popl_esp_hack
) {
1934 disp
+= s
->popl_esp_hack
;
1937 /* Compute the address, with a minimum number of TCG ops. */
1941 sum
= cpu_regs
[index
];
1943 tcg_gen_shli_tl(cpu_A0
, cpu_regs
[index
], scale
);
1947 tcg_gen_add_tl(cpu_A0
, sum
, cpu_regs
[base
]);
1950 } else if (base
>= 0) {
1951 sum
= cpu_regs
[base
];
1953 if (TCGV_IS_UNUSED(sum
)) {
1954 tcg_gen_movi_tl(cpu_A0
, disp
);
1956 tcg_gen_addi_tl(cpu_A0
, sum
, disp
);
1961 if (base
== R_EBP
|| base
== R_ESP
) {
1968 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
,
1969 offsetof(CPUX86State
, segs
[override
].base
));
1971 if (s
->aflag
== MO_32
) {
1972 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
1974 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
1978 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
1981 if (s
->aflag
== MO_32
) {
1982 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
1990 disp
= cpu_lduw_code(env
, s
->pc
);
1992 tcg_gen_movi_tl(cpu_A0
, disp
);
1993 rm
= 0; /* avoid SS override */
2000 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2004 disp
= (int16_t)cpu_lduw_code(env
, s
->pc
);
2012 tcg_gen_add_tl(cpu_A0
, cpu_regs
[R_EBX
], cpu_regs
[R_ESI
]);
2015 tcg_gen_add_tl(cpu_A0
, cpu_regs
[R_EBX
], cpu_regs
[R_EDI
]);
2018 tcg_gen_add_tl(cpu_A0
, cpu_regs
[R_EBP
], cpu_regs
[R_ESI
]);
2021 tcg_gen_add_tl(cpu_A0
, cpu_regs
[R_EBP
], cpu_regs
[R_EDI
]);
2024 sum
= cpu_regs
[R_ESI
];
2027 sum
= cpu_regs
[R_EDI
];
2030 sum
= cpu_regs
[R_EBP
];
2034 sum
= cpu_regs
[R_EBX
];
2037 tcg_gen_addi_tl(cpu_A0
, sum
, disp
);
2038 tcg_gen_ext16u_tl(cpu_A0
, cpu_A0
);
2042 if (rm
== 2 || rm
== 3 || rm
== 6) {
2048 gen_op_addl_A0_seg(s
, override
);
2057 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2059 int mod
, rm
, base
, code
;
2061 mod
= (modrm
>> 6) & 3;
2072 code
= cpu_ldub_code(env
, s
->pc
++);
2114 /* used for LEA and MOV AX, mem */
2115 static void gen_add_A0_ds_seg(DisasContext
*s
)
2117 int override
, must_add_seg
;
2118 must_add_seg
= s
->addseg
;
2120 if (s
->override
>= 0) {
2121 override
= s
->override
;
2125 #ifdef TARGET_X86_64
2127 gen_op_addq_A0_seg(override
);
2131 gen_op_addl_A0_seg(s
, override
);
2136 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2138 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2139 TCGMemOp ot
, int reg
, int is_store
)
2143 mod
= (modrm
>> 6) & 3;
2144 rm
= (modrm
& 7) | REX_B(s
);
2148 gen_op_mov_v_reg(ot
, cpu_T
[0], reg
);
2149 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
2151 gen_op_mov_v_reg(ot
, cpu_T
[0], rm
);
2153 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
2156 gen_lea_modrm(env
, s
, modrm
);
2159 gen_op_mov_v_reg(ot
, cpu_T
[0], reg
);
2160 gen_op_st_v(s
, ot
, cpu_T
[0], cpu_A0
);
2162 gen_op_ld_v(s
, ot
, cpu_T
[0], cpu_A0
);
2164 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
2169 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
)
2175 ret
= cpu_ldub_code(env
, s
->pc
);
2179 ret
= cpu_lduw_code(env
, s
->pc
);
2183 #ifdef TARGET_X86_64
2186 ret
= cpu_ldl_code(env
, s
->pc
);
2195 static inline int insn_const_size(TCGMemOp ot
)
2204 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2206 TranslationBlock
*tb
;
2209 pc
= s
->cs_base
+ eip
;
2211 /* NOTE: we handle the case where the TB spans two pages here */
2212 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2213 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2214 /* jump to same page: we can use a direct jump */
2215 tcg_gen_goto_tb(tb_num
);
2217 tcg_gen_exit_tb((uintptr_t)tb
+ tb_num
);
2219 /* jump to another page: currently not optimized */
2225 static inline void gen_jcc(DisasContext
*s
, int b
,
2226 target_ulong val
, target_ulong next_eip
)
2231 l1
= gen_new_label();
2234 gen_goto_tb(s
, 0, next_eip
);
2237 gen_goto_tb(s
, 1, val
);
2238 s
->is_jmp
= DISAS_TB_JUMP
;
2240 l1
= gen_new_label();
2241 l2
= gen_new_label();
2244 gen_jmp_im(next_eip
);
2254 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
, int b
,
2259 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2261 cc
= gen_prepare_cc(s
, b
, cpu_T
[1]);
2262 if (cc
.mask
!= -1) {
2263 TCGv t0
= tcg_temp_new();
2264 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2268 cc
.reg2
= tcg_const_tl(cc
.imm
);
2271 tcg_gen_movcond_tl(cc
.cond
, cpu_T
[0], cc
.reg
, cc
.reg2
,
2272 cpu_T
[0], cpu_regs
[reg
]);
2273 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
2275 if (cc
.mask
!= -1) {
2276 tcg_temp_free(cc
.reg
);
2279 tcg_temp_free(cc
.reg2
);
2283 static inline void gen_op_movl_T0_seg(int seg_reg
)
2285 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2286 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2289 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2291 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2292 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2293 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2294 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2295 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2296 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2299 /* move T0 to seg_reg and compute if the CPU state may change. Never
2300 call this function with seg_reg == R_CS */
2301 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2303 if (s
->pe
&& !s
->vm86
) {
2304 /* XXX: optimize by finding processor state dynamically */
2305 gen_update_cc_op(s
);
2306 gen_jmp_im(cur_eip
);
2307 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2308 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2309 /* abort translation because the addseg value may change or
2310 because ss32 may change. For R_SS, translation must always
2311 stop as a special handling must be done to disable hardware
2312 interrupts for the next instruction */
2313 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2314 s
->is_jmp
= DISAS_TB_JUMP
;
2316 gen_op_movl_seg_T0_vm(seg_reg
);
2317 if (seg_reg
== R_SS
)
2318 s
->is_jmp
= DISAS_TB_JUMP
;
2322 static inline int svm_is_rep(int prefixes
)
2324 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2328 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2329 uint32_t type
, uint64_t param
)
2331 /* no SVM activated; fast case */
2332 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2334 gen_update_cc_op(s
);
2335 gen_jmp_im(pc_start
- s
->cs_base
);
2336 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2337 tcg_const_i64(param
));
2341 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2343 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2346 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2348 #ifdef TARGET_X86_64
2350 gen_op_add_reg_im(MO_64
, R_ESP
, addend
);
2354 gen_op_add_reg_im(MO_32
, R_ESP
, addend
);
2356 gen_op_add_reg_im(MO_16
, R_ESP
, addend
);
2360 /* Generate a push. It depends on ss32, addseg and dflag. */
2361 static void gen_push_v(DisasContext
*s
, TCGv val
)
2363 TCGMemOp a_ot
, d_ot
= mo_pushpop(s
, s
->dflag
);
2364 int size
= 1 << d_ot
;
2365 TCGv new_esp
= cpu_A0
;
2367 tcg_gen_subi_tl(cpu_A0
, cpu_regs
[R_ESP
], size
);
2371 } else if (s
->ss32
) {
2375 tcg_gen_mov_tl(new_esp
, cpu_A0
);
2376 gen_op_addl_A0_seg(s
, R_SS
);
2378 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
2383 tcg_gen_ext16u_tl(cpu_A0
, cpu_A0
);
2384 tcg_gen_mov_tl(new_esp
, cpu_A0
);
2385 gen_op_addl_A0_seg(s
, R_SS
);
2388 gen_op_st_v(s
, d_ot
, val
, cpu_A0
);
2389 gen_op_mov_reg_v(a_ot
, R_ESP
, new_esp
);
2392 /* two step pop is necessary for precise exceptions */
2393 static TCGMemOp
gen_pop_T0(DisasContext
*s
)
2395 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2399 addr
= cpu_regs
[R_ESP
];
2400 } else if (!s
->ss32
) {
2401 tcg_gen_ext16u_tl(cpu_A0
, cpu_regs
[R_ESP
]);
2402 gen_op_addl_A0_seg(s
, R_SS
);
2403 } else if (s
->addseg
) {
2404 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_ESP
]);
2405 gen_op_addl_A0_seg(s
, R_SS
);
2407 tcg_gen_ext32u_tl(cpu_A0
, cpu_regs
[R_ESP
]);
2410 gen_op_ld_v(s
, d_ot
, cpu_T
[0], addr
);
2414 static void gen_pop_update(DisasContext
*s
, TCGMemOp ot
)
2416 gen_stack_update(s
, 1 << ot
);
2419 static void gen_stack_A0(DisasContext
*s
)
2421 gen_op_movl_A0_reg(R_ESP
);
2423 tcg_gen_ext16u_tl(cpu_A0
, cpu_A0
);
2424 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2426 gen_op_addl_A0_seg(s
, R_SS
);
2429 /* NOTE: wrap around in 16 bit not fully handled */
2430 static void gen_pusha(DisasContext
*s
)
2433 gen_op_movl_A0_reg(R_ESP
);
2434 gen_op_addl_A0_im(-8 << s
->dflag
);
2436 tcg_gen_ext16u_tl(cpu_A0
, cpu_A0
);
2437 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2439 gen_op_addl_A0_seg(s
, R_SS
);
2440 for(i
= 0;i
< 8; i
++) {
2441 gen_op_mov_v_reg(MO_32
, cpu_T
[0], 7 - i
);
2442 gen_op_st_v(s
, s
->dflag
, cpu_T
[0], cpu_A0
);
2443 gen_op_addl_A0_im(1 << s
->dflag
);
2445 gen_op_mov_reg_v(MO_16
+ s
->ss32
, R_ESP
, cpu_T
[1]);
2448 /* NOTE: wrap around in 16 bit not fully handled */
2449 static void gen_popa(DisasContext
*s
)
2452 gen_op_movl_A0_reg(R_ESP
);
2454 tcg_gen_ext16u_tl(cpu_A0
, cpu_A0
);
2455 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2456 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 8 << s
->dflag
);
2458 gen_op_addl_A0_seg(s
, R_SS
);
2459 for(i
= 0;i
< 8; i
++) {
2460 /* ESP is not reloaded */
2462 gen_op_ld_v(s
, s
->dflag
, cpu_T
[0], cpu_A0
);
2463 gen_op_mov_reg_v(s
->dflag
, 7 - i
, cpu_T
[0]);
2465 gen_op_addl_A0_im(1 << s
->dflag
);
2467 gen_op_mov_reg_v(MO_16
+ s
->ss32
, R_ESP
, cpu_T
[1]);
2470 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2472 TCGMemOp ot
= mo_pushpop(s
, s
->dflag
);
2473 int opsize
= 1 << ot
;
2476 #ifdef TARGET_X86_64
2478 gen_op_movl_A0_reg(R_ESP
);
2479 gen_op_addq_A0_im(-opsize
);
2480 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2483 gen_op_mov_v_reg(MO_32
, cpu_T
[0], R_EBP
);
2484 gen_op_st_v(s
, ot
, cpu_T
[0], cpu_A0
);
2486 /* XXX: must save state */
2487 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2488 tcg_const_i32((ot
== MO_64
)),
2491 gen_op_mov_reg_v(ot
, R_EBP
, cpu_T
[1]);
2492 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2493 gen_op_mov_reg_v(MO_64
, R_ESP
, cpu_T
[1]);
2497 gen_op_movl_A0_reg(R_ESP
);
2498 gen_op_addl_A0_im(-opsize
);
2500 tcg_gen_ext16u_tl(cpu_A0
, cpu_A0
);
2501 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2503 gen_op_addl_A0_seg(s
, R_SS
);
2505 gen_op_mov_v_reg(MO_32
, cpu_T
[0], R_EBP
);
2506 gen_op_st_v(s
, ot
, cpu_T
[0], cpu_A0
);
2508 /* XXX: must save state */
2509 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2510 tcg_const_i32(s
->dflag
- 1),
2513 gen_op_mov_reg_v(ot
, R_EBP
, cpu_T
[1]);
2514 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2515 gen_op_mov_reg_v(MO_16
+ s
->ss32
, R_ESP
, cpu_T
[1]);
2519 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2521 gen_update_cc_op(s
);
2522 gen_jmp_im(cur_eip
);
2523 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2524 s
->is_jmp
= DISAS_TB_JUMP
;
2527 /* an interrupt is different from an exception because of the
2529 static void gen_interrupt(DisasContext
*s
, int intno
,
2530 target_ulong cur_eip
, target_ulong next_eip
)
2532 gen_update_cc_op(s
);
2533 gen_jmp_im(cur_eip
);
2534 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2535 tcg_const_i32(next_eip
- cur_eip
));
2536 s
->is_jmp
= DISAS_TB_JUMP
;
2539 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2541 gen_update_cc_op(s
);
2542 gen_jmp_im(cur_eip
);
2543 gen_helper_debug(cpu_env
);
2544 s
->is_jmp
= DISAS_TB_JUMP
;
2547 /* generate a generic end of block. Trace exception is also generated
2549 static void gen_eob(DisasContext
*s
)
2551 gen_update_cc_op(s
);
2552 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2553 gen_helper_reset_inhibit_irq(cpu_env
);
2555 if (s
->tb
->flags
& HF_RF_MASK
) {
2556 gen_helper_reset_rf(cpu_env
);
2558 if (s
->singlestep_enabled
) {
2559 gen_helper_debug(cpu_env
);
2561 gen_helper_single_step(cpu_env
);
2565 s
->is_jmp
= DISAS_TB_JUMP
;
2568 /* generate a jump to eip. No segment change must happen before as a
2569 direct call to the next block may occur */
2570 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2572 gen_update_cc_op(s
);
2573 set_cc_op(s
, CC_OP_DYNAMIC
);
2575 gen_goto_tb(s
, tb_num
, eip
);
2576 s
->is_jmp
= DISAS_TB_JUMP
;
2583 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2585 gen_jmp_tb(s
, eip
, 0);
2588 static inline void gen_ldq_env_A0(DisasContext
*s
, int offset
)
2590 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
2591 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2594 static inline void gen_stq_env_A0(DisasContext
*s
, int offset
)
2596 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2597 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
2600 static inline void gen_ldo_env_A0(DisasContext
*s
, int offset
)
2602 int mem_index
= s
->mem_index
;
2603 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, mem_index
, MO_LEQ
);
2604 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2605 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2606 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
, MO_LEQ
);
2607 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2610 static inline void gen_sto_env_A0(DisasContext
*s
, int offset
)
2612 int mem_index
= s
->mem_index
;
2613 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2614 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, mem_index
, MO_LEQ
);
2615 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2616 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2617 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
, MO_LEQ
);
2620 static inline void gen_op_movo(int d_offset
, int s_offset
)
2622 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ offsetof(XMMReg
, XMM_Q(0)));
2623 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ offsetof(XMMReg
, XMM_Q(0)));
2624 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ offsetof(XMMReg
, XMM_Q(1)));
2625 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ offsetof(XMMReg
, XMM_Q(1)));
2628 static inline void gen_op_movq(int d_offset
, int s_offset
)
2630 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2631 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2634 static inline void gen_op_movl(int d_offset
, int s_offset
)
2636 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2637 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2640 static inline void gen_op_movq_env_0(int d_offset
)
2642 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2643 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
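
/* sse_op_table1 is indexed by the second opcode byte and by the
   mandatory prefix: column 0 = no prefix, 1 = 0x66, 2 = 0xF3, 3 = 0xF2
   (the b1 value computed in gen_sse() below).  SSE_SPECIAL entries are
   decoded by hand; SSE_DUMMY entries only need the common MMX/SSE
   state checks. */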
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
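
/* sse_op_table2 handles the shift-by-immediate group (opcodes 0x71,
   0x72 and 0x73): the first index is ((b - 1) & 3) * 8 plus the ModRM
   reg field (selecting psrl/psra/psll on words, dwords or qwords), the
   second index is again the mandatory-prefix column b1. */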
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
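
/* sse_op_table3a/b hold the scalar int<->float conversions: the 'a'
   tables take an integer source (cvtsi2ss/sd and the 64-bit cvtsq2ss/sd
   forms), the 'b' tables return an integer (cvt[t]ss2si, cvt[t]sd2si and
   their 64-bit forms); the 'q' variants are only built on x86-64. */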
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif

static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
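
/* sse_op_table5 maps the 3DNow! suffix byte (the immediate that follows
   the operands of 0f 0f instructions) to the corresponding helper. */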
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
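
/* sse_op_table6 and sse_op_table7 below pair each opcode with the CPUID
   feature bit that must be available; gen_sse() checks the ext_mask
   field against s->cpuid_ext_features before dispatching. */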
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
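
/* gen_sse() decodes and emits TCG code for the 0x0F-prefixed MMX, SSE,
   SSSE3/SSE4 and 3DNow! opcodes, dispatching either through the tables
   above or through the SSE_SPECIAL cases handled inline below. */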
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto illegal_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return;
    }
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
            goto illegal_op;
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }
3041 modrm
= cpu_ldub_code(env
, s
->pc
++);
3042 reg
= ((modrm
>> 3) & 7);
3045 mod
= (modrm
>> 6) & 3;
3046 if (sse_fn_epp
== SSE_SPECIAL
) {
3049 case 0x0e7: /* movntq */
3052 gen_lea_modrm(env
, s
, modrm
);
3053 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3055 case 0x1e7: /* movntdq */
3056 case 0x02b: /* movntps */
3057 case 0x12b: /* movntps */
3060 gen_lea_modrm(env
, s
, modrm
);
3061 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3063 case 0x3f0: /* lddqu */
3066 gen_lea_modrm(env
, s
, modrm
);
3067 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3069 case 0x22b: /* movntss */
3070 case 0x32b: /* movntsd */
3073 gen_lea_modrm(env
, s
, modrm
);
3075 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3076 xmm_regs
[reg
].XMM_Q(0)));
3078 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3079 xmm_regs
[reg
].XMM_L(0)));
3080 gen_op_st_v(s
, MO_32
, cpu_T
[0], cpu_A0
);
3083 case 0x6e: /* movd mm, ea */
3084 #ifdef TARGET_X86_64
3085 if (s
->dflag
== MO_64
) {
3086 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3087 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3091 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3092 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3093 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3094 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3095 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3098 case 0x16e: /* movd xmm, ea */
3099 #ifdef TARGET_X86_64
3100 if (s
->dflag
== MO_64
) {
3101 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3102 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3103 offsetof(CPUX86State
,xmm_regs
[reg
]));
3104 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3108 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3109 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3110 offsetof(CPUX86State
,xmm_regs
[reg
]));
3111 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3112 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3115 case 0x6f: /* movq mm, ea */
3117 gen_lea_modrm(env
, s
, modrm
);
3118 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3121 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3122 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3123 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3124 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3127 case 0x010: /* movups */
3128 case 0x110: /* movupd */
3129 case 0x028: /* movaps */
3130 case 0x128: /* movapd */
3131 case 0x16f: /* movdqa xmm, ea */
3132 case 0x26f: /* movdqu xmm, ea */
3134 gen_lea_modrm(env
, s
, modrm
);
3135 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3137 rm
= (modrm
& 7) | REX_B(s
);
3138 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3139 offsetof(CPUX86State
,xmm_regs
[rm
]));
3142 case 0x210: /* movss xmm, ea */
3144 gen_lea_modrm(env
, s
, modrm
);
3145 gen_op_ld_v(s
, MO_32
, cpu_T
[0], cpu_A0
);
3146 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3147 tcg_gen_movi_tl(cpu_T
[0], 0);
3148 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3149 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3150 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3152 rm
= (modrm
& 7) | REX_B(s
);
3153 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3154 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3157 case 0x310: /* movsd xmm, ea */
3159 gen_lea_modrm(env
, s
, modrm
);
3160 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3161 xmm_regs
[reg
].XMM_Q(0)));
3162 tcg_gen_movi_tl(cpu_T
[0], 0);
3163 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3164 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3166 rm
= (modrm
& 7) | REX_B(s
);
3167 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3168 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3171 case 0x012: /* movlps */
3172 case 0x112: /* movlpd */
3174 gen_lea_modrm(env
, s
, modrm
);
3175 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3176 xmm_regs
[reg
].XMM_Q(0)));
3179 rm
= (modrm
& 7) | REX_B(s
);
3180 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3181 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3184 case 0x212: /* movsldup */
3186 gen_lea_modrm(env
, s
, modrm
);
3187 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3189 rm
= (modrm
& 7) | REX_B(s
);
3190 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3191 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3192 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3193 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3195 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3196 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3197 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3198 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3200 case 0x312: /* movddup */
3202 gen_lea_modrm(env
, s
, modrm
);
3203 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3204 xmm_regs
[reg
].XMM_Q(0)));
3206 rm
= (modrm
& 7) | REX_B(s
);
3207 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3208 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3210 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3211 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3213 case 0x016: /* movhps */
3214 case 0x116: /* movhpd */
3216 gen_lea_modrm(env
, s
, modrm
);
3217 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3218 xmm_regs
[reg
].XMM_Q(1)));
3221 rm
= (modrm
& 7) | REX_B(s
);
3222 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3223 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3226 case 0x216: /* movshdup */
3228 gen_lea_modrm(env
, s
, modrm
);
3229 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3231 rm
= (modrm
& 7) | REX_B(s
);
3232 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3233 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3234 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3235 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3237 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3238 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3239 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3240 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3245 int bit_index
, field_length
;
3247 if (b1
== 1 && reg
!= 0)
3249 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3250 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3251 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3252 offsetof(CPUX86State
,xmm_regs
[reg
]));
3254 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3255 tcg_const_i32(bit_index
),
3256 tcg_const_i32(field_length
));
3258 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3259 tcg_const_i32(bit_index
),
3260 tcg_const_i32(field_length
));
3263 case 0x7e: /* movd ea, mm */
3264 #ifdef TARGET_X86_64
3265 if (s
->dflag
== MO_64
) {
3266 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3267 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3268 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3272 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3273 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3274 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3277 case 0x17e: /* movd ea, xmm */
3278 #ifdef TARGET_X86_64
3279 if (s
->dflag
== MO_64
) {
3280 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3281 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3282 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3286 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3287 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3288 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3291 case 0x27e: /* movq xmm, ea */
3293 gen_lea_modrm(env
, s
, modrm
);
3294 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3295 xmm_regs
[reg
].XMM_Q(0)));
3297 rm
= (modrm
& 7) | REX_B(s
);
3298 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3299 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3301 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3303 case 0x7f: /* movq ea, mm */
3305 gen_lea_modrm(env
, s
, modrm
);
3306 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3309 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3310 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3313 case 0x011: /* movups */
3314 case 0x111: /* movupd */
3315 case 0x029: /* movaps */
3316 case 0x129: /* movapd */
3317 case 0x17f: /* movdqa ea, xmm */
3318 case 0x27f: /* movdqu ea, xmm */
3320 gen_lea_modrm(env
, s
, modrm
);
3321 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3323 rm
= (modrm
& 7) | REX_B(s
);
3324 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3325 offsetof(CPUX86State
,xmm_regs
[reg
]));
3328 case 0x211: /* movss ea, xmm */
3330 gen_lea_modrm(env
, s
, modrm
);
3331 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3332 gen_op_st_v(s
, MO_32
, cpu_T
[0], cpu_A0
);
3334 rm
= (modrm
& 7) | REX_B(s
);
3335 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3336 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3339 case 0x311: /* movsd ea, xmm */
3341 gen_lea_modrm(env
, s
, modrm
);
3342 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3343 xmm_regs
[reg
].XMM_Q(0)));
3345 rm
= (modrm
& 7) | REX_B(s
);
3346 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3347 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3350 case 0x013: /* movlps */
3351 case 0x113: /* movlpd */
3353 gen_lea_modrm(env
, s
, modrm
);
3354 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3355 xmm_regs
[reg
].XMM_Q(0)));
3360 case 0x017: /* movhps */
3361 case 0x117: /* movhpd */
3363 gen_lea_modrm(env
, s
, modrm
);
3364 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3365 xmm_regs
[reg
].XMM_Q(1)));
3370 case 0x71: /* shift mm, im */
3373 case 0x171: /* shift xmm, im */
3379 val
= cpu_ldub_code(env
, s
->pc
++);
3381 tcg_gen_movi_tl(cpu_T
[0], val
);
3382 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3383 tcg_gen_movi_tl(cpu_T
[0], 0);
3384 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3385 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3387 tcg_gen_movi_tl(cpu_T
[0], val
);
3388 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3389 tcg_gen_movi_tl(cpu_T
[0], 0);
3390 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3391 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3393 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3394 (((modrm
>> 3)) & 7)][b1
];
3399 rm
= (modrm
& 7) | REX_B(s
);
3400 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3403 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3405 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3406 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3407 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3409 case 0x050: /* movmskps */
3410 rm
= (modrm
& 7) | REX_B(s
);
3411 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3412 offsetof(CPUX86State
,xmm_regs
[rm
]));
3413 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3414 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3416 case 0x150: /* movmskpd */
3417 rm
= (modrm
& 7) | REX_B(s
);
3418 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3419 offsetof(CPUX86State
,xmm_regs
[rm
]));
3420 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3421 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3423 case 0x02a: /* cvtpi2ps */
3424 case 0x12a: /* cvtpi2pd */
3425 gen_helper_enter_mmx(cpu_env
);
3427 gen_lea_modrm(env
, s
, modrm
);
3428 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3429 gen_ldq_env_A0(s
, op2_offset
);
3432 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3434 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3435 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3436 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3439 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3443 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3447 case 0x22a: /* cvtsi2ss */
3448 case 0x32a: /* cvtsi2sd */
3449 ot
= mo_64_32(s
->dflag
);
3450 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3451 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3452 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3454 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3455 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3456 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3458 #ifdef TARGET_X86_64
3459 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3460 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T
[0]);
3466 case 0x02c: /* cvttps2pi */
3467 case 0x12c: /* cvttpd2pi */
3468 case 0x02d: /* cvtps2pi */
3469 case 0x12d: /* cvtpd2pi */
3470 gen_helper_enter_mmx(cpu_env
);
3472 gen_lea_modrm(env
, s
, modrm
);
3473 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3474 gen_ldo_env_A0(s
, op2_offset
);
3476 rm
= (modrm
& 7) | REX_B(s
);
3477 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3479 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3480 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3481 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3484 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3487 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3490 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3493 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3497 case 0x22c: /* cvttss2si */
3498 case 0x32c: /* cvttsd2si */
3499 case 0x22d: /* cvtss2si */
3500 case 0x32d: /* cvtsd2si */
3501 ot
= mo_64_32(s
->dflag
);
3503 gen_lea_modrm(env
, s
, modrm
);
3505 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.XMM_Q(0)));
3507 gen_op_ld_v(s
, MO_32
, cpu_T
[0], cpu_A0
);
3508 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3510 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3512 rm
= (modrm
& 7) | REX_B(s
);
3513 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3515 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3517 SSEFunc_i_ep sse_fn_i_ep
=
3518 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3519 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3520 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3522 #ifdef TARGET_X86_64
3523 SSEFunc_l_ep sse_fn_l_ep
=
3524 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3525 sse_fn_l_ep(cpu_T
[0], cpu_env
, cpu_ptr0
);
3530 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
3532 case 0xc4: /* pinsrw */
3535 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3536 val
= cpu_ldub_code(env
, s
->pc
++);
3539 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3540 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3543 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3544 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3547 case 0xc5: /* pextrw */
3551 ot
= mo_64_32(s
->dflag
);
3552 val
= cpu_ldub_code(env
, s
->pc
++);
3555 rm
= (modrm
& 7) | REX_B(s
);
3556 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3557 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3561 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3562 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3564 reg
= ((modrm
>> 3) & 7) | rex_r
;
3565 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
3567 case 0x1d6: /* movq ea, xmm */
3569 gen_lea_modrm(env
, s
, modrm
);
3570 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3571 xmm_regs
[reg
].XMM_Q(0)));
3573 rm
= (modrm
& 7) | REX_B(s
);
3574 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3575 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3576 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3579 case 0x2d6: /* movq2dq */
3580 gen_helper_enter_mmx(cpu_env
);
3582 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3583 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3584 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3586 case 0x3d6: /* movdq2q */
3587 gen_helper_enter_mmx(cpu_env
);
3588 rm
= (modrm
& 7) | REX_B(s
);
3589 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3590 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3592 case 0xd7: /* pmovmskb */
3597 rm
= (modrm
& 7) | REX_B(s
);
3598 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3599 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3602 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3603 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3605 reg
= ((modrm
>> 3) & 7) | rex_r
;
3606 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3612 if ((b
& 0xf0) == 0xf0) {
3615 modrm
= cpu_ldub_code(env
, s
->pc
++);
3617 reg
= ((modrm
>> 3) & 7) | rex_r
;
3618 mod
= (modrm
>> 6) & 3;
3623 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3627 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3631 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3633 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3635 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3636 gen_lea_modrm(env
, s
, modrm
);
3638 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3639 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3640 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3641 gen_ldq_env_A0(s
, op2_offset
+
3642 offsetof(XMMReg
, XMM_Q(0)));
3644 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3645 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3646 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
3647 s
->mem_index
, MO_LEUL
);
3648 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3649 offsetof(XMMReg
, XMM_L(0)));
3651 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3652 tcg_gen_qemu_ld_tl(cpu_tmp0
, cpu_A0
,
3653 s
->mem_index
, MO_LEUW
);
3654 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3655 offsetof(XMMReg
, XMM_W(0)));
3657 case 0x2a: /* movntqda */
3658 gen_ldo_env_A0(s
, op1_offset
);
3661 gen_ldo_env_A0(s
, op2_offset
);
3665 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3667 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3669 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3670 gen_lea_modrm(env
, s
, modrm
);
3671 gen_ldq_env_A0(s
, op2_offset
);
3674 if (sse_fn_epp
== SSE_SPECIAL
) {
3678 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3679 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3680 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3683 set_cc_op(s
, CC_OP_EFLAGS
);
3690 /* Various integer extensions at 0f 38 f[0-f]. */
3691 b
= modrm
| (b1
<< 8);
3692 modrm
= cpu_ldub_code(env
, s
->pc
++);
3693 reg
= ((modrm
>> 3) & 7) | rex_r
;
3696 case 0x3f0: /* crc32 Gd,Eb */
3697 case 0x3f1: /* crc32 Gd,Ey */
3699 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3702 if ((b
& 0xff) == 0xf0) {
3704 } else if (s
->dflag
!= MO_64
) {
3705 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3710 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[reg
]);
3711 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3712 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3713 cpu_T
[0], tcg_const_i32(8 << ot
));
3715 ot
= mo_64_32(s
->dflag
);
3716 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
3719 case 0x1f0: /* crc32 or movbe */
3721 /* For these insns, the f3 prefix is supposed to have priority
3722 over the 66 prefix, but that's not what we implement above
3724 if (s
->prefix
& PREFIX_REPNZ
) {
3728 case 0x0f0: /* movbe Gy,My */
3729 case 0x0f1: /* movbe My,Gy */
3730 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3733 if (s
->dflag
!= MO_64
) {
3734 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3739 gen_lea_modrm(env
, s
, modrm
);
3741 tcg_gen_qemu_ld_tl(cpu_T
[0], cpu_A0
,
3742 s
->mem_index
, ot
| MO_BE
);
3743 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
3745 tcg_gen_qemu_st_tl(cpu_regs
[reg
], cpu_A0
,
3746 s
->mem_index
, ot
| MO_BE
);
3750 case 0x0f2: /* andn Gy, By, Ey */
3751 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3752 || !(s
->prefix
& PREFIX_VEX
)
3756 ot
= mo_64_32(s
->dflag
);
3757 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3758 tcg_gen_andc_tl(cpu_T
[0], cpu_regs
[s
->vex_v
], cpu_T
[0]);
3759 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
3760 gen_op_update1_cc();
3761 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3764 case 0x0f7: /* bextr Gy, Ey, By */
3765 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3766 || !(s
->prefix
& PREFIX_VEX
)
3770 ot
= mo_64_32(s
->dflag
);
3774 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3775 /* Extract START, and shift the operand.
3776 Shifts larger than operand size get zeros. */
3777 tcg_gen_ext8u_tl(cpu_A0
, cpu_regs
[s
->vex_v
]);
3778 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
3780 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3781 zero
= tcg_const_tl(0);
3782 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_T
[0], cpu_A0
, bound
,
3784 tcg_temp_free(zero
);
3786 /* Extract the LEN into a mask. Lengths larger than
3787 operand size get all ones. */
3788 tcg_gen_shri_tl(cpu_A0
, cpu_regs
[s
->vex_v
], 8);
3789 tcg_gen_ext8u_tl(cpu_A0
, cpu_A0
);
3790 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_A0
, cpu_A0
, bound
,
3792 tcg_temp_free(bound
);
3793 tcg_gen_movi_tl(cpu_T
[1], 1);
3794 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_A0
);
3795 tcg_gen_subi_tl(cpu_T
[1], cpu_T
[1], 1);
3796 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
3798 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
3799 gen_op_update1_cc();
3800 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3804 case 0x0f5: /* bzhi Gy, Ey, By */
3805 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3806 || !(s
->prefix
& PREFIX_VEX
)
3810 ot
= mo_64_32(s
->dflag
);
3811 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3812 tcg_gen_ext8u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
3814 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3815 /* Note that since we're using BMILG (in order to get O
3816 cleared) we need to store the inverse into C. */
3817 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
3819 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_T
[1], cpu_T
[1],
3820 bound
, bound
, cpu_T
[1]);
3821 tcg_temp_free(bound
);
3823 tcg_gen_movi_tl(cpu_A0
, -1);
3824 tcg_gen_shl_tl(cpu_A0
, cpu_A0
, cpu_T
[1]);
3825 tcg_gen_andc_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
3826 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
3827 gen_op_update1_cc();
3828 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3831 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3832 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3833 || !(s
->prefix
& PREFIX_VEX
)
3837 ot
= mo_64_32(s
->dflag
);
3838 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3841 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3842 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EDX
]);
3843 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
3844 cpu_tmp2_i32
, cpu_tmp3_i32
);
3845 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], cpu_tmp2_i32
);
3846 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp3_i32
);
3848 #ifdef TARGET_X86_64
3850 tcg_gen_mulu2_i64(cpu_regs
[s
->vex_v
], cpu_regs
[reg
],
3851 cpu_T
[0], cpu_regs
[R_EDX
]);
3857 case 0x3f5: /* pdep Gy, By, Ey */
3858 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3859 || !(s
->prefix
& PREFIX_VEX
)
3863 ot
= mo_64_32(s
->dflag
);
3864 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3865 /* Note that by zero-extending the mask operand, we
3866 automatically handle zero-extending the result. */
3868 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
3870 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
3872 gen_helper_pdep(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
3875 case 0x2f5: /* pext Gy, By, Ey */
3876 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3877 || !(s
->prefix
& PREFIX_VEX
)
3881 ot
= mo_64_32(s
->dflag
);
3882 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3883 /* Note that by zero-extending the mask operand, we
3884 automatically handle zero-extending the result. */
3886 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
3888 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
3890 gen_helper_pext(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
3893 case 0x1f6: /* adcx Gy, Ey */
3894 case 0x2f6: /* adox Gy, Ey */
3895 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
3898 TCGv carry_in
, carry_out
, zero
;
3901 ot
= mo_64_32(s
->dflag
);
3902 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3904 /* Re-use the carry-out from a previous round. */
3905 TCGV_UNUSED(carry_in
);
3906 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
3910 carry_in
= cpu_cc_dst
;
3911 end_op
= CC_OP_ADCX
;
3913 end_op
= CC_OP_ADCOX
;
3918 end_op
= CC_OP_ADCOX
;
3920 carry_in
= cpu_cc_src2
;
3921 end_op
= CC_OP_ADOX
;
3925 end_op
= CC_OP_ADCOX
;
3926 carry_in
= carry_out
;
3929 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
3932 /* If we can't reuse carry-out, get it out of EFLAGS. */
3933 if (TCGV_IS_UNUSED(carry_in
)) {
3934 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
3935 gen_compute_eflags(s
);
3937 carry_in
= cpu_tmp0
;
3938 tcg_gen_shri_tl(carry_in
, cpu_cc_src
,
3939 ctz32(b
== 0x1f6 ? CC_C
: CC_O
));
3940 tcg_gen_andi_tl(carry_in
, carry_in
, 1);
3944 #ifdef TARGET_X86_64
3946 /* If we know TL is 64-bit, and we want a 32-bit
3947 result, just do everything in 64-bit arithmetic. */
3948 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
3949 tcg_gen_ext32u_i64(cpu_T
[0], cpu_T
[0]);
3950 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], cpu_regs
[reg
]);
3951 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], carry_in
);
3952 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_T
[0]);
3953 tcg_gen_shri_i64(carry_out
, cpu_T
[0], 32);
3957 /* Otherwise compute the carry-out in two steps. */
3958 zero
= tcg_const_tl(0);
3959 tcg_gen_add2_tl(cpu_T
[0], carry_out
,
3962 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
3963 cpu_regs
[reg
], carry_out
,
3965 tcg_temp_free(zero
);
3968 set_cc_op(s
, end_op
);
3972 case 0x1f7: /* shlx Gy, Ey, By */
3973 case 0x2f7: /* sarx Gy, Ey, By */
3974 case 0x3f7: /* shrx Gy, Ey, By */
3975 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3976 || !(s
->prefix
& PREFIX_VEX
)
3980 ot
= mo_64_32(s
->dflag
);
3981 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3983 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 63);
3985 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 31);
3988 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
3989 } else if (b
== 0x2f7) {
3991 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
3993 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
3996 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
3998 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4000 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
4006 case 0x3f3: /* Group 17 */
4007 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4008 || !(s
->prefix
& PREFIX_VEX
)
4012 ot
= mo_64_32(s
->dflag
);
4013 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4016 case 1: /* blsr By,Ey */
4017 tcg_gen_neg_tl(cpu_T
[1], cpu_T
[0]);
4018 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4019 gen_op_mov_reg_v(ot
, s
->vex_v
, cpu_T
[0]);
4020 gen_op_update2_cc();
4021 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4024 case 2: /* blsmsk By,Ey */
4025 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4026 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4027 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4028 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4029 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4032 case 3: /* blsi By, Ey */
4033 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4034 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4035 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4036 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4037 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4053 modrm
= cpu_ldub_code(env
, s
->pc
++);
4055 reg
= ((modrm
>> 3) & 7) | rex_r
;
4056 mod
= (modrm
>> 6) & 3;
4061 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4065 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4068 if (sse_fn_eppi
== SSE_SPECIAL
) {
4069 ot
= mo_64_32(s
->dflag
);
4070 rm
= (modrm
& 7) | REX_B(s
);
4072 gen_lea_modrm(env
, s
, modrm
);
4073 reg
= ((modrm
>> 3) & 7) | rex_r
;
4074 val
= cpu_ldub_code(env
, s
->pc
++);
4076 case 0x14: /* pextrb */
4077 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4078 xmm_regs
[reg
].XMM_B(val
& 15)));
4080 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4082 tcg_gen_qemu_st_tl(cpu_T
[0], cpu_A0
,
4083 s
->mem_index
, MO_UB
);
4086 case 0x15: /* pextrw */
4087 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4088 xmm_regs
[reg
].XMM_W(val
& 7)));
4090 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4092 tcg_gen_qemu_st_tl(cpu_T
[0], cpu_A0
,
4093 s
->mem_index
, MO_LEUW
);
4097 if (ot
== MO_32
) { /* pextrd */
4098 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4099 offsetof(CPUX86State
,
4100 xmm_regs
[reg
].XMM_L(val
& 3)));
4102 tcg_gen_extu_i32_tl(cpu_regs
[rm
], cpu_tmp2_i32
);
4104 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
4105 s
->mem_index
, MO_LEUL
);
4107 } else { /* pextrq */
4108 #ifdef TARGET_X86_64
4109 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4110 offsetof(CPUX86State
,
4111 xmm_regs
[reg
].XMM_Q(val
& 1)));
4113 tcg_gen_mov_i64(cpu_regs
[rm
], cpu_tmp1_i64
);
4115 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
4116 s
->mem_index
, MO_LEQ
);
4123 case 0x17: /* extractps */
4124 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4125 xmm_regs
[reg
].XMM_L(val
& 3)));
4127 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4129 tcg_gen_qemu_st_tl(cpu_T
[0], cpu_A0
,
4130 s
->mem_index
, MO_LEUL
);
4133 case 0x20: /* pinsrb */
4135 gen_op_mov_v_reg(MO_32
, cpu_T
[0], rm
);
4137 tcg_gen_qemu_ld_tl(cpu_T
[0], cpu_A0
,
4138 s
->mem_index
, MO_UB
);
4140 tcg_gen_st8_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4141 xmm_regs
[reg
].XMM_B(val
& 15)));
4143 case 0x21: /* insertps */
4145 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4146 offsetof(CPUX86State
,xmm_regs
[rm
]
4147 .XMM_L((val
>> 6) & 3)));
4149 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4150 s
->mem_index
, MO_LEUL
);
4152 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4153 offsetof(CPUX86State
,xmm_regs
[reg
]
4154 .XMM_L((val
>> 4) & 3)));
4156 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4157 cpu_env
, offsetof(CPUX86State
,
4158 xmm_regs
[reg
].XMM_L(0)));
4160 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4161 cpu_env
, offsetof(CPUX86State
,
4162 xmm_regs
[reg
].XMM_L(1)));
4164 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4165 cpu_env
, offsetof(CPUX86State
,
4166 xmm_regs
[reg
].XMM_L(2)));
4168 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4169 cpu_env
, offsetof(CPUX86State
,
4170 xmm_regs
[reg
].XMM_L(3)));
4173 if (ot
== MO_32
) { /* pinsrd */
4175 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[rm
]);
4177 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4178 s
->mem_index
, MO_LEUL
);
4180 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4181 offsetof(CPUX86State
,
4182 xmm_regs
[reg
].XMM_L(val
& 3)));
4183 } else { /* pinsrq */
4184 #ifdef TARGET_X86_64
4186 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4188 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
4189 s
->mem_index
, MO_LEQ
);
4191 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4192 offsetof(CPUX86State
,
4193 xmm_regs
[reg
].XMM_Q(val
& 1)));
4204 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4206 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4208 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4209 gen_lea_modrm(env
, s
, modrm
);
4210 gen_ldo_env_A0(s
, op2_offset
);
4213 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4215 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4217 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4218 gen_lea_modrm(env
, s
, modrm
);
4219 gen_ldq_env_A0(s
, op2_offset
);
4222 val
= cpu_ldub_code(env
, s
->pc
++);
4224 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4225 set_cc_op(s
, CC_OP_EFLAGS
);
4227 if (s
->dflag
== MO_64
) {
4228 /* The helper must use entire 64-bit gp registers */
4233 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4234 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4235 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4239 /* Various integer extensions at 0f 3a f[0-f]. */
4240 b
= modrm
| (b1
<< 8);
4241 modrm
= cpu_ldub_code(env
, s
->pc
++);
4242 reg
= ((modrm
>> 3) & 7) | rex_r
;
4245 case 0x3f0: /* rorx Gy,Ey, Ib */
4246 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4247 || !(s
->prefix
& PREFIX_VEX
)
4251 ot
= mo_64_32(s
->dflag
);
4252 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4253 b
= cpu_ldub_code(env
, s
->pc
++);
4255 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], b
& 63);
4257 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4258 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, b
& 31);
4259 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4261 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
4273 /* generic MMX or SSE operation */
4275 case 0x70: /* pshufx insn */
4276 case 0xc6: /* pshufx insn */
4277 case 0xc2: /* compare insns */
4284 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4288 gen_lea_modrm(env
, s
, modrm
);
4289 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4295 /* Most sse scalar operations. */
4298 } else if (b1
== 3) {
4303 case 0x2e: /* ucomis[sd] */
4304 case 0x2f: /* comis[sd] */
4316 gen_op_ld_v(s
, MO_32
, cpu_T
[0], cpu_A0
);
4317 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
4318 offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4322 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.XMM_D(0)));
4325 /* 128 bit access */
4326 gen_ldo_env_A0(s
, op2_offset
);
4330 rm
= (modrm
& 7) | REX_B(s
);
4331 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4334 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4336 gen_lea_modrm(env
, s
, modrm
);
4337 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4338 gen_ldq_env_A0(s
, op2_offset
);
4341 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4345 case 0x0f: /* 3DNow! data insns */
4346 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4348 val
= cpu_ldub_code(env
, s
->pc
++);
4349 sse_fn_epp
= sse_op_table5
[val
];
4353 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4354 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4355 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4357 case 0x70: /* pshufx insn */
4358 case 0xc6: /* pshufx insn */
4359 val
= cpu_ldub_code(env
, s
->pc
++);
4360 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4361 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4362 /* XXX: introduce a new table? */
4363 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4364 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4368 val
= cpu_ldub_code(env
, s
->pc
++);
4371 sse_fn_epp
= sse_op_table4
[val
][b1
];
4373 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4374 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4375 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4378 /* maskmov : we must prepare A0 */
4381 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EDI
]);
4382 gen_extu(s
->aflag
, cpu_A0
);
4383 gen_add_A0_ds_seg(s
);
4385 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4386 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4387 /* XXX: introduce a new table? */
4388 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4389 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4392 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4393 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4394 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4397 if (b
== 0x2e || b
== 0x2f) {
4398 set_cc_op(s
, CC_OP_EFLAGS
);
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                               target_ulong pc_start)
{
    int b, prefixes;
    int shift;
    TCGMemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(pc_start);
    }
    s->pc = pc_start;
    prefixes = 0;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
 next_byte:
    b = cpu_ldub_code(env, s->pc);
    s->pc++;
4434 /* Collect prefixes. */
4437 prefixes
|= PREFIX_REPZ
;
4440 prefixes
|= PREFIX_REPNZ
;
4443 prefixes
|= PREFIX_LOCK
;
4464 prefixes
|= PREFIX_DATA
;
4467 prefixes
|= PREFIX_ADR
;
4469 #ifdef TARGET_X86_64
4473 rex_w
= (b
>> 3) & 1;
4474 rex_r
= (b
& 0x4) << 1;
4475 s
->rex_x
= (b
& 0x2) << 2;
4476 REX_B(s
) = (b
& 0x1) << 3;
4477 x86_64_hregs
= 1; /* select uniform byte register addressing */
4482 case 0xc5: /* 2-byte VEX */
4483 case 0xc4: /* 3-byte VEX */
4484 /* VEX prefixes cannot be used except in 32-bit mode.
4485 Otherwise the instruction is LES or LDS. */
4486 if (s
->code32
&& !s
->vm86
) {
4487 static const int pp_prefix
[4] = {
4488 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4490 int vex3
, vex2
= cpu_ldub_code(env
, s
->pc
);
4492 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4493 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4494 otherwise the instruction is LES or LDS. */
4499 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4500 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4501 | PREFIX_LOCK
| PREFIX_DATA
)) {
4504 #ifdef TARGET_X86_64
4509 rex_r
= (~vex2
>> 4) & 8;
4512 b
= cpu_ldub_code(env
, s
->pc
++);
4514 #ifdef TARGET_X86_64
4515 s
->rex_x
= (~vex2
>> 3) & 8;
4516 s
->rex_b
= (~vex2
>> 2) & 8;
4518 vex3
= cpu_ldub_code(env
, s
->pc
++);
4519 rex_w
= (vex3
>> 7) & 1;
4520 switch (vex2
& 0x1f) {
4521 case 0x01: /* Implied 0f leading opcode bytes. */
4522 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4524 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4527 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4530 default: /* Reserved for future use. */
4534 s
->vex_v
= (~vex3
>> 3) & 0xf;
4535 s
->vex_l
= (vex3
>> 2) & 1;
4536 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4541 /* Post-process prefixes. */
4543 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4544 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4545 over 0x66 if both are present. */
4546 dflag
= (rex_w
> 0 ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
4547 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4548 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
4550 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4551 if (s
->code32
^ ((prefixes
& PREFIX_DATA
) != 0)) {
4556 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4557 if (s
->code32
^ ((prefixes
& PREFIX_ADR
) != 0)) {
4564 s
->prefix
= prefixes
;
4568 /* lock generation */
4569 if (prefixes
& PREFIX_LOCK
)
4572 /* now check op code */
4576 /**************************/
4577 /* extended op code */
4578 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4581 /**************************/
4596 ot
= mo_b_d(b
, dflag
);
4599 case 0: /* OP Ev, Gv */
4600 modrm
= cpu_ldub_code(env
, s
->pc
++);
4601 reg
= ((modrm
>> 3) & 7) | rex_r
;
4602 mod
= (modrm
>> 6) & 3;
4603 rm
= (modrm
& 7) | REX_B(s
);
4605 gen_lea_modrm(env
, s
, modrm
);
4607 } else if (op
== OP_XORL
&& rm
== reg
) {
4609 /* xor reg, reg optimisation */
4610 set_cc_op(s
, CC_OP_CLR
);
4611 tcg_gen_movi_tl(cpu_T
[0], 0);
4612 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
4617 gen_op_mov_v_reg(ot
, cpu_T
[1], reg
);
4618 gen_op(s
, op
, ot
, opreg
);
4620 case 1: /* OP Gv, Ev */
4621 modrm
= cpu_ldub_code(env
, s
->pc
++);
4622 mod
= (modrm
>> 6) & 3;
4623 reg
= ((modrm
>> 3) & 7) | rex_r
;
4624 rm
= (modrm
& 7) | REX_B(s
);
4626 gen_lea_modrm(env
, s
, modrm
);
4627 gen_op_ld_v(s
, ot
, cpu_T
[1], cpu_A0
);
4628 } else if (op
== OP_XORL
&& rm
== reg
) {
4631 gen_op_mov_v_reg(ot
, cpu_T
[1], rm
);
4633 gen_op(s
, op
, ot
, reg
);
4635 case 2: /* OP A, Iv */
4636 val
= insn_get(env
, s
, ot
);
4637 tcg_gen_movi_tl(cpu_T
[1], val
);
4638 gen_op(s
, op
, ot
, OR_EAX
);
4647 case 0x80: /* GRP1 */
4653 ot
= mo_b_d(b
, dflag
);
4655 modrm
= cpu_ldub_code(env
, s
->pc
++);
4656 mod
= (modrm
>> 6) & 3;
4657 rm
= (modrm
& 7) | REX_B(s
);
4658 op
= (modrm
>> 3) & 7;
4664 s
->rip_offset
= insn_const_size(ot
);
4665 gen_lea_modrm(env
, s
, modrm
);
4676 val
= insn_get(env
, s
, ot
);
4679 val
= (int8_t)insn_get(env
, s
, MO_8
);
4682 tcg_gen_movi_tl(cpu_T
[1], val
);
4683 gen_op(s
, op
, ot
, opreg
);
4687 /**************************/
4688 /* inc, dec, and other misc arith */
4689 case 0x40 ... 0x47: /* inc Gv */
4691 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4693 case 0x48 ... 0x4f: /* dec Gv */
4695 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
    case 0xf6: /* GRP3 */
    case 0xf7:
        ot = mo_b_d(b, dflag);

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0)
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(cpu_T[1], val);
            gen_op_testl_T0_T1_cc();
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
            } else {
                gen_op_mov_reg_v(ot, rm, cpu_T[0]);
            }
            break;
        case 3: /* neg */
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
            } else {
                gen_op_mov_reg_v(ot, rm, cpu_T[0]);
            }
            gen_op_update_neg_cc();
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
        case 4: /* mul */
            switch(ot) {
            case MO_8:
                gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T[0], cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case MO_8:
                gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T[0], cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 6: /* div */
            switch(ot) {
            case MO_8:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_env, cpu_T[0]);
                break;
            case MO_16:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_env, cpu_T[0]);
                break;
            default:
            case MO_32:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_env, cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_env, cpu_T[0]);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case MO_8:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_env, cpu_T[0]);
                break;
            case MO_16:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_env, cpu_T[0]);
                break;
            default:
            case MO_32:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
                break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;
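        /* Note: the div/idiv helpers above can fault (divide error), which is
           why EIP is synced with gen_jmp_im(pc_start - s->cs_base) before each
           call, so the faulting instruction can be restarted at its start. */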
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = MO_64;
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
        }

        switch(op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
            }
            next_eip = s->pc - s->cs_base;
            tcg_gen_movi_tl(cpu_T[1], next_eip);
            gen_push_v(s, cpu_T[1]);
            gen_op_jmp_v(cpu_T[0]);
            gen_eob(s);
            break;
        case 3: /* lcall Ev */
            gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);

            if (s->pe && !s->vm86) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag - 1),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag - 1),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
            }
            gen_op_jmp_v(cpu_T[0]);
            gen_eob(s);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);

            if (s->pe && !s->vm86) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_jmp_v(cpu_T[1]);
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_v(s, cpu_T[0]);
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        ot = mo_b_d(b, dflag);

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(ot, cpu_T[1], reg);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
        tcg_gen_movi_tl(cpu_T[1], val);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;
    case 0x98: /* CWDE/CBW */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
            break;
        case MO_16:
            gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
            break;
        default:
            tcg_abort();
        }
        break;
    case 0x99: /* CDQ/CWD */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
            break;
        case MO_16:
            gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
            break;
        default:
            tcg_abort();
        }
        break;
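        /* Note: CBW/CWDE/CDQE and CWD/CDQ/CQO only sign-extend within rAX or
           sign-fill rDX; they do not touch the condition codes, which is why
           no set_cc_op() call appears in the two cases above. */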
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(cpu_T[1], val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(cpu_T[1], val);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[1], reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_64:
            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
            break;
#endif
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
            tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                              cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
            gen_op_mov_reg_v(ot, reg, cpu_T[0]);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_mov_v_reg(ot, cpu_T[1], rm);
            tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            gen_op_mov_reg_v(ot, reg, cpu_T[1]);
            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
            tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
            gen_op_mov_reg_v(ot, reg, cpu_T[1]);
        }
        gen_op_update2_cc();
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGLabel *label1, *label2;
            TCGv t0, t1, t2, a0;

            ot = mo_b_d(b, dflag);
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(env, s, modrm);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(s, ot, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
            tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
            gen_extu(ot, t0);
            gen_extu(ot, t2);
            tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
            label2 = gen_new_label();
            if (mod == 3) {
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
            } else {
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(s, ot, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_st_v(s, ot, t1, a0);
            }
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_srcT, t2);
            tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
            set_cc_op(s, CC_OP_SUBB + ot);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
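        /* The cmpxchg flags come out as if a CMP of the old accumulator (t2)
           against the destination operand (t0) had been executed: cc_src,
           cc_srcT and cc_dst are loaded accordingly and CC_OP_SUBB + ot is
           selected above. */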
    case 0x1c7: /* cmpxchg8b */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm);
            gen_helper_cmpxchg16b(cpu_env, cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm);
            gen_helper_cmpxchg8b(cpu_env, cpu_A0);
        }
        set_cc_op(s, CC_OP_EFLAGS);
        break;
        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
        gen_push_v(s, cpu_T[0]);
        break;
    case 0x58 ... 0x5f: /* pop */
        ot = gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        ot = mo_pushpop(s, dflag);
        if (b == 0x68)
            val = insn_get(env, s, ot);
        else
            val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(cpu_T[0], val);
        gen_push_v(s, cpu_T[0]);
        break;
    case 0x8f: /* pop Ev */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        ot = gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = cpu_lduw_code(env, s->pc);
            s->pc += 2;
            level = cpu_ldub_code(env, s->pc++);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        /* XXX: exception not precise (ESP is updated before potential exception) */
        if (CODE64(s)) {
            gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
            gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
        } else if (s->ss32) {
            gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
            gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
        } else {
            gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
            gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
        }
        ot = gen_pop_T0(s);
        gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
        gen_pop_update(s, ot);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
        gen_push_v(s, cpu_T[0]);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_v(s, cpu_T[0]);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        gen_pop_update(s, ot);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace. */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
        gen_pop_update(s, ot);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /**************************/
        /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        }
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(cpu_T[0], val);
        if (mod != 3) {
            gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
        }
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            TCGMemOp d_ot;
            TCGMemOp s_ot;

            /* d_ot is the size of destination */
            d_ot = dflag;
            /* ot is the size of source */
            ot = (b & 1) + MO_8;
            /* s_ot is the sign+size of source */
            s_ot = b & 8 ? MO_SIGN | ot : ot;

            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(ot, cpu_T[0], rm);
                switch (s_ot) {
                case MO_UB:
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    break;
                case MO_SB:
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    break;
                case MO_UW:
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                case MO_SW:
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
                gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
            }
        }
        break;

    case 0x8d: /* lea */
        ot = dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        s->override = -1;
        val = s->addseg;
        s->addseg = 0;
        gen_lea_modrm(env, s, modrm);
        s->addseg = val;
        gen_op_mov_reg_v(ot, reg, cpu_A0);
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
            switch (s->aflag) {
#ifdef TARGET_X86_64
            case MO_64:
                offset_addr = cpu_ldq_code(env, s->pc);
                s->pc += 8;
                break;
#endif
            default:
                offset_addr = insn_get(env, s, s->aflag);
                break;
            }
            tcg_gen_movi_tl(cpu_A0, offset_addr);
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
                gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
            } else {
                gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
                gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
            }
        }
        break;
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
        gen_extu(s->aflag, cpu_A0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(cpu_T[0], val);
        gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = cpu_ldq_code(env, s->pc);
            s->pc += 8;
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(cpu_T[0], tmp);
            gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
        } else
#endif
        {
            ot = dflag;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(cpu_T[0], val);
            gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_mov_v_reg(ot, cpu_T[1], rm);
            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
            gen_op_mov_reg_v(ot, reg, cpu_T[1]);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(ot, cpu_T[0], reg);
            /* for xchg, lock is implicit */
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_lock();
            gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
            gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_unlock();
            gen_op_mov_reg_v(ot, reg, cpu_T[1]);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above. */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above. */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_v(ot, reg, cpu_T[1]);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /************************/
        /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            ot = mo_b_d(b, dflag);
            modrm = cpu_ldub_code(env, s->pc++);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = cpu_ldub_code(env, s->pc++);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(ot, cpu_T[1], reg);

        if (shift) {
            TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
            tcg_temp_free(imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
5700 /************************/
5703 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5704 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5705 /* XXX: what to do if illegal op ? */
5706 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5709 modrm
= cpu_ldub_code(env
, s
->pc
++);
5710 mod
= (modrm
>> 6) & 3;
5712 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5715 gen_lea_modrm(env
, s
, modrm
);
5717 case 0x00 ... 0x07: /* fxxxs */
5718 case 0x10 ... 0x17: /* fixxxl */
5719 case 0x20 ... 0x27: /* fxxxl */
5720 case 0x30 ... 0x37: /* fixxx */
5727 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5728 s
->mem_index
, MO_LEUL
);
5729 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
5732 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5733 s
->mem_index
, MO_LEUL
);
5734 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5737 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
5738 s
->mem_index
, MO_LEQ
);
5739 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
5743 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5744 s
->mem_index
, MO_LESW
);
5745 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5749 gen_helper_fp_arith_ST0_FT0(op1
);
5751 /* fcomp needs pop */
5752 gen_helper_fpop(cpu_env
);
5756 case 0x08: /* flds */
5757 case 0x0a: /* fsts */
5758 case 0x0b: /* fstps */
5759 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5760 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5761 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5766 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5767 s
->mem_index
, MO_LEUL
);
5768 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
5771 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5772 s
->mem_index
, MO_LEUL
);
5773 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5776 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
5777 s
->mem_index
, MO_LEQ
);
5778 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
5782 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5783 s
->mem_index
, MO_LESW
);
5784 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5789 /* XXX: the corresponding CPUID bit must be tested ! */
5792 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
5793 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5794 s
->mem_index
, MO_LEUL
);
5797 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
5798 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
5799 s
->mem_index
, MO_LEQ
);
5803 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
5804 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5805 s
->mem_index
, MO_LEUW
);
5808 gen_helper_fpop(cpu_env
);
5813 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
5814 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5815 s
->mem_index
, MO_LEUL
);
5818 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
5819 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5820 s
->mem_index
, MO_LEUL
);
5823 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
5824 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
5825 s
->mem_index
, MO_LEQ
);
5829 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
5830 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5831 s
->mem_index
, MO_LEUW
);
5835 gen_helper_fpop(cpu_env
);
5839 case 0x0c: /* fldenv mem */
5840 gen_update_cc_op(s
);
5841 gen_jmp_im(pc_start
- s
->cs_base
);
5842 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5844 case 0x0d: /* fldcw mem */
5845 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5846 s
->mem_index
, MO_LEUW
);
5847 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
5849 case 0x0e: /* fnstenv mem */
5850 gen_update_cc_op(s
);
5851 gen_jmp_im(pc_start
- s
->cs_base
);
5852 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5854 case 0x0f: /* fnstcw mem */
5855 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
5856 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5857 s
->mem_index
, MO_LEUW
);
5859 case 0x1d: /* fldt mem */
5860 gen_update_cc_op(s
);
5861 gen_jmp_im(pc_start
- s
->cs_base
);
5862 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
5864 case 0x1f: /* fstpt mem */
5865 gen_update_cc_op(s
);
5866 gen_jmp_im(pc_start
- s
->cs_base
);
5867 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
5868 gen_helper_fpop(cpu_env
);
5870 case 0x2c: /* frstor mem */
5871 gen_update_cc_op(s
);
5872 gen_jmp_im(pc_start
- s
->cs_base
);
5873 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5875 case 0x2e: /* fnsave mem */
5876 gen_update_cc_op(s
);
5877 gen_jmp_im(pc_start
- s
->cs_base
);
5878 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5880 case 0x2f: /* fnstsw mem */
5881 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
5882 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5883 s
->mem_index
, MO_LEUW
);
5885 case 0x3c: /* fbld */
5886 gen_update_cc_op(s
);
5887 gen_jmp_im(pc_start
- s
->cs_base
);
5888 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
5890 case 0x3e: /* fbstp */
5891 gen_update_cc_op(s
);
5892 gen_jmp_im(pc_start
- s
->cs_base
);
5893 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
5894 gen_helper_fpop(cpu_env
);
5896 case 0x3d: /* fildll */
5897 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
5898 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
5900 case 0x3f: /* fistpll */
5901 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
5902 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
5903 gen_helper_fpop(cpu_env
);
5909 /* register float ops */
5913 case 0x08: /* fld sti */
5914 gen_helper_fpush(cpu_env
);
5915 gen_helper_fmov_ST0_STN(cpu_env
,
5916 tcg_const_i32((opreg
+ 1) & 7));
5918 case 0x09: /* fxchg sti */
5919 case 0x29: /* fxchg4 sti, undocumented op */
5920 case 0x39: /* fxchg7 sti, undocumented op */
5921 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
5923 case 0x0a: /* grp d9/2 */
5926 /* check exceptions (FreeBSD FPU probe) */
5927 gen_update_cc_op(s
);
5928 gen_jmp_im(pc_start
- s
->cs_base
);
5929 gen_helper_fwait(cpu_env
);
5935 case 0x0c: /* grp d9/4 */
5938 gen_helper_fchs_ST0(cpu_env
);
5941 gen_helper_fabs_ST0(cpu_env
);
5944 gen_helper_fldz_FT0(cpu_env
);
5945 gen_helper_fcom_ST0_FT0(cpu_env
);
5948 gen_helper_fxam_ST0(cpu_env
);
5954 case 0x0d: /* grp d9/5 */
5958 gen_helper_fpush(cpu_env
);
5959 gen_helper_fld1_ST0(cpu_env
);
5962 gen_helper_fpush(cpu_env
);
5963 gen_helper_fldl2t_ST0(cpu_env
);
5966 gen_helper_fpush(cpu_env
);
5967 gen_helper_fldl2e_ST0(cpu_env
);
5970 gen_helper_fpush(cpu_env
);
5971 gen_helper_fldpi_ST0(cpu_env
);
5974 gen_helper_fpush(cpu_env
);
5975 gen_helper_fldlg2_ST0(cpu_env
);
5978 gen_helper_fpush(cpu_env
);
5979 gen_helper_fldln2_ST0(cpu_env
);
5982 gen_helper_fpush(cpu_env
);
5983 gen_helper_fldz_ST0(cpu_env
);
5990 case 0x0e: /* grp d9/6 */
5993 gen_helper_f2xm1(cpu_env
);
5996 gen_helper_fyl2x(cpu_env
);
5999 gen_helper_fptan(cpu_env
);
6001 case 3: /* fpatan */
6002 gen_helper_fpatan(cpu_env
);
6004 case 4: /* fxtract */
6005 gen_helper_fxtract(cpu_env
);
6007 case 5: /* fprem1 */
6008 gen_helper_fprem1(cpu_env
);
6010 case 6: /* fdecstp */
6011 gen_helper_fdecstp(cpu_env
);
6014 case 7: /* fincstp */
6015 gen_helper_fincstp(cpu_env
);
6019 case 0x0f: /* grp d9/7 */
6022 gen_helper_fprem(cpu_env
);
6024 case 1: /* fyl2xp1 */
6025 gen_helper_fyl2xp1(cpu_env
);
6028 gen_helper_fsqrt(cpu_env
);
6030 case 3: /* fsincos */
6031 gen_helper_fsincos(cpu_env
);
6033 case 5: /* fscale */
6034 gen_helper_fscale(cpu_env
);
6036 case 4: /* frndint */
6037 gen_helper_frndint(cpu_env
);
6040 gen_helper_fsin(cpu_env
);
6044 gen_helper_fcos(cpu_env
);
6048 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6049 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6050 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6056 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6058 gen_helper_fpop(cpu_env
);
6060 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6061 gen_helper_fp_arith_ST0_FT0(op1
);
6065 case 0x02: /* fcom */
6066 case 0x22: /* fcom2, undocumented op */
6067 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6068 gen_helper_fcom_ST0_FT0(cpu_env
);
6070 case 0x03: /* fcomp */
6071 case 0x23: /* fcomp3, undocumented op */
6072 case 0x32: /* fcomp5, undocumented op */
6073 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6074 gen_helper_fcom_ST0_FT0(cpu_env
);
6075 gen_helper_fpop(cpu_env
);
6077 case 0x15: /* da/5 */
6079 case 1: /* fucompp */
6080 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6081 gen_helper_fucom_ST0_FT0(cpu_env
);
6082 gen_helper_fpop(cpu_env
);
6083 gen_helper_fpop(cpu_env
);
6091 case 0: /* feni (287 only, just do nop here) */
6093 case 1: /* fdisi (287 only, just do nop here) */
6096 gen_helper_fclex(cpu_env
);
6098 case 3: /* fninit */
6099 gen_helper_fninit(cpu_env
);
6101 case 4: /* fsetpm (287 only, just do nop here) */
6107 case 0x1d: /* fucomi */
6108 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6111 gen_update_cc_op(s
);
6112 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6113 gen_helper_fucomi_ST0_FT0(cpu_env
);
6114 set_cc_op(s
, CC_OP_EFLAGS
);
6116 case 0x1e: /* fcomi */
6117 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6120 gen_update_cc_op(s
);
6121 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6122 gen_helper_fcomi_ST0_FT0(cpu_env
);
6123 set_cc_op(s
, CC_OP_EFLAGS
);
6125 case 0x28: /* ffree sti */
6126 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6128 case 0x2a: /* fst sti */
6129 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6131 case 0x2b: /* fstp sti */
6132 case 0x0b: /* fstp1 sti, undocumented op */
6133 case 0x3a: /* fstp8 sti, undocumented op */
6134 case 0x3b: /* fstp9 sti, undocumented op */
6135 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6136 gen_helper_fpop(cpu_env
);
6138 case 0x2c: /* fucom st(i) */
6139 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6140 gen_helper_fucom_ST0_FT0(cpu_env
);
6142 case 0x2d: /* fucomp st(i) */
6143 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6144 gen_helper_fucom_ST0_FT0(cpu_env
);
6145 gen_helper_fpop(cpu_env
);
6147 case 0x33: /* de/3 */
6149 case 1: /* fcompp */
6150 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6151 gen_helper_fcom_ST0_FT0(cpu_env
);
6152 gen_helper_fpop(cpu_env
);
6153 gen_helper_fpop(cpu_env
);
6159 case 0x38: /* ffreep sti, undocumented op */
6160 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6161 gen_helper_fpop(cpu_env
);
6163 case 0x3c: /* df/4 */
6166 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6167 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6168 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T
[0]);
6174 case 0x3d: /* fucomip */
6175 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6178 gen_update_cc_op(s
);
6179 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6180 gen_helper_fucomi_ST0_FT0(cpu_env
);
6181 gen_helper_fpop(cpu_env
);
6182 set_cc_op(s
, CC_OP_EFLAGS
);
6184 case 0x3e: /* fcomip */
6185 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6188 gen_update_cc_op(s
);
6189 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6190 gen_helper_fcomi_ST0_FT0(cpu_env
);
6191 gen_helper_fpop(cpu_env
);
6192 set_cc_op(s
, CC_OP_EFLAGS
);
6194 case 0x10 ... 0x13: /* fcmovxx */
6199 static const uint8_t fcmov_cc
[8] = {
6206 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6209 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6210 l1
= gen_new_label();
6211 gen_jcc1_noeob(s
, op1
, l1
);
6212 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
        /************************/
        /* string ops */
    case 0xa4: /* movsS */
    case 0xa5:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_movs(s, ot);
        }
        break;

    case 0xaa: /* stosS */
    case 0xab:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_scas(s, ot);
        }
        break;

    case 0xa6: /* cmpsS */
    case 0xa7:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
6275 case 0x6c: /* insS */
6277 ot
= mo_b_d32(b
, dflag
);
6278 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[R_EDX
]);
6279 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6280 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6281 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6282 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6285 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6286 gen_jmp(s
, s
->pc
- s
->cs_base
);
6290 case 0x6e: /* outsS */
6292 ot
= mo_b_d32(b
, dflag
);
6293 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[R_EDX
]);
6294 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6295 svm_is_rep(prefixes
) | 4);
6296 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6297 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6300 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6301 gen_jmp(s
, s
->pc
- s
->cs_base
);
6306 /************************/
6311 ot
= mo_b_d32(b
, dflag
);
6312 val
= cpu_ldub_code(env
, s
->pc
++);
6313 tcg_gen_movi_tl(cpu_T
[0], val
);
6314 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6315 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6316 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6319 tcg_gen_movi_i32(cpu_tmp2_i32
, val
);
6320 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6321 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T
[1]);
6322 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6324 gen_jmp(s
, s
->pc
- s
->cs_base
);
6329 ot
= mo_b_d32(b
, dflag
);
6330 val
= cpu_ldub_code(env
, s
->pc
++);
6331 tcg_gen_movi_tl(cpu_T
[0], val
);
6332 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6333 svm_is_rep(prefixes
));
6334 gen_op_mov_v_reg(ot
, cpu_T
[1], R_EAX
);
6336 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6339 tcg_gen_movi_i32(cpu_tmp2_i32
, val
);
6340 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6341 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6342 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6344 gen_jmp(s
, s
->pc
- s
->cs_base
);
6349 ot
= mo_b_d32(b
, dflag
);
6350 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[R_EDX
]);
6351 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6352 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6353 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6356 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6357 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6358 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T
[1]);
6359 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6361 gen_jmp(s
, s
->pc
- s
->cs_base
);
6366 ot
= mo_b_d32(b
, dflag
);
6367 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[R_EDX
]);
6368 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6369 svm_is_rep(prefixes
));
6370 gen_op_mov_v_reg(ot
, cpu_T
[1], R_EAX
);
6372 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6375 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6376 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6377 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6378 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6380 gen_jmp(s
, s
->pc
- s
->cs_base
);
6384 /************************/
    case 0xc2: /* ret im */
        val = cpu_ldsw_code(env, s->pc);
        s->pc += 2;
        ot = gen_pop_T0(s);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(cpu_T[0]);
        gen_eob(s);
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(cpu_T[0]);
        gen_eob(s);
        break;
6402 case 0xca: /* lret im */
6403 val
= cpu_ldsw_code(env
, s
->pc
);
6406 if (s
->pe
&& !s
->vm86
) {
6407 gen_update_cc_op(s
);
6408 gen_jmp_im(pc_start
- s
->cs_base
);
6409 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6410 tcg_const_i32(val
));
6414 gen_op_ld_v(s
, dflag
, cpu_T
[0], cpu_A0
);
6415 /* NOTE: keeping EIP updated is not a problem in case of
6417 gen_op_jmp_v(cpu_T
[0]);
6419 gen_op_addl_A0_im(1 << dflag
);
6420 gen_op_ld_v(s
, dflag
, cpu_T
[0], cpu_A0
);
6421 gen_op_movl_seg_T0_vm(R_CS
);
6422 /* add stack offset */
6423 gen_stack_update(s
, val
+ (2 << dflag
));
6427 case 0xcb: /* lret */
6430 case 0xcf: /* iret */
6431 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6434 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6435 set_cc_op(s
, CC_OP_EFLAGS
);
6436 } else if (s
->vm86
) {
6438 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6440 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6441 set_cc_op(s
, CC_OP_EFLAGS
);
6444 gen_update_cc_op(s
);
6445 gen_jmp_im(pc_start
- s
->cs_base
);
6446 gen_helper_iret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6447 tcg_const_i32(s
->pc
- s
->cs_base
));
6448 set_cc_op(s
, CC_OP_EFLAGS
);
6452 case 0xe8: /* call im */
6454 if (dflag
!= MO_16
) {
6455 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6457 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6459 next_eip
= s
->pc
- s
->cs_base
;
6461 if (dflag
== MO_16
) {
6463 } else if (!CODE64(s
)) {
6466 tcg_gen_movi_tl(cpu_T
[0], next_eip
);
6467 gen_push_v(s
, cpu_T
[0]);
6471 case 0x9a: /* lcall im */
6473 unsigned int selector
, offset
;
6478 offset
= insn_get(env
, s
, ot
);
6479 selector
= insn_get(env
, s
, MO_16
);
6481 tcg_gen_movi_tl(cpu_T
[0], selector
);
6482 tcg_gen_movi_tl(cpu_T
[1], offset
);
6485 case 0xe9: /* jmp im */
6486 if (dflag
!= MO_16
) {
6487 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6489 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6491 tval
+= s
->pc
- s
->cs_base
;
6492 if (dflag
== MO_16
) {
6494 } else if (!CODE64(s
)) {
6499 case 0xea: /* ljmp im */
6501 unsigned int selector
, offset
;
6506 offset
= insn_get(env
, s
, ot
);
6507 selector
= insn_get(env
, s
, MO_16
);
6509 tcg_gen_movi_tl(cpu_T
[0], selector
);
6510 tcg_gen_movi_tl(cpu_T
[1], offset
);
6513 case 0xeb: /* jmp Jb */
6514 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6515 tval
+= s
->pc
- s
->cs_base
;
6516 if (dflag
== MO_16
) {
6521 case 0x70 ... 0x7f: /* jcc Jb */
6522 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6524 case 0x180 ... 0x18f: /* jcc Jv */
6525 if (dflag
!= MO_16
) {
6526 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6528 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6531 next_eip
= s
->pc
- s
->cs_base
;
6533 if (dflag
== MO_16
) {
6536 gen_jcc(s
, b
, tval
, next_eip
);
6539 case 0x190 ... 0x19f: /* setcc Gv */
6540 modrm
= cpu_ldub_code(env
, s
->pc
++);
6541 gen_setcc1(s
, b
, cpu_T
[0]);
6542 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
6544 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6545 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6549 modrm
= cpu_ldub_code(env
, s
->pc
++);
6550 reg
= ((modrm
>> 3) & 7) | rex_r
;
6551 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6554 /************************/
6556 case 0x9c: /* pushf */
6557 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6558 if (s
->vm86
&& s
->iopl
!= 3) {
6559 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6561 gen_update_cc_op(s
);
6562 gen_helper_read_eflags(cpu_T
[0], cpu_env
);
6563 gen_push_v(s
, cpu_T
[0]);
6566 case 0x9d: /* popf */
6567 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6568 if (s
->vm86
&& s
->iopl
!= 3) {
6569 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6573 if (dflag
!= MO_16
) {
6574 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6575 tcg_const_i32((TF_MASK
| AC_MASK
|
6580 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6581 tcg_const_i32((TF_MASK
| AC_MASK
|
6583 IF_MASK
| IOPL_MASK
)
6587 if (s
->cpl
<= s
->iopl
) {
6588 if (dflag
!= MO_16
) {
6589 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6590 tcg_const_i32((TF_MASK
|
6596 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6597 tcg_const_i32((TF_MASK
|
6605 if (dflag
!= MO_16
) {
6606 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6607 tcg_const_i32((TF_MASK
| AC_MASK
|
6608 ID_MASK
| NT_MASK
)));
6610 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6611 tcg_const_i32((TF_MASK
| AC_MASK
|
6617 gen_pop_update(s
, ot
);
6618 set_cc_op(s
, CC_OP_EFLAGS
);
6619 /* abort translation because TF/AC flag may change */
6620 gen_jmp_im(s
->pc
- s
->cs_base
);
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
        gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
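        /* DF is kept in env->df as +1 (cld) or -1 (std), so the generated
           string-op code can step ESI/EDI by simply adding df << ot. */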
        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
        }
        /* load shift */
        val = cpu_ldub_code(env, s->pc++);
        tcg_gen_movi_tl(cpu_T[1], val);
        if (op < 4)
            goto illegal_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            /* specific case: we need to add a displacement */
            gen_exts(ot, cpu_T[1]);
            tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
            tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
            gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
        }
    bt_op:
        tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
        switch(op) {
        case 0: /* bt */
            break;
        case 1: /* bts */
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        case 2: /* btr */
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        default:
        case 3: /* btc */
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
        if (op != 0) {
            if (mod != 3) {
                gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
            } else {
                gen_op_mov_reg_v(ot, rm, cpu_T[0]);
            }
        }

        /* Delay all CC updates until after the store above. Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined. */
        switch (s->cc_op) {
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width. */
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
            break;
        default:
            /* Otherwise, generate EFLAGS and replace the C bit. */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T[0]);

        /* Note that lzcnt and tzcnt are in different extensions. */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                gen_helper_clz(cpu_T[0], cpu_T[0]);
                tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size:
                   force all bits outside the operand size to 1. */
                target_ulong mask = (target_ulong)-2 << (size - 1);
                tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            }
            /* For lzcnt/tzcnt, C and Z bits are defined and are
               related to the result. */
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            set_cc_op(s, CC_OP_LOGICB + ot);
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros. */
                gen_helper_clz(cpu_T[0], cpu_T[0]);
                tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
            } else {
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            }
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that. */
            tcg_gen_movi_tl(cpu_tmp0, 0);
            tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
                               cpu_regs[reg], cpu_T[0]);
        }
        gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        break;
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        } else {
            gen_helper_aam(cpu_env, tcg_const_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;
6875 /************************/
6877 case 0x90: /* nop */
6878 /* XXX: correct lock test for all insn */
6879 if (prefixes
& PREFIX_LOCK
) {
6882 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6884 goto do_xchg_reg_eax
;
6886 if (prefixes
& PREFIX_REPZ
) {
6887 gen_update_cc_op(s
);
6888 gen_jmp_im(pc_start
- s
->cs_base
);
6889 gen_helper_pause(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6890 s
->is_jmp
= DISAS_TB_JUMP
;
6893 case 0x9b: /* fwait */
6894 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6895 (HF_MP_MASK
| HF_TS_MASK
)) {
6896 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6898 gen_update_cc_op(s
);
6899 gen_jmp_im(pc_start
- s
->cs_base
);
6900 gen_helper_fwait(cpu_env
);
6903 case 0xcc: /* int3 */
6904 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6906 case 0xcd: /* int N */
6907 val
= cpu_ldub_code(env
, s
->pc
++);
6908 if (s
->vm86
&& s
->iopl
!= 3) {
6909 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6911 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6914 case 0xce: /* into */
6917 gen_update_cc_op(s
);
6918 gen_jmp_im(pc_start
- s
->cs_base
);
6919 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6922 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6923 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6925 gen_debug(s
, pc_start
- s
->cs_base
);
6929 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6933 case 0xfa: /* cli */
6935 if (s
->cpl
<= s
->iopl
) {
6936 gen_helper_cli(cpu_env
);
6938 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6942 gen_helper_cli(cpu_env
);
6944 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6948 case 0xfb: /* sti */
6950 if (s
->cpl
<= s
->iopl
) {
6952 gen_helper_sti(cpu_env
);
6953 /* interruptions are enabled only the first insn after sti */
6954 /* If several instructions disable interrupts, only the
6956 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
6957 gen_helper_set_inhibit_irq(cpu_env
);
6958 /* give a chance to handle pending irqs */
6959 gen_jmp_im(s
->pc
- s
->cs_base
);
6962 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6968 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6972 case 0x62: /* bound */
6976 modrm
= cpu_ldub_code(env
, s
->pc
++);
6977 reg
= (modrm
>> 3) & 7;
6978 mod
= (modrm
>> 6) & 3;
6981 gen_op_mov_v_reg(ot
, cpu_T
[0], reg
);
6982 gen_lea_modrm(env
, s
, modrm
);
6983 gen_jmp_im(pc_start
- s
->cs_base
);
6984 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6986 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6988 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
            tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
        } else
#endif
        {
            gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
            tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
        }
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, cpu_T[0]);
        tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
        break;
7014 case 0xe0: /* loopnz */
7015 case 0xe1: /* loopz */
7016 case 0xe2: /* loop */
7017 case 0xe3: /* jecxz */
7019 TCGLabel
*l1
, *l2
, *l3
;
7021 tval
= (int8_t)insn_get(env
, s
, MO_8
);
7022 next_eip
= s
->pc
- s
->cs_base
;
7024 if (dflag
== MO_16
) {
7028 l1
= gen_new_label();
7029 l2
= gen_new_label();
7030 l3
= gen_new_label();
7033 case 0: /* loopnz */
7035 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7036 gen_op_jz_ecx(s
->aflag
, l3
);
7037 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7040 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7041 gen_op_jnz_ecx(s
->aflag
, l1
);
7045 gen_op_jz_ecx(s
->aflag
, l1
);
7050 gen_jmp_im(next_eip
);
7059 case 0x130: /* wrmsr */
7060 case 0x132: /* rdmsr */
7062 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7064 gen_update_cc_op(s
);
7065 gen_jmp_im(pc_start
- s
->cs_base
);
7067 gen_helper_rdmsr(cpu_env
);
7069 gen_helper_wrmsr(cpu_env
);
7073 case 0x131: /* rdtsc */
7074 gen_update_cc_op(s
);
7075 gen_jmp_im(pc_start
- s
->cs_base
);
7076 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
7079 gen_helper_rdtsc(cpu_env
);
7080 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
7082 gen_jmp(s
, s
->pc
- s
->cs_base
);
7085 case 0x133: /* rdpmc */
7086 gen_update_cc_op(s
);
7087 gen_jmp_im(pc_start
- s
->cs_base
);
7088 gen_helper_rdpmc(cpu_env
);
7090 case 0x134: /* sysenter */
7091 /* For Intel SYSENTER is valid on 64-bit */
7092 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7095 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7097 gen_update_cc_op(s
);
7098 gen_jmp_im(pc_start
- s
->cs_base
);
7099 gen_helper_sysenter(cpu_env
);
7103 case 0x135: /* sysexit */
7104 /* For Intel SYSEXIT is valid on 64-bit */
7105 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7108 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7110 gen_update_cc_op(s
);
7111 gen_jmp_im(pc_start
- s
->cs_base
);
7112 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
- 1));
7116 #ifdef TARGET_X86_64
7117 case 0x105: /* syscall */
7118 /* XXX: is it usable in real mode ? */
7119 gen_update_cc_op(s
);
7120 gen_jmp_im(pc_start
- s
->cs_base
);
7121 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7124 case 0x107: /* sysret */
7126 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7128 gen_update_cc_op(s
);
7129 gen_jmp_im(pc_start
- s
->cs_base
);
7130 gen_helper_sysret(cpu_env
, tcg_const_i32(dflag
- 1));
7131 /* condition codes are modified only in long mode */
7133 set_cc_op(s
, CC_OP_EFLAGS
);
7139 case 0x1a2: /* cpuid */
7140 gen_update_cc_op(s
);
7141 gen_jmp_im(pc_start
- s
->cs_base
);
7142 gen_helper_cpuid(cpu_env
);
7144 case 0xf4: /* hlt */
7146 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7148 gen_update_cc_op(s
);
7149 gen_jmp_im(pc_start
- s
->cs_base
);
7150 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7151 s
->is_jmp
= DISAS_TB_JUMP
;
7155 modrm
= cpu_ldub_code(env
, s
->pc
++);
7156 mod
= (modrm
>> 6) & 3;
7157 op
= (modrm
>> 3) & 7;
7160 if (!s
->pe
|| s
->vm86
)
7162 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7163 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
7164 ot
= mod
== 3 ? dflag
: MO_16
;
7165 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7168 if (!s
->pe
|| s
->vm86
)
7171 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7173 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7174 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7175 gen_jmp_im(pc_start
- s
->cs_base
);
7176 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7177 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7181 if (!s
->pe
|| s
->vm86
)
7183 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7184 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7185 ot
= mod
== 3 ? dflag
: MO_16
;
7186 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7189 if (!s
->pe
|| s
->vm86
)
7192 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7194 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7195 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7196 gen_jmp_im(pc_start
- s
->cs_base
);
7197 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7198 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7203 if (!s
->pe
|| s
->vm86
)
7205 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7206 gen_update_cc_op(s
);
7208 gen_helper_verr(cpu_env
, cpu_T
[0]);
7210 gen_helper_verw(cpu_env
, cpu_T
[0]);
7212 set_cc_op(s
, CC_OP_EFLAGS
);
7219 modrm
= cpu_ldub_code(env
, s
->pc
++);
7220 mod
= (modrm
>> 6) & 3;
7221 op
= (modrm
>> 3) & 7;
7227 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7228 gen_lea_modrm(env
, s
, modrm
);
7229 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7230 gen_op_st_v(s
, MO_16
, cpu_T
[0], cpu_A0
);
7231 gen_add_A0_im(s
, 2);
7232 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7233 if (dflag
== MO_16
) {
7234 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffffff);
7236 gen_op_st_v(s
, CODE64(s
) + MO_32
, cpu_T
[0], cpu_A0
);
7241 case 0: /* monitor */
7242 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7245 gen_update_cc_op(s
);
7246 gen_jmp_im(pc_start
- s
->cs_base
);
7247 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EAX
]);
7248 gen_extu(s
->aflag
, cpu_A0
);
7249 gen_add_A0_ds_seg(s
);
7250 gen_helper_monitor(cpu_env
, cpu_A0
);
7253 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7256 gen_update_cc_op(s
);
7257 gen_jmp_im(pc_start
- s
->cs_base
);
7258 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
        if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
        gen_helper_clac(cpu_env);
        gen_jmp_im(s->pc - s->cs_base);
        if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
        gen_helper_stac(cpu_env);
        gen_jmp_im(s->pc - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
        gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
        gen_add_A0_im(s, 2);
        tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
        if (dflag == MO_16) {
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
        }
        gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        if (!(s->flags & HF_SVME_MASK) || !s->pe)
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
                         tcg_const_i32(s->pc - pc_start));
        s->is_jmp = DISAS_TB_JUMP;
    case 1: /* VMMCALL */
        if (!(s->flags & HF_SVME_MASK))
        gen_helper_vmmcall(cpu_env);
    case 2: /* VMLOAD */
        if (!(s->flags & HF_SVME_MASK) || !s->pe)
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
    case 3: /* VMSAVE */
        if (!(s->flags & HF_SVME_MASK) || !s->pe)
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
        if ((!(s->flags & HF_SVME_MASK) &&
             !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_stgi(cpu_env);
        if (!(s->flags & HF_SVME_MASK) || !s->pe)
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_clgi(cpu_env);
    case 6: /* SKINIT */
        if ((!(s->flags & HF_SVME_MASK) &&
             !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
        gen_helper_skinit(cpu_env);
    case 7: /* INVLPGA */
        if (!(s->flags & HF_SVME_MASK) || !s->pe)
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_invlpga(cpu_env,
                           tcg_const_i32(s->aflag - 1));
        } else if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start,
                                op == 2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
        gen_add_A0_im(s, 2);
        gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
        if (dflag == MO_16) {
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
        }
        tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
        tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State, gdt.limit));
        tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
        tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State, idt.limit));
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, cr[0]) + 4);
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, cr[0]));
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_helper_lmsw(cpu_env, cpu_T[0]);
        gen_jmp_im(s->pc - s->cs_base);
        if (mod != 3) { /* invlpg */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, cpu_A0);
            gen_jmp_im(s->pc - s->cs_base);
    case 0: /* swapgs */
#ifdef TARGET_X86_64
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        tcg_gen_ld_tl(cpu_T[0], cpu_env,
                      offsetof(CPUX86State, segs[R_GS].base));
        tcg_gen_ld_tl(cpu_T[1], cpu_env,
                      offsetof(CPUX86State, kernelgsbase));
        tcg_gen_st_tl(cpu_T[1], cpu_env,
                      offsetof(CPUX86State, segs[R_GS].base));
        tcg_gen_st_tl(cpu_T[0], cpu_env,
                      offsetof(CPUX86State, kernelgsbase));
    case 1: /* rdtscp */
        if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_helper_rdtscp(cpu_env);
        if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_jmp(s, s->pc - s->cs_base);
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        /* d_ot is the size of destination */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
        if (d_ot == MO_64) {
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
        }
        gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
        gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
        TCGv t0, t1, t2, a0;
        if (!s->pe || s->vm86)
        t0 = tcg_temp_local_new();
        t1 = tcg_temp_local_new();
        t2 = tcg_temp_local_new();
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, t0, cpu_A0);
        a0 = tcg_temp_local_new();
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_mov_v_reg(ot, t0, rm);
        gen_op_mov_v_reg(ot, t1, reg);
        tcg_gen_andi_tl(cpu_tmp0, t0, 3);
        tcg_gen_andi_tl(t1, t1, 3);
        tcg_gen_movi_tl(t2, 0);
        label1 = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
        tcg_gen_andi_tl(t0, t0, ~3);
        tcg_gen_or_tl(t0, t0, t1);
        tcg_gen_movi_tl(t2, CC_Z);
        gen_set_label(label1);
        gen_op_st_v(s, ot, t0, a0);
        gen_op_mov_reg_v(ot, rm, t0);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
    case 0x102: /* lar */
    case 0x103: /* lsl */
        if (!s->pe || s->vm86)
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        t0 = tcg_temp_local_new();
        gen_update_cc_op(s);
        gen_helper_lar(t0, cpu_env, cpu_T[0]);
        gen_helper_lsl(t0, cpu_env, cpu_T[0]);
        tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
        label1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
        gen_op_mov_reg_v(ot, reg, t0);
        gen_set_label(label1);
        set_cc_op(s, CC_OP_EFLAGS);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
    case 0: /* prefetchnta */
    case 1: /* prefetcht0 */
    case 2: /* prefetcht1 */
    case 3: /* prefetcht2 */
        gen_lea_modrm(env, s, modrm);
        /* nothing more to do */
    default: /* nop (multi byte) */
        gen_nop_modrm(env, s, modrm);
    case 0x119 ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        modrm = cpu_ldub_code(env, s->pc++);
        /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of
         * intel 386 and 486 processors all show that the mod bits
         * are assumed to be 1's, regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
            (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_op_mov_v_reg(ot, cpu_T[0], rm);
        gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                             cpu_T[0]);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
        gen_op_mov_reg_v(ot, rm, cpu_T[0]);
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        modrm = cpu_ldub_code(env, s->pc++);
        /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of
         * intel 386 and 486 processors all show that the mod bits
         * are assumed to be 1's, regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        /* XXX: do it dynamically with CR4.DE bit */
        if (reg == 4 || reg == 5 || reg >= 8)
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
        gen_op_mov_v_reg(ot, cpu_T[0], rm);
        gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
        gen_jmp_im(s->pc - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
        tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, dr[reg]));
        gen_op_mov_reg_v(ot, rm, cpu_T[0]);
    case 0x106: /* clts */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
        gen_helper_clts(cpu_env);
        /* abort block because static cpu state changed */
        gen_jmp_im(s->pc - s->cs_base);
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
        ot = mo_64_32(dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
    case 0: /* fxsave */
        if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
            (s->prefix & PREFIX_LOCK))
        if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        gen_lea_modrm(env, s, modrm);
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
    case 1: /* fxrstor */
        if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
            (s->prefix & PREFIX_LOCK))
        if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        gen_lea_modrm(env, s, modrm);
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
    case 2: /* ldmxcsr */
    case 3: /* stmxcsr */
        if (s->flags & HF_TS_MASK) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
        gen_lea_modrm(env, s, modrm);
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
        gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
    case 5: /* lfence */
    case 6: /* mfence */
        if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
    case 7: /* sfence / clflush */
        if ((modrm & 0xc7) == 0xc0) {
            /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
            if (!(s->cpuid_features & CPUID_SSE))
            if (!(s->cpuid_features & CPUID_CLFLUSH))
            gen_lea_modrm(env, s, modrm);
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(env, s, modrm);
        /* ignore for now */
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (s->prefix & PREFIX_DATA) {
        ot = mo_64_32(dflag);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
        gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
void optimize_flags_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
    cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUX86State, regs[i]),
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(X86CPU *cpu,
                                                  TranslationBlock *tb,
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    target_ulong pc_start;
    target_ulong cs_base;
    /* generate intermediate code */
    cs_base = tb->cs_base;
    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = cpu_mmu_index(env);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes. The first one was used
       always except single step mode. And this setting
       disables jumps optimization and control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();
    dc->is_jmp = DISAS_NEXT;
    max_insns = tb->cflags & CF_COUNT_MASK;
    max_insns = CF_COUNT_MASK;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    goto done_generating;
        j = tcg_op_buf_count();
        tcg_ctx.gen_opc_instr_start[lj++] = 0;
        tcg_ctx.gen_opc_pc[lj] = pc_ptr;
        gen_opc_cc_op[lj] = dc->cc_op;
        tcg_ctx.gen_opc_instr_start[lj] = 1;
        tcg_ctx.gen_opc_icount[lj] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception. Do it only when boundary is
           crossed by the first instruction in the block.
           If current instruction already crossed the bound - it's ok,
           because an exception hasn't stopped this code.
         */
        if ((tb->cflags & CF_USE_ICOUNT)
            && ((pc_ptr & TARGET_PAGE_MASK)
                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
        /* if too long translation, stop generation too */
        if (tcg_op_buf_full() ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
        gen_jmp_im(pc_ptr - dc->cs_base);
    if (tb->cflags & CF_LAST_IO)
    gen_tb_end(tb, num_insns);
    /* don't forget to fill the last values */
    j = tcg_op_buf_count();
    tcg_ctx.gen_opc_instr_start[lj++] = 0;
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        disas_flags = !dc->code32;
        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        qemu_log("RESTORE:\n");
        for (i = 0; i <= pc_pos; i++) {
            if (tcg_ctx.gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
                         tcg_ctx.gen_opc_pc[i]);
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)