 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

static int x86_64_hregs;
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty; /* cpu_cc_op is stale and must be flushed (see set_cc_op) */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
/* i386 arith/logic operations */
enum {
    /* ... */
    OP_SHL1, /* undocumented */
    /* ... */
};

/* I386 int registers */
enum {
    OR_EAX,   /* MUST be even numbered */
    /* ... */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
static void set_cc_op(DisasContext *s, CCOp op)
{
    if (s->cc_op != op) {
        s->cc_op = op;
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = (op != CC_OP_DYNAMIC);
    }
}
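
/* Flush the translator's lazily tracked cc_op value into the cpu_cc_op
   global so that generated code and helpers observe the current flag
   computation mode. */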
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */
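
/* Byte offsets of the 8/16/32-bit views of a guest register within its
   target_ulong slot; they depend on host endianness so that partial
   register accesses address the correct bytes. */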
#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
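
/* Store the low 'ot'-sized part of t0 into guest register 'reg', leaving
   the bytes the instruction does not write unchanged (except that the
   32-bit case zero-extends the high half on x86_64). */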
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case OT_WORD:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case 1:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}
static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}
static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}
static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}
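
/* Add an immediate to guest register 'reg' for the given address size,
   writing back only the bits that the addressing mode allows to change. */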
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}
static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case 1:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env,
                     offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}
static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}
static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case OT_LONG:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}
static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case OT_LONG:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case OT_QUAD:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}
/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case OT_LONG:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case OT_QUAD:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}
static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
614 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
618 override
= s
->override
;
622 gen_op_movq_A0_seg(override
);
623 gen_op_addq_A0_reg_sN(0, R_ESI
);
625 gen_op_movq_A0_reg(R_ESI
);
631 if (s
->addseg
&& override
< 0)
634 gen_op_movl_A0_seg(override
);
635 gen_op_addl_A0_reg_sN(0, R_ESI
);
637 gen_op_movl_A0_reg(R_ESI
);
640 /* 16 address, always override */
643 gen_op_movl_A0_reg(R_ESI
);
644 gen_op_andl_A0_ffff();
645 gen_op_addl_A0_seg(s
, override
);
649 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
653 gen_op_movq_A0_reg(R_EDI
);
658 gen_op_movl_A0_seg(R_ES
);
659 gen_op_addl_A0_reg_sN(0, R_EDI
);
661 gen_op_movl_A0_reg(R_EDI
);
664 gen_op_movl_A0_reg(R_EDI
);
665 gen_op_andl_A0_ffff();
666 gen_op_addl_A0_seg(s
, R_ES
);
670 static inline void gen_op_movl_T0_Dshift(int ot
)
672 tcg_gen_ld32s_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, df
));
673 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], ot
);
676 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, int size
, bool sign
)
681 tcg_gen_ext8s_tl(dst
, src
);
683 tcg_gen_ext8u_tl(dst
, src
);
688 tcg_gen_ext16s_tl(dst
, src
);
690 tcg_gen_ext16u_tl(dst
, src
);
696 tcg_gen_ext32s_tl(dst
, src
);
698 tcg_gen_ext32u_tl(dst
, src
);
707 static void gen_extu(int ot
, TCGv reg
)
709 gen_ext_tl(reg
, reg
, ot
, false);
712 static void gen_exts(int ot
, TCGv reg
)
714 gen_ext_tl(reg
, reg
, ot
, true);
717 static inline void gen_op_jnz_ecx(int size
, int label1
)
719 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
720 gen_extu(size
+ 1, cpu_tmp0
);
721 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, label1
);
724 static inline void gen_op_jz_ecx(int size
, int label1
)
726 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
727 gen_extu(size
+ 1, cpu_tmp0
);
728 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
731 static void gen_helper_in_func(int ot
, TCGv v
, TCGv_i32 n
)
735 gen_helper_inb(v
, n
);
738 gen_helper_inw(v
, n
);
741 gen_helper_inl(v
, n
);
746 static void gen_helper_out_func(int ot
, TCGv_i32 v
, TCGv_i32 n
)
750 gen_helper_outb(v
, n
);
753 gen_helper_outw(v
, n
);
756 gen_helper_outl(v
, n
);
761 static void gen_check_io(DisasContext
*s
, int ot
, target_ulong cur_eip
,
765 target_ulong next_eip
;
768 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
772 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
775 gen_helper_check_iob(cpu_env
, cpu_tmp2_i32
);
778 gen_helper_check_iow(cpu_env
, cpu_tmp2_i32
);
781 gen_helper_check_iol(cpu_env
, cpu_tmp2_i32
);
785 if(s
->flags
& HF_SVMI_MASK
) {
790 svm_flags
|= (1 << (4 + ot
));
791 next_eip
= s
->pc
- s
->cs_base
;
792 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
793 gen_helper_svm_check_io(cpu_env
, cpu_tmp2_i32
,
794 tcg_const_i32(svm_flags
),
795 tcg_const_i32(next_eip
- cur_eip
));
799 static inline void gen_movs(DisasContext
*s
, int ot
)
801 gen_string_movl_A0_ESI(s
);
802 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
803 gen_string_movl_A0_EDI(s
);
804 gen_op_st_T0_A0(ot
+ s
->mem_index
);
805 gen_op_movl_T0_Dshift(ot
);
806 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
807 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
810 static void gen_op_update1_cc(void)
812 tcg_gen_discard_tl(cpu_cc_src
);
813 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
816 static void gen_op_update2_cc(void)
818 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
819 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
822 static inline void gen_op_cmpl_T0_T1_cc(void)
824 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
825 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
828 static inline void gen_op_testl_T0_T1_cc(void)
830 tcg_gen_discard_tl(cpu_cc_src
);
831 tcg_gen_and_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
834 static void gen_op_update_neg_cc(void)
836 tcg_gen_neg_tl(cpu_cc_src
, cpu_T
[0]);
837 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
840 /* compute eflags.C to reg */
841 static void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
844 gen_helper_cc_compute_c(cpu_tmp2_i32
, cpu_env
, cpu_cc_op
);
845 tcg_gen_extu_i32_tl(reg
, cpu_tmp2_i32
);
848 /* compute all eflags to reg */
849 static void gen_compute_eflags(DisasContext
*s
, TCGv reg
)
852 gen_helper_cc_compute_all(cpu_tmp2_i32
, cpu_env
, cpu_cc_op
);
853 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
854 tcg_gen_discard_tl(cpu_cc_dst
);
855 set_cc_op(s
, CC_OP_EFLAGS
);
857 tcg_gen_extu_i32_tl(reg
, cpu_tmp2_i32
);
860 /* compute eflags.P to reg */
861 static void gen_compute_eflags_p(DisasContext
*s
, TCGv reg
)
863 gen_compute_eflags(s
, reg
);
864 tcg_gen_shri_tl(reg
, reg
, 2);
865 tcg_gen_andi_tl(reg
, reg
, 1);
868 /* compute eflags.S to reg */
869 static void gen_compute_eflags_s(DisasContext
*s
, TCGv reg
)
871 gen_compute_eflags(s
, reg
);
872 tcg_gen_shri_tl(reg
, reg
, 7);
873 tcg_gen_andi_tl(reg
, reg
, 1);
876 /* compute eflags.O to reg */
877 static void gen_compute_eflags_o(DisasContext
*s
, TCGv reg
)
879 gen_compute_eflags(s
, reg
);
880 tcg_gen_shri_tl(reg
, reg
, 11);
881 tcg_gen_andi_tl(reg
, reg
, 1);
884 /* compute eflags.Z to reg */
885 static void gen_compute_eflags_z(DisasContext
*s
, TCGv reg
)
887 gen_compute_eflags(s
, reg
);
888 tcg_gen_shri_tl(reg
, reg
, 6);
889 tcg_gen_andi_tl(reg
, reg
, 1);
892 static inline void gen_setcc_slow_T0(DisasContext
*s
, int jcc_op
)
896 gen_compute_eflags_o(s
, cpu_T
[0]);
899 gen_compute_eflags_c(s
, cpu_T
[0]);
902 gen_compute_eflags_z(s
, cpu_T
[0]);
905 gen_compute_eflags(s
, cpu_tmp0
);
906 tcg_gen_shri_tl(cpu_T
[0], cpu_tmp0
, 6);
907 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
908 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
911 gen_compute_eflags_s(s
, cpu_T
[0]);
914 gen_compute_eflags_p(s
, cpu_T
[0]);
917 gen_compute_eflags(s
, cpu_tmp0
);
918 tcg_gen_shri_tl(cpu_T
[0], cpu_tmp0
, 11); /* CC_O */
919 tcg_gen_shri_tl(cpu_tmp0
, cpu_tmp0
, 7); /* CC_S */
920 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
921 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
925 gen_compute_eflags(s
, cpu_tmp0
);
926 tcg_gen_shri_tl(cpu_T
[0], cpu_tmp0
, 11); /* CC_O */
927 tcg_gen_shri_tl(cpu_tmp4
, cpu_tmp0
, 7); /* CC_S */
928 tcg_gen_shri_tl(cpu_tmp0
, cpu_tmp0
, 6); /* CC_Z */
929 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
930 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
931 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 1);
936 /* return true if setcc_slow is not needed (WARNING: must be kept in
937 sync with gen_jcc1) */
938 static int is_fast_jcc_case(DisasContext
*s
, int b
)
941 jcc_op
= (b
>> 1) & 7;
943 /* we optimize the cmp/jcc case */
948 if (jcc_op
== JCC_O
|| jcc_op
== JCC_P
)
952 /* some jumps are easy to compute */
977 if (jcc_op
!= JCC_Z
&& jcc_op
!= JCC_S
)
987 /* generate a conditional jump to label 'l1' according to jump opcode
988 value 'b'. In the fast case, T0 is guaranted not to be used. */
989 static inline void gen_jcc1(DisasContext
*s
, int b
, int l1
)
991 int inv
, jcc_op
, size
, cond
;
995 jcc_op
= (b
>> 1) & 7;
998 /* we optimize the cmp/jcc case */
1004 size
= s
->cc_op
- CC_OP_SUBB
;
1008 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_dst
, size
, false);
1009 tcg_gen_brcondi_tl(inv
? TCG_COND_NE
: TCG_COND_EQ
, t0
, 0, l1
);
1013 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_dst
, size
, true);
1014 tcg_gen_brcondi_tl(inv
? TCG_COND_GE
: TCG_COND_LT
, t0
, 0, l1
);
1018 cond
= inv
? TCG_COND_GEU
: TCG_COND_LTU
;
1021 cond
= inv
? TCG_COND_GTU
: TCG_COND_LEU
;
1023 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1024 gen_extu(size
, cpu_tmp4
);
1025 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
1026 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1030 cond
= inv
? TCG_COND_GE
: TCG_COND_LT
;
1033 cond
= inv
? TCG_COND_GT
: TCG_COND_LE
;
1035 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1036 gen_exts(size
, cpu_tmp4
);
1037 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
1038 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1046 /* some jumps are easy to compute */
1088 size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1091 size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1099 gen_setcc_slow_T0(s
, jcc_op
);
1100 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
,
1106 /* XXX: does not work with gdbstub "ice" single step - not a
1108 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1112 l1
= gen_new_label();
1113 l2
= gen_new_label();
1114 gen_op_jnz_ecx(s
->aflag
, l1
);
1116 gen_jmp_tb(s
, next_eip
, 1);
1121 static inline void gen_stos(DisasContext
*s
, int ot
)
1123 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1124 gen_string_movl_A0_EDI(s
);
1125 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1126 gen_op_movl_T0_Dshift(ot
);
1127 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1130 static inline void gen_lods(DisasContext
*s
, int ot
)
1132 gen_string_movl_A0_ESI(s
);
1133 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1134 gen_op_mov_reg_T0(ot
, R_EAX
);
1135 gen_op_movl_T0_Dshift(ot
);
1136 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1139 static inline void gen_scas(DisasContext
*s
, int ot
)
1141 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1142 gen_string_movl_A0_EDI(s
);
1143 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1144 gen_op_cmpl_T0_T1_cc();
1145 gen_op_movl_T0_Dshift(ot
);
1146 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1147 set_cc_op(s
, CC_OP_SUBB
+ ot
);
1150 static inline void gen_cmps(DisasContext
*s
, int ot
)
1152 gen_string_movl_A0_ESI(s
);
1153 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1154 gen_string_movl_A0_EDI(s
);
1155 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1156 gen_op_cmpl_T0_T1_cc();
1157 gen_op_movl_T0_Dshift(ot
);
1158 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1159 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1160 set_cc_op(s
, CC_OP_SUBB
+ ot
);
1163 static inline void gen_ins(DisasContext
*s
, int ot
)
1167 gen_string_movl_A0_EDI(s
);
1168 /* Note: we must do this dummy write first to be restartable in
1169 case of page fault. */
1171 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1172 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1173 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1174 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1175 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1176 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1177 gen_op_movl_T0_Dshift(ot
);
1178 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1183 static inline void gen_outs(DisasContext
*s
, int ot
)
1187 gen_string_movl_A0_ESI(s
);
1188 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1190 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1191 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1192 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1193 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1194 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1196 gen_op_movl_T0_Dshift(ot
);
1197 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1202 /* same method as Valgrind : we generate jumps to current or next
1204 #define GEN_REPZ(op) \
1205 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1206 target_ulong cur_eip, target_ulong next_eip) \
1209 gen_update_cc_op(s); \
1210 l2 = gen_jz_ecx_string(s, next_eip); \
1211 gen_ ## op(s, ot); \
1212 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1213 /* a loop would cause two single step exceptions if ECX = 1 \
1214 before rep string_insn */ \
1216 gen_op_jz_ecx(s->aflag, l2); \
1217 gen_jmp(s, cur_eip); \
1220 #define GEN_REPZ2(op) \
1221 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1222 target_ulong cur_eip, \
1223 target_ulong next_eip, \
1227 gen_update_cc_op(s); \
1228 l2 = gen_jz_ecx_string(s, next_eip); \
1229 gen_ ## op(s, ot); \
1230 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1231 gen_update_cc_op(s); \
1232 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1234 gen_op_jz_ecx(s->aflag, l2); \
1235 gen_jmp(s, cur_eip); \
1236 set_cc_op(s, CC_OP_DYNAMIC); \
1247 static void gen_helper_fp_arith_ST0_FT0(int op
)
1251 gen_helper_fadd_ST0_FT0(cpu_env
);
1254 gen_helper_fmul_ST0_FT0(cpu_env
);
1257 gen_helper_fcom_ST0_FT0(cpu_env
);
1260 gen_helper_fcom_ST0_FT0(cpu_env
);
1263 gen_helper_fsub_ST0_FT0(cpu_env
);
1266 gen_helper_fsubr_ST0_FT0(cpu_env
);
1269 gen_helper_fdiv_ST0_FT0(cpu_env
);
1272 gen_helper_fdivr_ST0_FT0(cpu_env
);
1277 /* NOTE the exception in "r" op ordering */
1278 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1280 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1283 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1286 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1289 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1292 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1295 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1298 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1303 /* if d == OR_TMP0, it means memory operand (address in A0) */
1304 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1307 gen_op_mov_TN_reg(ot
, 0, d
);
1309 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1313 gen_compute_eflags_c(s1
, cpu_tmp4
);
1314 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1315 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1317 gen_op_mov_reg_T0(ot
, d
);
1319 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1320 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1321 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1322 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1323 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1324 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_ADDB
+ ot
);
1325 set_cc_op(s1
, CC_OP_DYNAMIC
);
1328 gen_compute_eflags_c(s1
, cpu_tmp4
);
1329 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1330 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1332 gen_op_mov_reg_T0(ot
, d
);
1334 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1335 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1336 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1337 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1338 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1339 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_SUBB
+ ot
);
1340 set_cc_op(s1
, CC_OP_DYNAMIC
);
1343 gen_op_addl_T0_T1();
1345 gen_op_mov_reg_T0(ot
, d
);
1347 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1348 gen_op_update2_cc();
1349 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1352 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1354 gen_op_mov_reg_T0(ot
, d
);
1356 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1357 gen_op_update2_cc();
1358 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1362 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1364 gen_op_mov_reg_T0(ot
, d
);
1366 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1367 gen_op_update1_cc();
1368 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1371 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1373 gen_op_mov_reg_T0(ot
, d
);
1375 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1376 gen_op_update1_cc();
1377 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1380 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1382 gen_op_mov_reg_T0(ot
, d
);
1384 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1385 gen_op_update1_cc();
1386 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1389 gen_op_cmpl_T0_T1_cc();
1390 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1395 /* if d == OR_TMP0, it means memory operand (address in A0) */
1396 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1399 gen_op_mov_TN_reg(ot
, 0, d
);
1401 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1402 gen_compute_eflags_c(s1
, cpu_cc_src
);
1404 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1405 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1407 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1408 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1411 gen_op_mov_reg_T0(ot
, d
);
1413 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1414 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1417 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1418 int is_right
, int is_arith
)
1424 if (ot
== OT_QUAD
) {
1431 if (op1
== OR_TMP0
) {
1432 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1434 gen_op_mov_TN_reg(ot
, 0, op1
);
1437 t0
= tcg_temp_local_new();
1438 t1
= tcg_temp_local_new();
1439 t2
= tcg_temp_local_new();
1441 tcg_gen_andi_tl(t2
, cpu_T
[1], mask
);
1445 gen_exts(ot
, cpu_T
[0]);
1446 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1447 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], t2
);
1449 gen_extu(ot
, cpu_T
[0]);
1450 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1451 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], t2
);
1454 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1455 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], t2
);
1459 if (op1
== OR_TMP0
) {
1460 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1462 gen_op_mov_reg_T0(ot
, op1
);
1466 gen_update_cc_op(s
);
1468 tcg_gen_mov_tl(t1
, cpu_T
[0]);
1470 shift_label
= gen_new_label();
1471 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, shift_label
);
1473 tcg_gen_addi_tl(t2
, t2
, -1);
1474 tcg_gen_mov_tl(cpu_cc_dst
, t1
);
1478 tcg_gen_sar_tl(cpu_cc_src
, t0
, t2
);
1480 tcg_gen_shr_tl(cpu_cc_src
, t0
, t2
);
1483 tcg_gen_shl_tl(cpu_cc_src
, t0
, t2
);
1487 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1489 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1492 gen_set_label(shift_label
);
1493 set_cc_op(s
, CC_OP_DYNAMIC
); /* cannot predict flags after */
1500 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1501 int is_right
, int is_arith
)
1512 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1514 gen_op_mov_TN_reg(ot
, 0, op1
);
1520 gen_exts(ot
, cpu_T
[0]);
1521 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1522 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1524 gen_extu(ot
, cpu_T
[0]);
1525 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1526 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1529 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1530 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1536 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1538 gen_op_mov_reg_T0(ot
, op1
);
1540 /* update eflags if non zero shift */
1542 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1543 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1544 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1548 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1551 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1553 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1556 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
,
1560 int label1
, label2
, data_bits
;
1561 TCGv t0
, t1
, t2
, a0
;
1563 /* XXX: inefficient, but we must use local temps */
1564 t0
= tcg_temp_local_new();
1565 t1
= tcg_temp_local_new();
1566 t2
= tcg_temp_local_new();
1567 a0
= tcg_temp_local_new();
1575 if (op1
== OR_TMP0
) {
1576 tcg_gen_mov_tl(a0
, cpu_A0
);
1577 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1579 gen_op_mov_v_reg(ot
, t0
, op1
);
1582 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1584 tcg_gen_andi_tl(t1
, t1
, mask
);
1586 /* Must test zero case to avoid using undefined behaviour in TCG
1588 label1
= gen_new_label();
1589 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label1
);
1592 tcg_gen_andi_tl(cpu_tmp0
, t1
, (1 << (3 + ot
)) - 1);
1594 tcg_gen_mov_tl(cpu_tmp0
, t1
);
1597 tcg_gen_mov_tl(t2
, t0
);
1599 data_bits
= 8 << ot
;
1600 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1601 fix TCG definition) */
1603 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1604 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1605 tcg_gen_shl_tl(t0
, t0
, cpu_tmp0
);
1607 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1608 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1609 tcg_gen_shr_tl(t0
, t0
, cpu_tmp0
);
1611 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1613 gen_set_label(label1
);
1615 if (op1
== OR_TMP0
) {
1616 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1618 gen_op_mov_reg_v(ot
, op1
, t0
);
1621 /* update eflags. It is needed anyway most of the time, do it always. */
1622 gen_compute_eflags(s
, cpu_cc_src
);
1623 assert(s
->cc_op
== CC_OP_EFLAGS
);
1625 label2
= gen_new_label();
1626 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label2
);
1628 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1629 tcg_gen_xor_tl(cpu_tmp0
, t2
, t0
);
1630 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1631 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1632 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1634 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1636 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1637 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1639 gen_set_label(label2
);
1647 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1654 /* XXX: inefficient, but we must use local temps */
1655 t0
= tcg_temp_local_new();
1656 t1
= tcg_temp_local_new();
1657 a0
= tcg_temp_local_new();
1665 if (op1
== OR_TMP0
) {
1666 tcg_gen_mov_tl(a0
, cpu_A0
);
1667 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1669 gen_op_mov_v_reg(ot
, t0
, op1
);
1673 tcg_gen_mov_tl(t1
, t0
);
1676 data_bits
= 8 << ot
;
1678 int shift
= op2
& ((1 << (3 + ot
)) - 1);
1680 tcg_gen_shri_tl(cpu_tmp4
, t0
, shift
);
1681 tcg_gen_shli_tl(t0
, t0
, data_bits
- shift
);
1684 tcg_gen_shli_tl(cpu_tmp4
, t0
, shift
);
1685 tcg_gen_shri_tl(t0
, t0
, data_bits
- shift
);
1687 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1691 if (op1
== OR_TMP0
) {
1692 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1694 gen_op_mov_reg_v(ot
, op1
, t0
);
1699 gen_compute_eflags(s
, cpu_cc_src
);
1700 assert(s
->cc_op
== CC_OP_EFLAGS
);
1702 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1703 tcg_gen_xor_tl(cpu_tmp0
, t1
, t0
);
1704 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1705 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1706 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1708 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1710 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1711 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1719 /* XXX: add faster immediate = 1 case */
1720 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1723 gen_update_cc_op(s
);
1724 gen_compute_eflags(s
, cpu_cc_src
);
1725 assert(s
->cc_op
== CC_OP_EFLAGS
);
1729 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1731 gen_op_mov_TN_reg(ot
, 0, op1
);
1736 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1739 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1742 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1744 #ifdef TARGET_X86_64
1746 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1753 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1756 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1759 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1761 #ifdef TARGET_X86_64
1763 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1770 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1772 gen_op_mov_reg_T0(ot
, op1
);
1775 /* XXX: add faster immediate case */
1776 static void gen_shiftd_rm_T1_T3(DisasContext
*s
, int ot
, int op1
,
1779 int label1
, label2
, data_bits
;
1781 TCGv t0
, t1
, t2
, a0
;
1783 t0
= tcg_temp_local_new();
1784 t1
= tcg_temp_local_new();
1785 t2
= tcg_temp_local_new();
1786 a0
= tcg_temp_local_new();
1794 if (op1
== OR_TMP0
) {
1795 tcg_gen_mov_tl(a0
, cpu_A0
);
1796 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1798 gen_op_mov_v_reg(ot
, t0
, op1
);
1801 tcg_gen_andi_tl(cpu_T3
, cpu_T3
, mask
);
1803 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1804 tcg_gen_mov_tl(t2
, cpu_T3
);
1806 /* Must test zero case to avoid using undefined behaviour in TCG
1808 label1
= gen_new_label();
1809 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
1811 tcg_gen_addi_tl(cpu_tmp5
, t2
, -1);
1812 if (ot
== OT_WORD
) {
1813 /* Note: we implement the Intel behaviour for shift count > 16 */
1815 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1816 tcg_gen_shli_tl(cpu_tmp0
, t1
, 16);
1817 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1818 tcg_gen_ext32u_tl(t0
, t0
);
1820 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1822 /* only needed if count > 16, but a test would complicate */
1823 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1824 tcg_gen_shl_tl(cpu_tmp0
, t0
, cpu_tmp5
);
1826 tcg_gen_shr_tl(t0
, t0
, t2
);
1828 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1830 /* XXX: not optimal */
1831 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1832 tcg_gen_shli_tl(t1
, t1
, 16);
1833 tcg_gen_or_tl(t1
, t1
, t0
);
1834 tcg_gen_ext32u_tl(t1
, t1
);
1836 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1837 tcg_gen_subfi_tl(cpu_tmp0
, 32, cpu_tmp5
);
1838 tcg_gen_shr_tl(cpu_tmp5
, t1
, cpu_tmp0
);
1839 tcg_gen_or_tl(cpu_tmp4
, cpu_tmp4
, cpu_tmp5
);
1841 tcg_gen_shl_tl(t0
, t0
, t2
);
1842 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1843 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1844 tcg_gen_or_tl(t0
, t0
, t1
);
1847 data_bits
= 8 << ot
;
1850 tcg_gen_ext32u_tl(t0
, t0
);
1852 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1854 tcg_gen_shr_tl(t0
, t0
, t2
);
1855 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1856 tcg_gen_shl_tl(t1
, t1
, cpu_tmp5
);
1857 tcg_gen_or_tl(t0
, t0
, t1
);
1861 tcg_gen_ext32u_tl(t1
, t1
);
1863 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1865 tcg_gen_shl_tl(t0
, t0
, t2
);
1866 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1867 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1868 tcg_gen_or_tl(t0
, t0
, t1
);
1871 tcg_gen_mov_tl(t1
, cpu_tmp4
);
1873 gen_set_label(label1
);
1875 if (op1
== OR_TMP0
) {
1876 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1878 gen_op_mov_reg_v(ot
, op1
, t0
);
1882 gen_update_cc_op(s
);
1884 label2
= gen_new_label();
1885 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label2
);
1887 tcg_gen_mov_tl(cpu_cc_src
, t1
);
1888 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
1890 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1892 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1894 gen_set_label(label2
);
1895 set_cc_op(s
, CC_OP_DYNAMIC
); /* cannot predict flags after */
1903 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
1906 gen_op_mov_TN_reg(ot
, 1, s
);
1909 gen_rot_rm_T1(s1
, ot
, d
, 0);
1912 gen_rot_rm_T1(s1
, ot
, d
, 1);
1916 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1919 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1922 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1925 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1928 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1933 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
1937 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1940 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1944 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1947 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1950 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1953 /* currently not optimized */
1954 gen_op_movl_T1_im(c
);
1955 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1960 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
1961 int *reg_ptr
, int *offset_ptr
)
1969 int mod
, rm
, code
, override
, must_add_seg
;
1971 override
= s
->override
;
1972 must_add_seg
= s
->addseg
;
1975 mod
= (modrm
>> 6) & 3;
1987 code
= cpu_ldub_code(env
, s
->pc
++);
1988 scale
= (code
>> 6) & 3;
1989 index
= ((code
>> 3) & 7) | REX_X(s
);
1996 if ((base
& 7) == 5) {
1998 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2000 if (CODE64(s
) && !havesib
) {
2001 disp
+= s
->pc
+ s
->rip_offset
;
2008 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2012 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2018 /* for correct popl handling with esp */
2019 if (base
== 4 && s
->popl_esp_hack
)
2020 disp
+= s
->popl_esp_hack
;
2021 #ifdef TARGET_X86_64
2022 if (s
->aflag
== 2) {
2023 gen_op_movq_A0_reg(base
);
2025 gen_op_addq_A0_im(disp
);
2030 gen_op_movl_A0_reg(base
);
2032 gen_op_addl_A0_im(disp
);
2035 #ifdef TARGET_X86_64
2036 if (s
->aflag
== 2) {
2037 gen_op_movq_A0_im(disp
);
2041 gen_op_movl_A0_im(disp
);
2044 /* index == 4 means no index */
2045 if (havesib
&& (index
!= 4)) {
2046 #ifdef TARGET_X86_64
2047 if (s
->aflag
== 2) {
2048 gen_op_addq_A0_reg_sN(scale
, index
);
2052 gen_op_addl_A0_reg_sN(scale
, index
);
2057 if (base
== R_EBP
|| base
== R_ESP
)
2062 #ifdef TARGET_X86_64
2063 if (s
->aflag
== 2) {
2064 gen_op_addq_A0_seg(override
);
2068 gen_op_addl_A0_seg(s
, override
);
2075 disp
= cpu_lduw_code(env
, s
->pc
);
2077 gen_op_movl_A0_im(disp
);
2078 rm
= 0; /* avoid SS override */
2085 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2089 disp
= cpu_lduw_code(env
, s
->pc
);
2095 gen_op_movl_A0_reg(R_EBX
);
2096 gen_op_addl_A0_reg_sN(0, R_ESI
);
2099 gen_op_movl_A0_reg(R_EBX
);
2100 gen_op_addl_A0_reg_sN(0, R_EDI
);
2103 gen_op_movl_A0_reg(R_EBP
);
2104 gen_op_addl_A0_reg_sN(0, R_ESI
);
2107 gen_op_movl_A0_reg(R_EBP
);
2108 gen_op_addl_A0_reg_sN(0, R_EDI
);
2111 gen_op_movl_A0_reg(R_ESI
);
2114 gen_op_movl_A0_reg(R_EDI
);
2117 gen_op_movl_A0_reg(R_EBP
);
2121 gen_op_movl_A0_reg(R_EBX
);
2125 gen_op_addl_A0_im(disp
);
2126 gen_op_andl_A0_ffff();
2130 if (rm
== 2 || rm
== 3 || rm
== 6)
2135 gen_op_addl_A0_seg(s
, override
);
2145 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2147 int mod
, rm
, base
, code
;
2149 mod
= (modrm
>> 6) & 3;
2159 code
= cpu_ldub_code(env
, s
->pc
++);
2195 /* used for LEA and MOV AX, mem */
2196 static void gen_add_A0_ds_seg(DisasContext
*s
)
2198 int override
, must_add_seg
;
2199 must_add_seg
= s
->addseg
;
2201 if (s
->override
>= 0) {
2202 override
= s
->override
;
2206 #ifdef TARGET_X86_64
2208 gen_op_addq_A0_seg(override
);
2212 gen_op_addl_A0_seg(s
, override
);
2217 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2219 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2220 int ot
, int reg
, int is_store
)
2222 int mod
, rm
, opreg
, disp
;
2224 mod
= (modrm
>> 6) & 3;
2225 rm
= (modrm
& 7) | REX_B(s
);
2229 gen_op_mov_TN_reg(ot
, 0, reg
);
2230 gen_op_mov_reg_T0(ot
, rm
);
2232 gen_op_mov_TN_reg(ot
, 0, rm
);
2234 gen_op_mov_reg_T0(ot
, reg
);
2237 gen_lea_modrm(env
, s
, modrm
, &opreg
, &disp
);
2240 gen_op_mov_TN_reg(ot
, 0, reg
);
2241 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2243 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2245 gen_op_mov_reg_T0(ot
, reg
);
2250 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, int ot
)
2256 ret
= cpu_ldub_code(env
, s
->pc
);
2260 ret
= cpu_lduw_code(env
, s
->pc
);
2265 ret
= cpu_ldl_code(env
, s
->pc
);
2272 static inline int insn_const_size(unsigned int ot
)
2280 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2282 TranslationBlock
*tb
;
2285 pc
= s
->cs_base
+ eip
;
2287 /* NOTE: we handle the case where the TB spans two pages here */
2288 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2289 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2290 /* jump to same page: we can use a direct jump */
2291 tcg_gen_goto_tb(tb_num
);
2293 tcg_gen_exit_tb((tcg_target_long
)tb
+ tb_num
);
2295 /* jump to another page: currently not optimized */
2301 static inline void gen_jcc(DisasContext
*s
, int b
,
2302 target_ulong val
, target_ulong next_eip
)
2306 gen_update_cc_op(s
);
2308 l1
= gen_new_label();
2310 set_cc_op(s
, CC_OP_DYNAMIC
);
2312 gen_goto_tb(s
, 0, next_eip
);
2315 gen_goto_tb(s
, 1, val
);
2316 s
->is_jmp
= DISAS_TB_JUMP
;
2319 l1
= gen_new_label();
2320 l2
= gen_new_label();
2322 set_cc_op(s
, CC_OP_DYNAMIC
);
2324 gen_jmp_im(next_eip
);
2334 static void gen_setcc(DisasContext
*s
, int b
)
2336 int inv
, jcc_op
, l1
;
2339 if (is_fast_jcc_case(s
, b
)) {
2340 /* nominal case: we use a jump */
2341 /* XXX: make it faster by adding new instructions in TCG */
2342 t0
= tcg_temp_local_new();
2343 tcg_gen_movi_tl(t0
, 0);
2344 l1
= gen_new_label();
2345 gen_jcc1(s
, b
^ 1, l1
);
2346 tcg_gen_movi_tl(t0
, 1);
2348 tcg_gen_mov_tl(cpu_T
[0], t0
);
2351 /* slow case: it is more efficient not to generate a jump,
2352 although it is questionnable whether this optimization is
2355 jcc_op
= (b
>> 1) & 7;
2356 gen_setcc_slow_T0(s
, jcc_op
);
2358 tcg_gen_xori_tl(cpu_T
[0], cpu_T
[0], 1);
2363 static inline void gen_op_movl_T0_seg(int seg_reg
)
2365 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2366 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2369 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2371 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2372 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2373 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2374 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2375 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2376 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2379 /* move T0 to seg_reg and compute if the CPU state may change. Never
2380 call this function with seg_reg == R_CS */
2381 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2383 if (s
->pe
&& !s
->vm86
) {
2384 /* XXX: optimize by finding processor state dynamically */
2385 gen_update_cc_op(s
);
2386 gen_jmp_im(cur_eip
);
2387 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2388 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2389 /* abort translation because the addseg value may change or
2390 because ss32 may change. For R_SS, translation must always
2391 stop as a special handling must be done to disable hardware
2392 interrupts for the next instruction */
2393 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2394 s
->is_jmp
= DISAS_TB_JUMP
;
2396 gen_op_movl_seg_T0_vm(seg_reg
);
2397 if (seg_reg
== R_SS
)
2398 s
->is_jmp
= DISAS_TB_JUMP
;
2402 static inline int svm_is_rep(int prefixes
)
2404 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2408 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2409 uint32_t type
, uint64_t param
)
2411 /* no SVM activated; fast case */
2412 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2414 gen_update_cc_op(s
);
2415 gen_jmp_im(pc_start
- s
->cs_base
);
2416 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2417 tcg_const_i64(param
));
2421 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2423 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2426 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2428 #ifdef TARGET_X86_64
2430 gen_op_add_reg_im(2, R_ESP
, addend
);
2434 gen_op_add_reg_im(1, R_ESP
, addend
);
2436 gen_op_add_reg_im(0, R_ESP
, addend
);
2440 /* generate a push. It depends on ss32, addseg and dflag */
2441 static void gen_push_T0(DisasContext
*s
)
2443 #ifdef TARGET_X86_64
2445 gen_op_movq_A0_reg(R_ESP
);
2447 gen_op_addq_A0_im(-8);
2448 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2450 gen_op_addq_A0_im(-2);
2451 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2453 gen_op_mov_reg_A0(2, R_ESP
);
2457 gen_op_movl_A0_reg(R_ESP
);
2459 gen_op_addl_A0_im(-2);
2461 gen_op_addl_A0_im(-4);
2464 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2465 gen_op_addl_A0_seg(s
, R_SS
);
2468 gen_op_andl_A0_ffff();
2469 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2470 gen_op_addl_A0_seg(s
, R_SS
);
2472 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2473 if (s
->ss32
&& !s
->addseg
)
2474 gen_op_mov_reg_A0(1, R_ESP
);
2476 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2480 /* generate a push. It depends on ss32, addseg and dflag */
2481 /* slower version for T1, only used for call Ev */
2482 static void gen_push_T1(DisasContext
*s
)
2484 #ifdef TARGET_X86_64
2486 gen_op_movq_A0_reg(R_ESP
);
2488 gen_op_addq_A0_im(-8);
2489 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2491 gen_op_addq_A0_im(-2);
2492 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2494 gen_op_mov_reg_A0(2, R_ESP
);
2498 gen_op_movl_A0_reg(R_ESP
);
2500 gen_op_addl_A0_im(-2);
2502 gen_op_addl_A0_im(-4);
2505 gen_op_addl_A0_seg(s
, R_SS
);
2508 gen_op_andl_A0_ffff();
2509 gen_op_addl_A0_seg(s
, R_SS
);
2511 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2513 if (s
->ss32
&& !s
->addseg
)
2514 gen_op_mov_reg_A0(1, R_ESP
);
2516 gen_stack_update(s
, (-2) << s
->dflag
);
2520 /* two step pop is necessary for precise exceptions */
2521 static void gen_pop_T0(DisasContext
*s
)
2523 #ifdef TARGET_X86_64
2525 gen_op_movq_A0_reg(R_ESP
);
2526 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2530 gen_op_movl_A0_reg(R_ESP
);
2533 gen_op_addl_A0_seg(s
, R_SS
);
2535 gen_op_andl_A0_ffff();
2536 gen_op_addl_A0_seg(s
, R_SS
);
2538 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2542 static void gen_pop_update(DisasContext
*s
)
2544 #ifdef TARGET_X86_64
2545 if (CODE64(s
) && s
->dflag
) {
2546 gen_stack_update(s
, 8);
2550 gen_stack_update(s
, 2 << s
->dflag
);
2554 static void gen_stack_A0(DisasContext
*s
)
2556 gen_op_movl_A0_reg(R_ESP
);
2558 gen_op_andl_A0_ffff();
2559 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2561 gen_op_addl_A0_seg(s
, R_SS
);
2564 /* NOTE: wrap around in 16 bit not fully handled */
2565 static void gen_pusha(DisasContext
*s
)
2568 gen_op_movl_A0_reg(R_ESP
);
2569 gen_op_addl_A0_im(-16 << s
->dflag
);
2571 gen_op_andl_A0_ffff();
2572 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2574 gen_op_addl_A0_seg(s
, R_SS
);
2575 for(i
= 0;i
< 8; i
++) {
2576 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2577 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2578 gen_op_addl_A0_im(2 << s
->dflag
);
2580 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2583 /* NOTE: wrap around in 16 bit not fully handled */
2584 static void gen_popa(DisasContext
*s
)
2587 gen_op_movl_A0_reg(R_ESP
);
2589 gen_op_andl_A0_ffff();
2590 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2591 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2593 gen_op_addl_A0_seg(s
, R_SS
);
2594 for(i
= 0;i
< 8; i
++) {
2595 /* ESP is not reloaded */
2597 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2598 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2600 gen_op_addl_A0_im(2 << s
->dflag
);
2602 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2605 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2610 #ifdef TARGET_X86_64
2612 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2615 gen_op_movl_A0_reg(R_ESP
);
2616 gen_op_addq_A0_im(-opsize
);
2617 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2620 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2621 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2623 /* XXX: must save state */
2624 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2625 tcg_const_i32((ot
== OT_QUAD
)),
2628 gen_op_mov_reg_T1(ot
, R_EBP
);
2629 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2630 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2634 ot
= s
->dflag
+ OT_WORD
;
2635 opsize
= 2 << s
->dflag
;
2637 gen_op_movl_A0_reg(R_ESP
);
2638 gen_op_addl_A0_im(-opsize
);
2640 gen_op_andl_A0_ffff();
2641 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2643 gen_op_addl_A0_seg(s
, R_SS
);
2645 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2646 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2648 /* XXX: must save state */
2649 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2650 tcg_const_i32(s
->dflag
),
2653 gen_op_mov_reg_T1(ot
, R_EBP
);
2654 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2655 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2659 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2661 gen_update_cc_op(s
);
2662 gen_jmp_im(cur_eip
);
2663 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2664 s
->is_jmp
= DISAS_TB_JUMP
;
2667 /* an interrupt is different from an exception because of the
2669 static void gen_interrupt(DisasContext
*s
, int intno
,
2670 target_ulong cur_eip
, target_ulong next_eip
)
2672 gen_update_cc_op(s
);
2673 gen_jmp_im(cur_eip
);
2674 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2675 tcg_const_i32(next_eip
- cur_eip
));
2676 s
->is_jmp
= DISAS_TB_JUMP
;
2679 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2681 gen_update_cc_op(s
);
2682 gen_jmp_im(cur_eip
);
2683 gen_helper_debug(cpu_env
);
2684 s
->is_jmp
= DISAS_TB_JUMP
;
2687 /* generate a generic end of block. Trace exception is also generated
2689 static void gen_eob(DisasContext
*s
)
2691 gen_update_cc_op(s
);
2692 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2693 gen_helper_reset_inhibit_irq(cpu_env
);
2695 if (s
->tb
->flags
& HF_RF_MASK
) {
2696 gen_helper_reset_rf(cpu_env
);
2698 if (s
->singlestep_enabled
) {
2699 gen_helper_debug(cpu_env
);
2701 gen_helper_single_step(cpu_env
);
2705 s
->is_jmp
= DISAS_TB_JUMP
;
2708 /* generate a jump to eip. No segment change must happen before as a
2709 direct call to the next block may occur */
2710 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2713 gen_update_cc_op(s
);
2714 gen_goto_tb(s
, tb_num
, eip
);
2715 s
->is_jmp
= DISAS_TB_JUMP
;
2722 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2724 gen_jmp_tb(s
, eip
, 0);
2727 static inline void gen_ldq_env_A0(int idx
, int offset
)
2729 int mem_index
= (idx
>> 2) - 1;
2730 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2731 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2734 static inline void gen_stq_env_A0(int idx
, int offset
)
2736 int mem_index
= (idx
>> 2) - 1;
2737 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2738 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2741 static inline void gen_ldo_env_A0(int idx
, int offset
)
2743 int mem_index
= (idx
>> 2) - 1;
2744 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2745 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2746 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2747 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2748 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2751 static inline void gen_sto_env_A0(int idx
, int offset
)
2753 int mem_index
= (idx
>> 2) - 1;
2754 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2755 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2756 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2757 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2758 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2761 static inline void gen_op_movo(int d_offset
, int s_offset
)
2763 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2764 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2765 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2766 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2769 static inline void gen_op_movq(int d_offset
, int s_offset
)
2771 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2772 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2775 static inline void gen_op_movl(int d_offset
, int s_offset
)
2777 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2778 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2781 static inline void gen_op_movq_env_0(int d_offset
)
2783 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2784 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2787 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2788 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2789 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2790 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2791 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2792 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2794 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2795 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2798 #define SSE_SPECIAL ((void *)1)
2799 #define SSE_DUMMY ((void *)2)
2801 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2802 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2803 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq,
               gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
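
/* sse_op_table1 is indexed as [b][b1], where b is the second opcode byte of
   the 0x0f-escaped instruction and b1 encodes the mandatory prefix:
   0 = none (MMX / packed single), 1 = 0x66 (packed double / 128-bit int),
   2 = 0xf3 (scalar single), 3 = 0xf2 (scalar double).  SSE_SPECIAL entries
   are open-coded in gen_sse() below; SSE_DUMMY marks opcodes (femms, emms)
   that only need the MMX state handling. */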
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
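
/* sse_op_table2 handles the immediate-count shift groups 0x71/0x72/0x73:
   the row is ((b - 1) & 3) * 8 plus the /reg field of the ModRM byte, the
   column is b1 (0 = MMX operand, 1 = 0x66-prefixed XMM operand).  psrldq
   and pslldq only exist in the XMM column. */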
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
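
/* The table3 arrays cover the scalar integer <-> float conversions: the 'a'
   tables are the cvtsi2ss/cvtsi2sd style (integer source), the 'b' tables
   the cvt(t)ss/sd2si style (integer destination); the 'i' variants use a
   32-bit integer and the 'q' variants (64-bit targets only) a 64-bit one.
   The index is derived from the opcode and its 0xf3/0xf2 prefix in
   gen_sse() below. */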
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
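
/* 3DNow! data instructions share the 0x0f 0x0f opcode: the operation is
   selected by a trailing suffix byte, which gen_sse() reads after the
   ModRM/displacement and uses directly as the index into sse_op_table5. */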
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};
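
/* sse_op_table6 covers the 0x0f 0x38 opcode space (SSSE3/SSE4.1/SSE4.2
   operations without an immediate) and sse_op_table7 the 0x0f 0x3a space
   (operations taking a trailing imm8, hence the SSEFunc_0_eppi type).  Both
   are indexed by the third opcode byte; op[0] is the MMX form, op[1] the
   0x66-prefixed XMM form, and ext_mask is checked against the guest's CPUID
   feature bits before the operation is accepted. */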
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3139 /* simple MMX/SSE operation */
3140 if (s
->flags
& HF_TS_MASK
) {
3141 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3144 if (s
->flags
& HF_EM_MASK
) {
3146 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
3149 if (is_xmm
&& !(s
->flags
& HF_OSFXSR_MASK
))
3150 if ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))
3153 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
3156 gen_helper_emms(cpu_env
);
3161 gen_helper_emms(cpu_env
);
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    gen_helper_enter_mmx(cpu_env);

    modrm = cpu_ldub_code(env, s->pc++);
    reg = ((modrm >> 3) & 7);
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
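        /* The case values below fold the mandatory prefix into the high
           byte of the opcode: 0x0e7 is movntq (no prefix), 0x1e7 movntdq
           (0x66), 0x22b movntss (0xf3), 0x32b movntsd (0xf2), and so on.
           Each special case does its own operand handling instead of going
           through the generic table dispatch at the end of gen_sse(). */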
        case 0x0e7: /* movntq */
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
        case 0x3f0: /* lddqu */
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3198 case 0x22b: /* movntss */
3199 case 0x32b: /* movntsd */
3202 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3204 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,
3207 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3208 xmm_regs
[reg
].XMM_L(0)));
3209 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3212 case 0x6e: /* movd mm, ea */
3213 #ifdef TARGET_X86_64
3214 if (s
->dflag
== 2) {
3215 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3216 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3220 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3221 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3222 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3223 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3224 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3227 case 0x16e: /* movd xmm, ea */
3228 #ifdef TARGET_X86_64
3229 if (s
->dflag
== 2) {
3230 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3231 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3232 offsetof(CPUX86State
,xmm_regs
[reg
]));
3233 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3237 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3238 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3239 offsetof(CPUX86State
,xmm_regs
[reg
]));
3240 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3241 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3244 case 0x6f: /* movq mm, ea */
3246 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3247 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3250 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3251 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3252 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3253 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3256 case 0x010: /* movups */
3257 case 0x110: /* movupd */
3258 case 0x028: /* movaps */
3259 case 0x128: /* movapd */
3260 case 0x16f: /* movdqa xmm, ea */
3261 case 0x26f: /* movdqu xmm, ea */
3263 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3264 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3266 rm
= (modrm
& 7) | REX_B(s
);
3267 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3268 offsetof(CPUX86State
,xmm_regs
[rm
]));
3271 case 0x210: /* movss xmm, ea */
3273 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3274 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3275 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3277 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3278 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3279 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3281 rm
= (modrm
& 7) | REX_B(s
);
3282 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3283 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3286 case 0x310: /* movsd xmm, ea */
3288 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3289 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3291 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3292 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3294 rm
= (modrm
& 7) | REX_B(s
);
3295 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3296 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3299 case 0x012: /* movlps */
3300 case 0x112: /* movlpd */
3302 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3303 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3306 rm
= (modrm
& 7) | REX_B(s
);
3307 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3308 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3311 case 0x212: /* movsldup */
3313 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3314 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3316 rm
= (modrm
& 7) | REX_B(s
);
3317 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3318 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3319 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3320 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3322 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3323 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3324 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3325 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3327 case 0x312: /* movddup */
3329 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3330 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3332 rm
= (modrm
& 7) | REX_B(s
);
3333 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3334 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3336 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3337 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3339 case 0x016: /* movhps */
3340 case 0x116: /* movhpd */
3342 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3343 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3346 rm
= (modrm
& 7) | REX_B(s
);
3347 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3348 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3351 case 0x216: /* movshdup */
3353 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3354 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3356 rm
= (modrm
& 7) | REX_B(s
);
3357 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3358 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3359 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3360 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3362 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3363 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3364 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3365 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3370 int bit_index
, field_length
;
3372 if (b1
== 1 && reg
!= 0)
3374 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3375 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3376 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3377 offsetof(CPUX86State
,xmm_regs
[reg
]));
3379 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3380 tcg_const_i32(bit_index
),
3381 tcg_const_i32(field_length
));
3383 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3384 tcg_const_i32(bit_index
),
3385 tcg_const_i32(field_length
));
3388 case 0x7e: /* movd ea, mm */
3389 #ifdef TARGET_X86_64
3390 if (s
->dflag
== 2) {
3391 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3392 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3393 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3397 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3398 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3399 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3402 case 0x17e: /* movd ea, xmm */
3403 #ifdef TARGET_X86_64
3404 if (s
->dflag
== 2) {
3405 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3406 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3407 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3411 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3412 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3413 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3416 case 0x27e: /* movq xmm, ea */
3418 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3419 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3421 rm
= (modrm
& 7) | REX_B(s
);
3422 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3423 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3425 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3427 case 0x7f: /* movq ea, mm */
3429 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3430 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3433 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3434 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3437 case 0x011: /* movups */
3438 case 0x111: /* movupd */
3439 case 0x029: /* movaps */
3440 case 0x129: /* movapd */
3441 case 0x17f: /* movdqa ea, xmm */
3442 case 0x27f: /* movdqu ea, xmm */
3444 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3445 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3447 rm
= (modrm
& 7) | REX_B(s
);
3448 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3449 offsetof(CPUX86State
,xmm_regs
[reg
]));
3452 case 0x211: /* movss ea, xmm */
3454 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3455 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3456 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3458 rm
= (modrm
& 7) | REX_B(s
);
3459 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3460 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3463 case 0x311: /* movsd ea, xmm */
3465 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3466 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3468 rm
= (modrm
& 7) | REX_B(s
);
3469 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3470 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3473 case 0x013: /* movlps */
3474 case 0x113: /* movlpd */
3476 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3477 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3482 case 0x017: /* movhps */
3483 case 0x117: /* movhpd */
3485 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3486 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
        case 0x71: /* shift mm, im */
        case 0x171: /* shift xmm, im */
            val = cpu_ldub_code(env, s->pc++);
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
                                       (((modrm >> 3)) & 7)][b1];
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
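            /* Note: for the 0x71/0x72/0x73 shift-by-immediate forms above
               there is no separate helper taking an integer count.  The
               immediate is first written into the xmm_t0/mmx_t0 scratch
               register inside env, so the usual two-operand (env, dst, src)
               helper signature can be reused with the scratch register
               acting as the shift-count operand. */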
3530 case 0x050: /* movmskps */
3531 rm
= (modrm
& 7) | REX_B(s
);
3532 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3533 offsetof(CPUX86State
,xmm_regs
[rm
]));
3534 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3535 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3536 gen_op_mov_reg_T0(OT_LONG
, reg
);
3538 case 0x150: /* movmskpd */
3539 rm
= (modrm
& 7) | REX_B(s
);
3540 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3541 offsetof(CPUX86State
,xmm_regs
[rm
]));
3542 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3543 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3544 gen_op_mov_reg_T0(OT_LONG
, reg
);
3546 case 0x02a: /* cvtpi2ps */
3547 case 0x12a: /* cvtpi2pd */
3548 gen_helper_enter_mmx(cpu_env
);
3550 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3551 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3552 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3555 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3557 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3558 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3559 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3562 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3566 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3570 case 0x22a: /* cvtsi2ss */
3571 case 0x32a: /* cvtsi2sd */
3572 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3573 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3574 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3575 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3576 if (ot
== OT_LONG
) {
3577 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3578 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3579 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3581 #ifdef TARGET_X86_64
3582 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3583 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T
[0]);
3589 case 0x02c: /* cvttps2pi */
3590 case 0x12c: /* cvttpd2pi */
3591 case 0x02d: /* cvtps2pi */
3592 case 0x12d: /* cvtpd2pi */
3593 gen_helper_enter_mmx(cpu_env
);
3595 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3596 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3597 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3599 rm
= (modrm
& 7) | REX_B(s
);
3600 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3602 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3603 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3604 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3607 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3610 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3613 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3616 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3620 case 0x22c: /* cvttss2si */
3621 case 0x32c: /* cvttsd2si */
3622 case 0x22d: /* cvtss2si */
3623 case 0x32d: /* cvtsd2si */
3624 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3626 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3628 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3630 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3631 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3633 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3635 rm
= (modrm
& 7) | REX_B(s
);
3636 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3638 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3639 if (ot
== OT_LONG
) {
3640 SSEFunc_i_ep sse_fn_i_ep
=
3641 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3642 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3643 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3645 #ifdef TARGET_X86_64
3646 SSEFunc_l_ep sse_fn_l_ep
=
3647 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3648 sse_fn_l_ep(cpu_T
[0], cpu_env
, cpu_ptr0
);
3653 gen_op_mov_reg_T0(ot
, reg
);
3655 case 0xc4: /* pinsrw */
3658 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3659 val
= cpu_ldub_code(env
, s
->pc
++);
3662 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3663 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3666 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3667 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3670 case 0xc5: /* pextrw */
3674 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3675 val
= cpu_ldub_code(env
, s
->pc
++);
3678 rm
= (modrm
& 7) | REX_B(s
);
3679 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3680 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3684 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3685 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3687 reg
= ((modrm
>> 3) & 7) | rex_r
;
3688 gen_op_mov_reg_T0(ot
, reg
);
3690 case 0x1d6: /* movq ea, xmm */
3692 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3693 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3695 rm
= (modrm
& 7) | REX_B(s
);
3696 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3697 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3698 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3701 case 0x2d6: /* movq2dq */
3702 gen_helper_enter_mmx(cpu_env
);
3704 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3705 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3706 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3708 case 0x3d6: /* movdq2q */
3709 gen_helper_enter_mmx(cpu_env
);
3710 rm
= (modrm
& 7) | REX_B(s
);
3711 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3712 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3714 case 0xd7: /* pmovmskb */
3719 rm
= (modrm
& 7) | REX_B(s
);
3720 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3721 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3724 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3725 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3727 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3728 reg
= ((modrm
>> 3) & 7) | rex_r
;
3729 gen_op_mov_reg_T0(OT_LONG
, reg
);
3732 if (s
->prefix
& PREFIX_REPNZ
)
3736 modrm
= cpu_ldub_code(env
, s
->pc
++);
3738 reg
= ((modrm
>> 3) & 7) | rex_r
;
3739 mod
= (modrm
>> 6) & 3;
3744 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3748 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3752 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3754 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3756 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3757 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3759 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3760 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3761 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3762 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3763 offsetof(XMMReg
, XMM_Q(0)));
3765 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3766 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3767 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3768 (s
->mem_index
>> 2) - 1);
3769 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3770 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3771 offsetof(XMMReg
, XMM_L(0)));
3773 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3774 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3775 (s
->mem_index
>> 2) - 1);
3776 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3777 offsetof(XMMReg
, XMM_W(0)));
3779 case 0x2a: /* movntqda */
3780 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3783 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3787 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3789 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3791 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3792 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3793 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3796 if (sse_fn_epp
== SSE_SPECIAL
) {
3800 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3801 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3802 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3805 set_cc_op(s
, CC_OP_EFLAGS
);
3808 case 0x338: /* crc32 */
3811 modrm
= cpu_ldub_code(env
, s
->pc
++);
3812 reg
= ((modrm
>> 3) & 7) | rex_r
;
3814 if (b
!= 0xf0 && b
!= 0xf1)
3816 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
))
3821 else if (b
== 0xf1 && s
->dflag
!= 2)
3822 if (s
->prefix
& PREFIX_DATA
)
3829 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3830 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3831 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3832 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3833 cpu_T
[0], tcg_const_i32(8 << ot
));
3835 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3836 gen_op_mov_reg_T0(ot
, reg
);
3841 modrm
= cpu_ldub_code(env
, s
->pc
++);
3843 reg
= ((modrm
>> 3) & 7) | rex_r
;
3844 mod
= (modrm
>> 6) & 3;
3849 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
3853 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
3856 if (sse_fn_eppi
== SSE_SPECIAL
) {
3857 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3858 rm
= (modrm
& 7) | REX_B(s
);
3860 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3861 reg
= ((modrm
>> 3) & 7) | rex_r
;
3862 val
= cpu_ldub_code(env
, s
->pc
++);
3864 case 0x14: /* pextrb */
3865 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3866 xmm_regs
[reg
].XMM_B(val
& 15)));
3868 gen_op_mov_reg_T0(ot
, rm
);
3870 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
3871 (s
->mem_index
>> 2) - 1);
3873 case 0x15: /* pextrw */
3874 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3875 xmm_regs
[reg
].XMM_W(val
& 7)));
3877 gen_op_mov_reg_T0(ot
, rm
);
3879 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
3880 (s
->mem_index
>> 2) - 1);
3883 if (ot
== OT_LONG
) { /* pextrd */
3884 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
3885 offsetof(CPUX86State
,
3886 xmm_regs
[reg
].XMM_L(val
& 3)));
3887 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3889 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
3891 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
3892 (s
->mem_index
>> 2) - 1);
3893 } else { /* pextrq */
3894 #ifdef TARGET_X86_64
3895 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3896 offsetof(CPUX86State
,
3897 xmm_regs
[reg
].XMM_Q(val
& 1)));
3899 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
3901 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
3902 (s
->mem_index
>> 2) - 1);
3908 case 0x17: /* extractps */
3909 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3910 xmm_regs
[reg
].XMM_L(val
& 3)));
3912 gen_op_mov_reg_T0(ot
, rm
);
3914 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
3915 (s
->mem_index
>> 2) - 1);
3917 case 0x20: /* pinsrb */
3919 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
3921 tcg_gen_qemu_ld8u(cpu_tmp0
, cpu_A0
,
3922 (s
->mem_index
>> 2) - 1);
3923 tcg_gen_st8_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
,
3924 xmm_regs
[reg
].XMM_B(val
& 15)));
3926 case 0x21: /* insertps */
3928 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
3929 offsetof(CPUX86State
,xmm_regs
[rm
]
3930 .XMM_L((val
>> 6) & 3)));
3932 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3933 (s
->mem_index
>> 2) - 1);
3934 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3936 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
3937 offsetof(CPUX86State
,xmm_regs
[reg
]
3938 .XMM_L((val
>> 4) & 3)));
3940 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3941 cpu_env
, offsetof(CPUX86State
,
3942 xmm_regs
[reg
].XMM_L(0)));
3944 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3945 cpu_env
, offsetof(CPUX86State
,
3946 xmm_regs
[reg
].XMM_L(1)));
3948 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3949 cpu_env
, offsetof(CPUX86State
,
3950 xmm_regs
[reg
].XMM_L(2)));
3952 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3953 cpu_env
, offsetof(CPUX86State
,
3954 xmm_regs
[reg
].XMM_L(3)));
3957 if (ot
== OT_LONG
) { /* pinsrd */
3959 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
3961 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3962 (s
->mem_index
>> 2) - 1);
3963 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3964 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
3965 offsetof(CPUX86State
,
3966 xmm_regs
[reg
].XMM_L(val
& 3)));
3967 } else { /* pinsrq */
3968 #ifdef TARGET_X86_64
3970 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
3972 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
3973 (s
->mem_index
>> 2) - 1);
3974 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3975 offsetof(CPUX86State
,
3976 xmm_regs
[reg
].XMM_Q(val
& 1)));
3987 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3989 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3991 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3992 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3993 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3996 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3998 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4000 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4001 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4002 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4005 val
= cpu_ldub_code(env
, s
->pc
++);
4007 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4008 set_cc_op(s
, CC_OP_EFLAGS
);
4011 /* The helper must use entire 64-bit gp registers */
4015 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4016 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4017 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4023 /* generic MMX or SSE operation */
4025 case 0x70: /* pshufx insn */
4026 case 0xc6: /* pshufx insn */
4027 case 0xc2: /* compare insns */
4034 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4036 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4037 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4038 if (b1
>= 2 && ((b
>= 0x50 && b
<= 0x5f && b
!= 0x5b) ||
4040 /* specific case for SSE single instructions */
4043 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
4044 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4047 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_D(0)));
4050 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4053 rm
= (modrm
& 7) | REX_B(s
);
4054 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4057 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4059 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4060 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4061 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4064 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4068 case 0x0f: /* 3DNow! data insns */
4069 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4071 val
= cpu_ldub_code(env
, s
->pc
++);
4072 sse_fn_epp
= sse_op_table5
[val
];
4076 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4077 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4078 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4080 case 0x70: /* pshufx insn */
4081 case 0xc6: /* pshufx insn */
4082 val
= cpu_ldub_code(env
, s
->pc
++);
4083 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4084 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4085 /* XXX: introduce a new table? */
4086 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4087 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4091 val
= cpu_ldub_code(env
, s
->pc
++);
4094 sse_fn_epp
= sse_op_table4
[val
][b1
];
4096 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4097 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4098 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4101 /* maskmov : we must prepare A0 */
4104 #ifdef TARGET_X86_64
4105 if (s
->aflag
== 2) {
4106 gen_op_movq_A0_reg(R_EDI
);
4110 gen_op_movl_A0_reg(R_EDI
);
4112 gen_op_andl_A0_ffff();
4114 gen_add_A0_ds_seg(s
);
4116 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4117 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4118 /* XXX: introduce a new table? */
4119 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4120 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4123 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4124 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4125 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4128 if (b
== 0x2e || b
== 0x2f) {
4129 set_cc_op(s
, CC_OP_EFLAGS
);
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                               target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(pc_start);
    }
4155 #ifdef TARGET_X86_64
4160 s
->rip_offset
= 0; /* for relative ip address */
4162 b
= cpu_ldub_code(env
, s
->pc
);
4164 /* check prefixes */
4165 #ifdef TARGET_X86_64
4169 prefixes
|= PREFIX_REPZ
;
4172 prefixes
|= PREFIX_REPNZ
;
4175 prefixes
|= PREFIX_LOCK
;
4196 prefixes
|= PREFIX_DATA
;
4199 prefixes
|= PREFIX_ADR
;
        rex_w = (b >> 3) & 1;
        rex_r = (b & 0x4) << 1;
        s->rex_x = (b & 0x2) << 2;
        REX_B(s) = (b & 0x1) << 3;
        x86_64_hregs = 1; /* select uniform byte register addressing */
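        /* REX prefix (0x40-0x4f in 64-bit code): bit 3 is W (64-bit operand
           size); R, X and B extend the ModRM reg field, the SIB index and
           the ModRM rm/base field respectively.  They are pre-shifted above
           so they can simply be OR'ed into the 3-bit register numbers while
           decoding. */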
4211 /* 0x66 is ignored if rex.w is set */
4214 if (prefixes
& PREFIX_DATA
)
4217 if (!(prefixes
& PREFIX_ADR
))
4224 prefixes
|= PREFIX_REPZ
;
4227 prefixes
|= PREFIX_REPNZ
;
4230 prefixes
|= PREFIX_LOCK
;
4251 prefixes
|= PREFIX_DATA
;
4254 prefixes
|= PREFIX_ADR
;
4257 if (prefixes
& PREFIX_DATA
)
4259 if (prefixes
& PREFIX_ADR
)
4263 s
->prefix
= prefixes
;
4267 /* lock generation */
4268 if (prefixes
& PREFIX_LOCK
)
4271 /* now check op code */
4275 /**************************/
4276 /* extended op code */
4277 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4280 /**************************/
4298 ot
= dflag
+ OT_WORD
;
4301 case 0: /* OP Ev, Gv */
4302 modrm
= cpu_ldub_code(env
, s
->pc
++);
4303 reg
= ((modrm
>> 3) & 7) | rex_r
;
4304 mod
= (modrm
>> 6) & 3;
4305 rm
= (modrm
& 7) | REX_B(s
);
4307 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4309 } else if (op
== OP_XORL
&& rm
== reg
) {
4311 /* xor reg, reg optimisation */
4313 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4314 gen_op_mov_reg_T0(ot
, reg
);
4315 gen_op_update1_cc();
4320 gen_op_mov_TN_reg(ot
, 1, reg
);
4321 gen_op(s
, op
, ot
, opreg
);
4323 case 1: /* OP Gv, Ev */
4324 modrm
= cpu_ldub_code(env
, s
->pc
++);
4325 mod
= (modrm
>> 6) & 3;
4326 reg
= ((modrm
>> 3) & 7) | rex_r
;
4327 rm
= (modrm
& 7) | REX_B(s
);
4329 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4330 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4331 } else if (op
== OP_XORL
&& rm
== reg
) {
4334 gen_op_mov_TN_reg(ot
, 1, rm
);
4336 gen_op(s
, op
, ot
, reg
);
4338 case 2: /* OP A, Iv */
4339 val
= insn_get(env
, s
, ot
);
4340 gen_op_movl_T1_im(val
);
4341 gen_op(s
, op
, ot
, OR_EAX
);
4350 case 0x80: /* GRP1 */
4359 ot
= dflag
+ OT_WORD
;
4361 modrm
= cpu_ldub_code(env
, s
->pc
++);
4362 mod
= (modrm
>> 6) & 3;
4363 rm
= (modrm
& 7) | REX_B(s
);
4364 op
= (modrm
>> 3) & 7;
4370 s
->rip_offset
= insn_const_size(ot
);
4371 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4382 val
= insn_get(env
, s
, ot
);
4385 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4388 gen_op_movl_T1_im(val
);
4389 gen_op(s
, op
, ot
, opreg
);
4393 /**************************/
4394 /* inc, dec, and other misc arith */
4395 case 0x40 ... 0x47: /* inc Gv */
4396 ot
= dflag
? OT_LONG
: OT_WORD
;
4397 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4399 case 0x48 ... 0x4f: /* dec Gv */
4400 ot
= dflag
? OT_LONG
: OT_WORD
;
4401 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4403 case 0xf6: /* GRP3 */
4408 ot
= dflag
+ OT_WORD
;
4410 modrm
= cpu_ldub_code(env
, s
->pc
++);
4411 mod
= (modrm
>> 6) & 3;
4412 rm
= (modrm
& 7) | REX_B(s
);
4413 op
= (modrm
>> 3) & 7;
4416 s
->rip_offset
= insn_const_size(ot
);
4417 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4418 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4420 gen_op_mov_TN_reg(ot
, 0, rm
);
4425 val
= insn_get(env
, s
, ot
);
4426 gen_op_movl_T1_im(val
);
4427 gen_op_testl_T0_T1_cc();
4428 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4431 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4433 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4435 gen_op_mov_reg_T0(ot
, rm
);
4439 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
4441 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4443 gen_op_mov_reg_T0(ot
, rm
);
4445 gen_op_update_neg_cc();
4446 set_cc_op(s
, CC_OP_SUBB
+ ot
);
4451 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4452 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
4453 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
4454 /* XXX: use 32 bit mul which could be faster */
4455 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4456 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4457 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4458 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
4459 set_cc_op(s
, CC_OP_MULB
);
4462 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4463 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
4464 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
4465 /* XXX: use 32 bit mul which could be faster */
4466 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4467 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4468 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4469 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4470 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4471 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4472 set_cc_op(s
, CC_OP_MULW
);
4476 #ifdef TARGET_X86_64
4477 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4478 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4479 tcg_gen_ext32u_tl(cpu_T
[1], cpu_T
[1]);
4480 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4481 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4482 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4483 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4484 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4485 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
            t0 = tcg_temp_new_i64();
            t1 = tcg_temp_new_i64();
            gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
            tcg_gen_extu_i32_i64(t0, cpu_T[0]);
            tcg_gen_extu_i32_i64(t1, cpu_T[1]);
            tcg_gen_mul_i64(t0, t0, t1);
            tcg_gen_trunc_i64_i32(cpu_T[0], t0);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_trunc_i64_i32(cpu_T[0], t0);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
            tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
            set_cc_op(s, CC_OP_MULL);
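            /* The 32-bit MUL path above (for 32-bit hosts) widens both
               operands to 64 bits, performs one 64-bit multiply and then
               splits the product into EAX (low half) and EDX (high half).
               cc_dst gets the low half and cc_src the high half so that
               CC_OP_MULL can later derive CF/OF from whether the high part
               is non-zero. */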
4506 #ifdef TARGET_X86_64
4508 gen_helper_mulq_EAX_T0(cpu_env
, cpu_T
[0]);
4509 set_cc_op(s
, CC_OP_MULQ
);
4517 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4518 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4519 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
4520 /* XXX: use 32 bit mul which could be faster */
4521 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4522 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4523 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4524 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
4525 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4526 set_cc_op(s
, CC_OP_MULB
);
4529 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4530 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4531 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4532 /* XXX: use 32 bit mul which could be faster */
4533 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4534 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4535 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4536 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4537 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4538 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4539 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4540 set_cc_op(s
, CC_OP_MULW
);
4544 #ifdef TARGET_X86_64
4545 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4546 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4547 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4548 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4549 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4550 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4551 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4552 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4553 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4554 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4558 t0
= tcg_temp_new_i64();
4559 t1
= tcg_temp_new_i64();
4560 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4561 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4562 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4563 tcg_gen_mul_i64(t0
, t0
, t1
);
4564 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4565 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4566 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4567 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4568 tcg_gen_shri_i64(t0
, t0
, 32);
4569 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4570 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4571 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4574 set_cc_op(s
, CC_OP_MULL
);
4576 #ifdef TARGET_X86_64
4578 gen_helper_imulq_EAX_T0(cpu_env
, cpu_T
[0]);
4579 set_cc_op(s
, CC_OP_MULQ
);
4587 gen_jmp_im(pc_start
- s
->cs_base
);
4588 gen_helper_divb_AL(cpu_env
, cpu_T
[0]);
4591 gen_jmp_im(pc_start
- s
->cs_base
);
4592 gen_helper_divw_AX(cpu_env
, cpu_T
[0]);
4596 gen_jmp_im(pc_start
- s
->cs_base
);
4597 gen_helper_divl_EAX(cpu_env
, cpu_T
[0]);
4599 #ifdef TARGET_X86_64
4601 gen_jmp_im(pc_start
- s
->cs_base
);
4602 gen_helper_divq_EAX(cpu_env
, cpu_T
[0]);
4610 gen_jmp_im(pc_start
- s
->cs_base
);
4611 gen_helper_idivb_AL(cpu_env
, cpu_T
[0]);
4614 gen_jmp_im(pc_start
- s
->cs_base
);
4615 gen_helper_idivw_AX(cpu_env
, cpu_T
[0]);
4619 gen_jmp_im(pc_start
- s
->cs_base
);
4620 gen_helper_idivl_EAX(cpu_env
, cpu_T
[0]);
4622 #ifdef TARGET_X86_64
4624 gen_jmp_im(pc_start
- s
->cs_base
);
4625 gen_helper_idivq_EAX(cpu_env
, cpu_T
[0]);
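            /* Note: each div/idiv helper call above is preceded by
               gen_jmp_im(pc_start - s->cs_base), syncing EIP to the start of
               the instruction, because the helper may raise #DE and the
               guest must then see the faulting instruction pointer. */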
4635 case 0xfe: /* GRP4 */
4636 case 0xff: /* GRP5 */
4640 ot
= dflag
+ OT_WORD
;
4642 modrm
= cpu_ldub_code(env
, s
->pc
++);
4643 mod
= (modrm
>> 6) & 3;
4644 rm
= (modrm
& 7) | REX_B(s
);
4645 op
= (modrm
>> 3) & 7;
4646 if (op
>= 2 && b
== 0xfe) {
4650 if (op
== 2 || op
== 4) {
4651 /* operand size for jumps is 64 bit */
4653 } else if (op
== 3 || op
== 5) {
4654 ot
= dflag
? OT_LONG
+ (rex_w
== 1) : OT_WORD
;
4655 } else if (op
== 6) {
4656 /* default push size is 64 bit */
4657 ot
= dflag
? OT_QUAD
: OT_WORD
;
4661 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4662 if (op
>= 2 && op
!= 3 && op
!= 5)
4663 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4665 gen_op_mov_TN_reg(ot
, 0, rm
);
4669 case 0: /* inc Ev */
4674 gen_inc(s
, ot
, opreg
, 1);
4676 case 1: /* dec Ev */
4681 gen_inc(s
, ot
, opreg
, -1);
4683 case 2: /* call Ev */
4684 /* XXX: optimize if memory (no 'and' is necessary) */
4686 gen_op_andl_T0_ffff();
4687 next_eip
= s
->pc
- s
->cs_base
;
4688 gen_movtl_T1_im(next_eip
);
4693 case 3: /* lcall Ev */
4694 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4695 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4696 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4698 if (s
->pe
&& !s
->vm86
) {
4699 gen_update_cc_op(s
);
4700 gen_jmp_im(pc_start
- s
->cs_base
);
4701 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4702 gen_helper_lcall_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4703 tcg_const_i32(dflag
),
4704 tcg_const_i32(s
->pc
- pc_start
));
4706 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4707 gen_helper_lcall_real(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4708 tcg_const_i32(dflag
),
4709 tcg_const_i32(s
->pc
- s
->cs_base
));
4713 case 4: /* jmp Ev */
4715 gen_op_andl_T0_ffff();
4719 case 5: /* ljmp Ev */
4720 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4721 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4722 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4724 if (s
->pe
&& !s
->vm86
) {
4725 gen_update_cc_op(s
);
4726 gen_jmp_im(pc_start
- s
->cs_base
);
4727 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4728 gen_helper_ljmp_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4729 tcg_const_i32(s
->pc
- pc_start
));
4731 gen_op_movl_seg_T0_vm(R_CS
);
4732 gen_op_movl_T0_T1();
4737 case 6: /* push Ev */
4745 case 0x84: /* test Ev, Gv */
4750 ot
= dflag
+ OT_WORD
;
4752 modrm
= cpu_ldub_code(env
, s
->pc
++);
4753 reg
= ((modrm
>> 3) & 7) | rex_r
;
4755 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4756 gen_op_mov_TN_reg(ot
, 1, reg
);
4757 gen_op_testl_T0_T1_cc();
4758 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4761 case 0xa8: /* test eAX, Iv */
4766 ot
= dflag
+ OT_WORD
;
4767 val
= insn_get(env
, s
, ot
);
4769 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
4770 gen_op_movl_T1_im(val
);
4771 gen_op_testl_T0_T1_cc();
4772 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4775 case 0x98: /* CWDE/CBW */
4776 #ifdef TARGET_X86_64
4778 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4779 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4780 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
4784 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4785 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4786 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4788 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
4789 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4790 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4793 case 0x99: /* CDQ/CWD */
4794 #ifdef TARGET_X86_64
4796 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
4797 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
4798 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
4802 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4803 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4804 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
4805 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4807 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4808 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4809 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
4810 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4813 case 0x1af: /* imul Gv, Ev */
4814 case 0x69: /* imul Gv, Ev, I */
4816 ot
= dflag
+ OT_WORD
;
4817 modrm
= cpu_ldub_code(env
, s
->pc
++);
4818 reg
= ((modrm
>> 3) & 7) | rex_r
;
4820 s
->rip_offset
= insn_const_size(ot
);
4823 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4825 val
= insn_get(env
, s
, ot
);
4826 gen_op_movl_T1_im(val
);
4827 } else if (b
== 0x6b) {
4828 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4829 gen_op_movl_T1_im(val
);
4831 gen_op_mov_TN_reg(ot
, 1, reg
);
4834 #ifdef TARGET_X86_64
4835 if (ot
== OT_QUAD
) {
4836 gen_helper_imulq_T0_T1(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
4839 if (ot
== OT_LONG
) {
4840 #ifdef TARGET_X86_64
4841 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4842 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4843 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4844 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4845 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4846 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4850 t0
= tcg_temp_new_i64();
4851 t1
= tcg_temp_new_i64();
4852 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4853 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4854 tcg_gen_mul_i64(t0
, t0
, t1
);
4855 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4856 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4857 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4858 tcg_gen_shri_i64(t0
, t0
, 32);
4859 tcg_gen_trunc_i64_i32(cpu_T
[1], t0
);
4860 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[1], cpu_tmp0
);
4864 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4865 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4866 /* XXX: use 32 bit mul which could be faster */
4867 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4868 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4869 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4870 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4872 gen_op_mov_reg_T0(ot
, reg
);
4873 set_cc_op(s
, CC_OP_MULB
+ ot
);
4876 case 0x1c1: /* xadd Ev, Gv */
4880 ot
= dflag
+ OT_WORD
;
4881 modrm
= cpu_ldub_code(env
, s
->pc
++);
4882 reg
= ((modrm
>> 3) & 7) | rex_r
;
4883 mod
= (modrm
>> 6) & 3;
4885 rm
= (modrm
& 7) | REX_B(s
);
4886 gen_op_mov_TN_reg(ot
, 0, reg
);
4887 gen_op_mov_TN_reg(ot
, 1, rm
);
4888 gen_op_addl_T0_T1();
4889 gen_op_mov_reg_T1(ot
, reg
);
4890 gen_op_mov_reg_T0(ot
, rm
);
4892 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4893 gen_op_mov_TN_reg(ot
, 0, reg
);
4894 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4895 gen_op_addl_T0_T1();
4896 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4897 gen_op_mov_reg_T1(ot
, reg
);
4899 gen_op_update2_cc();
4900 set_cc_op(s
, CC_OP_ADDB
+ ot
);
4903 case 0x1b1: /* cmpxchg Ev, Gv */
4906 TCGv t0
, t1
, t2
, a0
;
4911 ot
= dflag
+ OT_WORD
;
4912 modrm
= cpu_ldub_code(env
, s
->pc
++);
4913 reg
= ((modrm
>> 3) & 7) | rex_r
;
4914 mod
= (modrm
>> 6) & 3;
4915 t0
= tcg_temp_local_new();
4916 t1
= tcg_temp_local_new();
4917 t2
= tcg_temp_local_new();
4918 a0
= tcg_temp_local_new();
4919 gen_op_mov_v_reg(ot
, t1
, reg
);
4921 rm
= (modrm
& 7) | REX_B(s
);
4922 gen_op_mov_v_reg(ot
, t0
, rm
);
4924 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4925 tcg_gen_mov_tl(a0
, cpu_A0
);
4926 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
4927 rm
= 0; /* avoid warning */
4929 label1
= gen_new_label();
4930 tcg_gen_sub_tl(t2
, cpu_regs
[R_EAX
], t0
);
4932 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
4933 label2
= gen_new_label();
4935 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
4937 gen_set_label(label1
);
4938 gen_op_mov_reg_v(ot
, rm
, t1
);
4940 /* perform no-op store cycle like physical cpu; must be
4941 before changing accumulator to ensure idempotency if
4942 the store faults and the instruction is restarted */
4943 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
4944 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
4946 gen_set_label(label1
);
4947 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
4949 gen_set_label(label2
);
4950 tcg_gen_mov_tl(cpu_cc_src
, t0
);
4951 tcg_gen_mov_tl(cpu_cc_dst
, t2
);
4952 set_cc_op(s
, CC_OP_SUBB
+ ot
);
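            /* The cmpxchg sequence above is emulated non-atomically:
               t2 = EAX - dest selects the success/failure path, and for the
               memory form a store is always performed (a no-op store of the
               old value on failure) so that a faulting destination restarts
               the instruction before EAX is modified.  The same subtraction
               result feeds cc_src/cc_dst for CC_OP_SUB flag computation. */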
4959 case 0x1c7: /* cmpxchg8b */
4960 modrm
= cpu_ldub_code(env
, s
->pc
++);
4961 mod
= (modrm
>> 6) & 3;
4962 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
4964 #ifdef TARGET_X86_64
4966 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
4968 gen_jmp_im(pc_start
- s
->cs_base
);
4969 gen_update_cc_op(s
);
4970 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4971 gen_helper_cmpxchg16b(cpu_env
, cpu_A0
);
4975 if (!(s
->cpuid_features
& CPUID_CX8
))
4977 gen_jmp_im(pc_start
- s
->cs_base
);
4978 gen_update_cc_op(s
);
4979 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4980 gen_helper_cmpxchg8b(cpu_env
, cpu_A0
);
4982 set_cc_op(s
, CC_OP_EFLAGS
);
4985 /**************************/
4987 case 0x50 ... 0x57: /* push */
4988 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
4991 case 0x58 ... 0x5f: /* pop */
4993 ot
= dflag
? OT_QUAD
: OT_WORD
;
4995 ot
= dflag
+ OT_WORD
;
4998 /* NOTE: order is important for pop %sp */
5000 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5002 case 0x60: /* pusha */
5007 case 0x61: /* popa */
5012 case 0x68: /* push Iv */
5015 ot
= dflag
? OT_QUAD
: OT_WORD
;
5017 ot
= dflag
+ OT_WORD
;
5020 val
= insn_get(env
, s
, ot
);
5022 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5023 gen_op_movl_T0_im(val
);
5026 case 0x8f: /* pop Ev */
5028 ot
= dflag
? OT_QUAD
: OT_WORD
;
5030 ot
= dflag
+ OT_WORD
;
5032 modrm
= cpu_ldub_code(env
, s
->pc
++);
5033 mod
= (modrm
>> 6) & 3;
5036 /* NOTE: order is important for pop %sp */
5038 rm
= (modrm
& 7) | REX_B(s
);
5039 gen_op_mov_reg_T0(ot
, rm
);
5041 /* NOTE: order is important too for MMU exceptions */
5042 s
->popl_esp_hack
= 1 << ot
;
5043 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5044 s
->popl_esp_hack
= 0;
    case 0xc8: /* enter */
        {
            int level;
            val = cpu_lduw_code(env, s->pc);
            s->pc += 2;
            level = cpu_ldub_code(env, s->pc++);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        /* XXX: exception not precise (ESP is updated before potential exception) */
        if (CODE64(s)) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_QUAD, R_ESP);
        } else if (s->ss32) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
            gen_op_mov_reg_T0(OT_LONG, R_ESP);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_WORD, R_ESP);
        }
        gen_pop_T0(s);
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_op_mov_reg_T0(ot, R_EBP);
        gen_pop_update(s);
        break;
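    /* Segment pushes/pops: a pop into SS starts the one-instruction
       interrupt/trace inhibit window, and any segment load that changes
       static translator state (s->is_jmp set by gen_movl_seg_T0) must end
       the translation block with gen_eob(). */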
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
        gen_push_T0(s);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_T0(s);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        gen_pop_T0(s);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        gen_pop_update(s);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace. */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
        gen_pop_update(s);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /**************************/
        /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        }
        val = insn_get(env, s, ot);
        gen_op_movl_T0_im(val);
        if (mod != 3)
            gen_op_st_T0_A0(ot + s->mem_index);
        else
            gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = OT_WORD + dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_T0(ot, reg);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        if (mod == 3)
            ot = OT_WORD + dflag;
        else
            ot = OT_WORD;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;
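    /* MOVZX/MOVSX: bit 0 of the opcode selects the source width (byte or
       word) and bit 3 selects sign versus zero extension; the register
       path extends cpu_T[0] in place, the memory path picks the signed or
       unsigned loader. */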
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of source */
            ot = (b & 1) + OT_BYTE;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(ot, 0, rm);
                switch(ot | (b & 8)) {
                case OT_BYTE:
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_BYTE | 8:
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_WORD:
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                case OT_WORD | 8:
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if (b & 8) {
                    gen_op_lds_T0_A0(ot + s->mem_index);
                } else {
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        }
        break;
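    /* LEA reuses the normal modrm address computation but must not add any
       segment base, so the override and addseg state are cleared around
       gen_lea_modrm() and restored afterwards. */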
    case 0x8d: /* lea */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        s->override = -1;
        val = s->addseg;
        s->addseg = 0;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        s->addseg = val;
        gen_op_mov_reg_A0(ot - OT_WORD, reg);
        break;
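    /* The 0xa0-0xa3 moffs forms (direct absolute-address moves between the
       accumulator and memory) follow; the address is an 8/4/2 byte
       immediate depending on the address size. */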
5265 case 0xa0: /* mov EAX, Ov */
5267 case 0xa2: /* mov Ov, EAX */
5270 target_ulong offset_addr
;
5275 ot
= dflag
+ OT_WORD
;
5276 #ifdef TARGET_X86_64
5277 if (s
->aflag
== 2) {
5278 offset_addr
= cpu_ldq_code(env
, s
->pc
);
5280 gen_op_movq_A0_im(offset_addr
);
5285 offset_addr
= insn_get(env
, s
, OT_LONG
);
5287 offset_addr
= insn_get(env
, s
, OT_WORD
);
5289 gen_op_movl_A0_im(offset_addr
);
5291 gen_add_A0_ds_seg(s
);
5293 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5294 gen_op_mov_reg_T0(ot
, R_EAX
);
5296 gen_op_mov_TN_reg(ot
, 0, R_EAX
);
5297 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5301 case 0xd7: /* xlat */
5302 #ifdef TARGET_X86_64
5303 if (s
->aflag
== 2) {
5304 gen_op_movq_A0_reg(R_EBX
);
5305 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5306 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5307 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5311 gen_op_movl_A0_reg(R_EBX
);
5312 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5313 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5314 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5316 gen_op_andl_A0_ffff();
5318 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
5320 gen_add_A0_ds_seg(s
);
5321 gen_op_ldu_T0_A0(OT_BYTE
+ s
->mem_index
);
5322 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
5324 case 0xb0 ... 0xb7: /* mov R, Ib */
5325 val
= insn_get(env
, s
, OT_BYTE
);
5326 gen_op_movl_T0_im(val
);
5327 gen_op_mov_reg_T0(OT_BYTE
, (b
& 7) | REX_B(s
));
5329 case 0xb8 ... 0xbf: /* mov R, Iv */
5330 #ifdef TARGET_X86_64
5334 tmp
= cpu_ldq_code(env
, s
->pc
);
5336 reg
= (b
& 7) | REX_B(s
);
5337 gen_movtl_T0_im(tmp
);
5338 gen_op_mov_reg_T0(OT_QUAD
, reg
);
5342 ot
= dflag
? OT_LONG
: OT_WORD
;
5343 val
= insn_get(env
, s
, ot
);
5344 reg
= (b
& 7) | REX_B(s
);
5345 gen_op_movl_T0_im(val
);
5346 gen_op_mov_reg_T0(ot
, reg
);
5350 case 0x91 ... 0x97: /* xchg R, EAX */
5352 ot
= dflag
+ OT_WORD
;
5353 reg
= (b
& 7) | REX_B(s
);
5357 case 0x87: /* xchg Ev, Gv */
5361 ot
= dflag
+ OT_WORD
;
5362 modrm
= cpu_ldub_code(env
, s
->pc
++);
5363 reg
= ((modrm
>> 3) & 7) | rex_r
;
5364 mod
= (modrm
>> 6) & 3;
5366 rm
= (modrm
& 7) | REX_B(s
);
5368 gen_op_mov_TN_reg(ot
, 0, reg
);
5369 gen_op_mov_TN_reg(ot
, 1, rm
);
5370 gen_op_mov_reg_T0(ot
, rm
);
5371 gen_op_mov_reg_T1(ot
, reg
);
5373 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5374 gen_op_mov_TN_reg(ot
, 0, reg
);
5375 /* for xchg, lock is implicit */
5376 if (!(prefixes
& PREFIX_LOCK
))
5378 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5379 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5380 if (!(prefixes
& PREFIX_LOCK
))
5381 gen_helper_unlock();
5382 gen_op_mov_reg_T1(ot
, reg
);
5385 case 0xc4: /* les Gv */
5390 case 0xc5: /* lds Gv */
5395 case 0x1b2: /* lss Gv */
5398 case 0x1b4: /* lfs Gv */
5401 case 0x1b5: /* lgs Gv */
5404 ot
= dflag
? OT_LONG
: OT_WORD
;
5405 modrm
= cpu_ldub_code(env
, s
->pc
++);
5406 reg
= ((modrm
>> 3) & 7) | rex_r
;
5407 mod
= (modrm
>> 6) & 3;
5410 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5411 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5412 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5413 /* load the segment first to handle exceptions properly */
5414 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5415 gen_movl_seg_T0(s
, op
, pc_start
- s
->cs_base
);
5416 /* then put the data */
5417 gen_op_mov_reg_T1(ot
, reg
);
5419 gen_jmp_im(s
->pc
- s
->cs_base
);
5424 /************************/
5435 ot
= dflag
+ OT_WORD
;
5437 modrm
= cpu_ldub_code(env
, s
->pc
++);
5438 mod
= (modrm
>> 6) & 3;
5439 op
= (modrm
>> 3) & 7;
5445 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5448 opreg
= (modrm
& 7) | REX_B(s
);
5453 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5456 shift
= cpu_ldub_code(env
, s
->pc
++);
5458 gen_shifti(s
, op
, ot
, opreg
, shift
);
5473 case 0x1a4: /* shld imm */
5477 case 0x1a5: /* shld cl */
5481 case 0x1ac: /* shrd imm */
5485 case 0x1ad: /* shrd cl */
5489 ot
= dflag
+ OT_WORD
;
5490 modrm
= cpu_ldub_code(env
, s
->pc
++);
5491 mod
= (modrm
>> 6) & 3;
5492 rm
= (modrm
& 7) | REX_B(s
);
5493 reg
= ((modrm
>> 3) & 7) | rex_r
;
5495 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5500 gen_op_mov_TN_reg(ot
, 1, reg
);
5503 val
= cpu_ldub_code(env
, s
->pc
++);
5504 tcg_gen_movi_tl(cpu_T3
, val
);
5506 tcg_gen_mov_tl(cpu_T3
, cpu_regs
[R_ECX
]);
5508 gen_shiftd_rm_T1_T3(s
, ot
, opreg
, op
);
5511 /************************/
5514 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5515 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5516 /* XXX: what to do if illegal op ? */
5517 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5520 modrm
= cpu_ldub_code(env
, s
->pc
++);
5521 mod
= (modrm
>> 6) & 3;
5523 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5526 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5528 case 0x00 ... 0x07: /* fxxxs */
5529 case 0x10 ... 0x17: /* fixxxl */
5530 case 0x20 ... 0x27: /* fxxxl */
5531 case 0x30 ... 0x37: /* fixxx */
5538 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5539 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5540 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
5543 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5544 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5545 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5548 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5549 (s
->mem_index
>> 2) - 1);
5550 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
5554 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5555 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5556 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5560 gen_helper_fp_arith_ST0_FT0(op1
);
5562 /* fcomp needs pop */
5563 gen_helper_fpop(cpu_env
);
5567 case 0x08: /* flds */
5568 case 0x0a: /* fsts */
5569 case 0x0b: /* fstps */
5570 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5571 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5572 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5577 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5578 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5579 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
5582 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5583 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5584 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5587 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5588 (s
->mem_index
>> 2) - 1);
5589 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
5593 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5594 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5595 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5600 /* XXX: the corresponding CPUID bit must be tested ! */
5603 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
5604 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5605 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5608 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
5609 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5610 (s
->mem_index
>> 2) - 1);
5614 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
5615 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5616 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5619 gen_helper_fpop(cpu_env
);
5624 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
5625 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5626 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5629 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
5630 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5631 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5634 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
5635 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5636 (s
->mem_index
>> 2) - 1);
5640 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
5641 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5642 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5646 gen_helper_fpop(cpu_env
);
5650 case 0x0c: /* fldenv mem */
5651 gen_update_cc_op(s
);
5652 gen_jmp_im(pc_start
- s
->cs_base
);
5653 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5655 case 0x0d: /* fldcw mem */
5656 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
5657 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5658 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
5660 case 0x0e: /* fnstenv mem */
5661 gen_update_cc_op(s
);
5662 gen_jmp_im(pc_start
- s
->cs_base
);
5663 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5665 case 0x0f: /* fnstcw mem */
5666 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
5667 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5668 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5670 case 0x1d: /* fldt mem */
5671 gen_update_cc_op(s
);
5672 gen_jmp_im(pc_start
- s
->cs_base
);
5673 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
5675 case 0x1f: /* fstpt mem */
5676 gen_update_cc_op(s
);
5677 gen_jmp_im(pc_start
- s
->cs_base
);
5678 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
5679 gen_helper_fpop(cpu_env
);
5681 case 0x2c: /* frstor mem */
5682 gen_update_cc_op(s
);
5683 gen_jmp_im(pc_start
- s
->cs_base
);
5684 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5686 case 0x2e: /* fnsave mem */
5687 gen_update_cc_op(s
);
5688 gen_jmp_im(pc_start
- s
->cs_base
);
5689 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5691 case 0x2f: /* fnstsw mem */
5692 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
5693 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5694 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5696 case 0x3c: /* fbld */
5697 gen_update_cc_op(s
);
5698 gen_jmp_im(pc_start
- s
->cs_base
);
5699 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
5701 case 0x3e: /* fbstp */
5702 gen_update_cc_op(s
);
5703 gen_jmp_im(pc_start
- s
->cs_base
);
5704 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
5705 gen_helper_fpop(cpu_env
);
5707 case 0x3d: /* fildll */
5708 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5709 (s
->mem_index
>> 2) - 1);
5710 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
5712 case 0x3f: /* fistpll */
5713 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
5714 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5715 (s
->mem_index
>> 2) - 1);
5716 gen_helper_fpop(cpu_env
);
5722 /* register float ops */
5726 case 0x08: /* fld sti */
5727 gen_helper_fpush(cpu_env
);
5728 gen_helper_fmov_ST0_STN(cpu_env
,
5729 tcg_const_i32((opreg
+ 1) & 7));
5731 case 0x09: /* fxchg sti */
5732 case 0x29: /* fxchg4 sti, undocumented op */
5733 case 0x39: /* fxchg7 sti, undocumented op */
5734 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
5736 case 0x0a: /* grp d9/2 */
5739 /* check exceptions (FreeBSD FPU probe) */
5740 gen_update_cc_op(s
);
5741 gen_jmp_im(pc_start
- s
->cs_base
);
5742 gen_helper_fwait(cpu_env
);
5748 case 0x0c: /* grp d9/4 */
5751 gen_helper_fchs_ST0(cpu_env
);
5754 gen_helper_fabs_ST0(cpu_env
);
5757 gen_helper_fldz_FT0(cpu_env
);
5758 gen_helper_fcom_ST0_FT0(cpu_env
);
5761 gen_helper_fxam_ST0(cpu_env
);
5767 case 0x0d: /* grp d9/5 */
5771 gen_helper_fpush(cpu_env
);
5772 gen_helper_fld1_ST0(cpu_env
);
5775 gen_helper_fpush(cpu_env
);
5776 gen_helper_fldl2t_ST0(cpu_env
);
5779 gen_helper_fpush(cpu_env
);
5780 gen_helper_fldl2e_ST0(cpu_env
);
5783 gen_helper_fpush(cpu_env
);
5784 gen_helper_fldpi_ST0(cpu_env
);
5787 gen_helper_fpush(cpu_env
);
5788 gen_helper_fldlg2_ST0(cpu_env
);
5791 gen_helper_fpush(cpu_env
);
5792 gen_helper_fldln2_ST0(cpu_env
);
5795 gen_helper_fpush(cpu_env
);
5796 gen_helper_fldz_ST0(cpu_env
);
5803 case 0x0e: /* grp d9/6 */
5806 gen_helper_f2xm1(cpu_env
);
5809 gen_helper_fyl2x(cpu_env
);
5812 gen_helper_fptan(cpu_env
);
5814 case 3: /* fpatan */
5815 gen_helper_fpatan(cpu_env
);
5817 case 4: /* fxtract */
5818 gen_helper_fxtract(cpu_env
);
5820 case 5: /* fprem1 */
5821 gen_helper_fprem1(cpu_env
);
5823 case 6: /* fdecstp */
5824 gen_helper_fdecstp(cpu_env
);
5827 case 7: /* fincstp */
5828 gen_helper_fincstp(cpu_env
);
5832 case 0x0f: /* grp d9/7 */
5835 gen_helper_fprem(cpu_env
);
5837 case 1: /* fyl2xp1 */
5838 gen_helper_fyl2xp1(cpu_env
);
5841 gen_helper_fsqrt(cpu_env
);
5843 case 3: /* fsincos */
5844 gen_helper_fsincos(cpu_env
);
5846 case 5: /* fscale */
5847 gen_helper_fscale(cpu_env
);
5849 case 4: /* frndint */
5850 gen_helper_frndint(cpu_env
);
5853 gen_helper_fsin(cpu_env
);
5857 gen_helper_fcos(cpu_env
);
5861 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5862 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5863 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5869 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
5871 gen_helper_fpop(cpu_env
);
5873 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5874 gen_helper_fp_arith_ST0_FT0(op1
);
5878 case 0x02: /* fcom */
5879 case 0x22: /* fcom2, undocumented op */
5880 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5881 gen_helper_fcom_ST0_FT0(cpu_env
);
5883 case 0x03: /* fcomp */
5884 case 0x23: /* fcomp3, undocumented op */
5885 case 0x32: /* fcomp5, undocumented op */
5886 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5887 gen_helper_fcom_ST0_FT0(cpu_env
);
5888 gen_helper_fpop(cpu_env
);
5890 case 0x15: /* da/5 */
5892 case 1: /* fucompp */
5893 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
5894 gen_helper_fucom_ST0_FT0(cpu_env
);
5895 gen_helper_fpop(cpu_env
);
5896 gen_helper_fpop(cpu_env
);
5904 case 0: /* feni (287 only, just do nop here) */
5906 case 1: /* fdisi (287 only, just do nop here) */
5909 gen_helper_fclex(cpu_env
);
5911 case 3: /* fninit */
5912 gen_helper_fninit(cpu_env
);
5914 case 4: /* fsetpm (287 only, just do nop here) */
5920 case 0x1d: /* fucomi */
5921 gen_update_cc_op(s
);
5922 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5923 gen_helper_fucomi_ST0_FT0(cpu_env
);
5924 set_cc_op(s
, CC_OP_EFLAGS
);
5926 case 0x1e: /* fcomi */
5927 gen_update_cc_op(s
);
5928 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5929 gen_helper_fcomi_ST0_FT0(cpu_env
);
5930 set_cc_op(s
, CC_OP_EFLAGS
);
5932 case 0x28: /* ffree sti */
5933 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
5935 case 0x2a: /* fst sti */
5936 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
5938 case 0x2b: /* fstp sti */
5939 case 0x0b: /* fstp1 sti, undocumented op */
5940 case 0x3a: /* fstp8 sti, undocumented op */
5941 case 0x3b: /* fstp9 sti, undocumented op */
5942 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
5943 gen_helper_fpop(cpu_env
);
5945 case 0x2c: /* fucom st(i) */
5946 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5947 gen_helper_fucom_ST0_FT0(cpu_env
);
5949 case 0x2d: /* fucomp st(i) */
5950 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5951 gen_helper_fucom_ST0_FT0(cpu_env
);
5952 gen_helper_fpop(cpu_env
);
5954 case 0x33: /* de/3 */
5956 case 1: /* fcompp */
5957 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
5958 gen_helper_fcom_ST0_FT0(cpu_env
);
5959 gen_helper_fpop(cpu_env
);
5960 gen_helper_fpop(cpu_env
);
5966 case 0x38: /* ffreep sti, undocumented op */
5967 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
5968 gen_helper_fpop(cpu_env
);
5970 case 0x3c: /* df/4 */
5973 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
5974 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5975 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5981 case 0x3d: /* fucomip */
5982 gen_update_cc_op(s
);
5983 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5984 gen_helper_fucomi_ST0_FT0(cpu_env
);
5985 gen_helper_fpop(cpu_env
);
5986 set_cc_op(s
, CC_OP_EFLAGS
);
5988 case 0x3e: /* fcomip */
5989 gen_update_cc_op(s
);
5990 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5991 gen_helper_fcomi_ST0_FT0(cpu_env
);
5992 gen_helper_fpop(cpu_env
);
5993 set_cc_op(s
, CC_OP_EFLAGS
);
5995 case 0x10 ... 0x13: /* fcmovxx */
5999 static const uint8_t fcmov_cc
[8] = {
6005 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6006 l1
= gen_new_label();
6007 gen_jcc1(s
, op1
, l1
);
6008 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6017 /************************/
6020 case 0xa4: /* movsS */
6025 ot
= dflag
+ OT_WORD
;
6027 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6028 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6034 case 0xaa: /* stosS */
6039 ot
= dflag
+ OT_WORD
;
6041 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6042 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6047 case 0xac: /* lodsS */
6052 ot
= dflag
+ OT_WORD
;
6053 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6054 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6059 case 0xae: /* scasS */
6064 ot
= dflag
+ OT_WORD
;
6065 if (prefixes
& PREFIX_REPNZ
) {
6066 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6067 } else if (prefixes
& PREFIX_REPZ
) {
6068 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6074 case 0xa6: /* cmpsS */
6079 ot
= dflag
+ OT_WORD
;
6080 if (prefixes
& PREFIX_REPNZ
) {
6081 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6082 } else if (prefixes
& PREFIX_REPZ
) {
6083 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6088 case 0x6c: /* insS */
6093 ot
= dflag
? OT_LONG
: OT_WORD
;
6094 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6095 gen_op_andl_T0_ffff();
6096 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6097 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6098 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6099 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6103 gen_jmp(s
, s
->pc
- s
->cs_base
);
6107 case 0x6e: /* outsS */
6112 ot
= dflag
? OT_LONG
: OT_WORD
;
6113 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6114 gen_op_andl_T0_ffff();
6115 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6116 svm_is_rep(prefixes
) | 4);
6117 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6118 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6122 gen_jmp(s
, s
->pc
- s
->cs_base
);
6127 /************************/
6135 ot
= dflag
? OT_LONG
: OT_WORD
;
6136 val
= cpu_ldub_code(env
, s
->pc
++);
6137 gen_op_movl_T0_im(val
);
6138 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6139 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6142 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6143 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6144 gen_op_mov_reg_T1(ot
, R_EAX
);
6147 gen_jmp(s
, s
->pc
- s
->cs_base
);
6155 ot
= dflag
? OT_LONG
: OT_WORD
;
6156 val
= cpu_ldub_code(env
, s
->pc
++);
6157 gen_op_movl_T0_im(val
);
6158 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6159 svm_is_rep(prefixes
));
6160 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6164 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6165 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6166 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6169 gen_jmp(s
, s
->pc
- s
->cs_base
);
6177 ot
= dflag
? OT_LONG
: OT_WORD
;
6178 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6179 gen_op_andl_T0_ffff();
6180 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6181 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6184 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6185 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6186 gen_op_mov_reg_T1(ot
, R_EAX
);
6189 gen_jmp(s
, s
->pc
- s
->cs_base
);
6197 ot
= dflag
? OT_LONG
: OT_WORD
;
6198 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6199 gen_op_andl_T0_ffff();
6200 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6201 svm_is_rep(prefixes
));
6202 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6206 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6207 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6208 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6211 gen_jmp(s
, s
->pc
- s
->cs_base
);
6215 /************************/
6217 case 0xc2: /* ret im */
6218 val
= cpu_ldsw_code(env
, s
->pc
);
6221 if (CODE64(s
) && s
->dflag
)
6223 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6225 gen_op_andl_T0_ffff();
6229 case 0xc3: /* ret */
6233 gen_op_andl_T0_ffff();
6237 case 0xca: /* lret im */
6238 val
= cpu_ldsw_code(env
, s
->pc
);
6241 if (s
->pe
&& !s
->vm86
) {
6242 gen_update_cc_op(s
);
6243 gen_jmp_im(pc_start
- s
->cs_base
);
6244 gen_helper_lret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6245 tcg_const_i32(val
));
6249 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6251 gen_op_andl_T0_ffff();
6252 /* NOTE: keeping EIP updated is not a problem in case of
6256 gen_op_addl_A0_im(2 << s
->dflag
);
6257 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6258 gen_op_movl_seg_T0_vm(R_CS
);
6259 /* add stack offset */
6260 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6264 case 0xcb: /* lret */
6267 case 0xcf: /* iret */
6268 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6271 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6272 set_cc_op(s
, CC_OP_EFLAGS
);
6273 } else if (s
->vm86
) {
6275 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6277 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6278 set_cc_op(s
, CC_OP_EFLAGS
);
6281 gen_update_cc_op(s
);
6282 gen_jmp_im(pc_start
- s
->cs_base
);
6283 gen_helper_iret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6284 tcg_const_i32(s
->pc
- s
->cs_base
));
6285 set_cc_op(s
, CC_OP_EFLAGS
);
6289 case 0xe8: /* call im */
6292 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6294 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6295 next_eip
= s
->pc
- s
->cs_base
;
6301 gen_movtl_T0_im(next_eip
);
6306 case 0x9a: /* lcall im */
6308 unsigned int selector
, offset
;
6312 ot
= dflag
? OT_LONG
: OT_WORD
;
6313 offset
= insn_get(env
, s
, ot
);
6314 selector
= insn_get(env
, s
, OT_WORD
);
6316 gen_op_movl_T0_im(selector
);
6317 gen_op_movl_T1_imu(offset
);
6320 case 0xe9: /* jmp im */
6322 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6324 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6325 tval
+= s
->pc
- s
->cs_base
;
6332 case 0xea: /* ljmp im */
6334 unsigned int selector
, offset
;
6338 ot
= dflag
? OT_LONG
: OT_WORD
;
6339 offset
= insn_get(env
, s
, ot
);
6340 selector
= insn_get(env
, s
, OT_WORD
);
6342 gen_op_movl_T0_im(selector
);
6343 gen_op_movl_T1_imu(offset
);
6346 case 0xeb: /* jmp Jb */
6347 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6348 tval
+= s
->pc
- s
->cs_base
;
6353 case 0x70 ... 0x7f: /* jcc Jb */
6354 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6356 case 0x180 ... 0x18f: /* jcc Jv */
6358 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6360 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6363 next_eip
= s
->pc
- s
->cs_base
;
6367 gen_jcc(s
, b
, tval
, next_eip
);
6370 case 0x190 ... 0x19f: /* setcc Gv */
6371 modrm
= cpu_ldub_code(env
, s
->pc
++);
6373 gen_ldst_modrm(env
, s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6375 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6380 ot
= dflag
+ OT_WORD
;
6381 modrm
= cpu_ldub_code(env
, s
->pc
++);
6382 reg
= ((modrm
>> 3) & 7) | rex_r
;
6383 mod
= (modrm
>> 6) & 3;
6384 t0
= tcg_temp_local_new();
6386 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6387 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
6389 rm
= (modrm
& 7) | REX_B(s
);
6390 gen_op_mov_v_reg(ot
, t0
, rm
);
6392 #ifdef TARGET_X86_64
6393 if (ot
== OT_LONG
) {
6394 /* XXX: specific Intel behaviour ? */
6395 l1
= gen_new_label();
6396 gen_jcc1(s
, b
^ 1, l1
);
6397 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
6399 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_regs
[reg
]);
6403 l1
= gen_new_label();
6404 gen_jcc1(s
, b
^ 1, l1
);
6405 gen_op_mov_reg_v(ot
, reg
, t0
);
6412 /************************/
6414 case 0x9c: /* pushf */
6415 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6416 if (s
->vm86
&& s
->iopl
!= 3) {
6417 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6419 gen_update_cc_op(s
);
6420 gen_helper_read_eflags(cpu_T
[0], cpu_env
);
6424 case 0x9d: /* popf */
6425 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6426 if (s
->vm86
&& s
->iopl
!= 3) {
6427 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6432 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6433 tcg_const_i32((TF_MASK
| AC_MASK
|
6438 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6439 tcg_const_i32((TF_MASK
| AC_MASK
|
6441 IF_MASK
| IOPL_MASK
)
6445 if (s
->cpl
<= s
->iopl
) {
6447 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6448 tcg_const_i32((TF_MASK
|
6454 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6455 tcg_const_i32((TF_MASK
|
6464 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6465 tcg_const_i32((TF_MASK
| AC_MASK
|
6466 ID_MASK
| NT_MASK
)));
6468 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6469 tcg_const_i32((TF_MASK
| AC_MASK
|
6476 set_cc_op(s
, CC_OP_EFLAGS
);
6477 /* abort translation because TF/AC flag may change */
6478 gen_jmp_im(s
->pc
- s
->cs_base
);
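    /* LAHF/SAHF transfer the low flag byte through AH; in 64-bit mode they
       are only legal when CPUID_EXT3_LAHF_LM is advertised. */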
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        gen_compute_eflags(s, cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s, cpu_T[0]);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s, cpu_cc_src);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s, cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s, cpu_cc_src);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
6520 /************************/
6521 /* bit operations */
6522 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6523 ot
= dflag
+ OT_WORD
;
6524 modrm
= cpu_ldub_code(env
, s
->pc
++);
6525 op
= (modrm
>> 3) & 7;
6526 mod
= (modrm
>> 6) & 3;
6527 rm
= (modrm
& 7) | REX_B(s
);
6530 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6531 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6533 gen_op_mov_TN_reg(ot
, 0, rm
);
6536 val
= cpu_ldub_code(env
, s
->pc
++);
6537 gen_op_movl_T1_im(val
);
6542 case 0x1a3: /* bt Gv, Ev */
6545 case 0x1ab: /* bts */
6548 case 0x1b3: /* btr */
6551 case 0x1bb: /* btc */
6554 ot
= dflag
+ OT_WORD
;
6555 modrm
= cpu_ldub_code(env
, s
->pc
++);
6556 reg
= ((modrm
>> 3) & 7) | rex_r
;
6557 mod
= (modrm
>> 6) & 3;
6558 rm
= (modrm
& 7) | REX_B(s
);
6559 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
6561 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6562 /* specific case: we need to add a displacement */
6563 gen_exts(ot
, cpu_T
[1]);
6564 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
6565 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
6566 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
6567 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6569 gen_op_mov_TN_reg(ot
, 0, rm
);
6572 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
6575 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
6576 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6579 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6580 tcg_gen_movi_tl(cpu_tmp0
, 1);
6581 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6582 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6585 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6586 tcg_gen_movi_tl(cpu_tmp0
, 1);
6587 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6588 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
6589 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6593 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6594 tcg_gen_movi_tl(cpu_tmp0
, 1);
6595 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6596 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6599 set_cc_op(s
, CC_OP_SARB
+ ot
);
6602 gen_op_st_T0_A0(ot
+ s
->mem_index
);
6604 gen_op_mov_reg_T0(ot
, rm
);
6605 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
6606 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6609 case 0x1bc: /* bsf */
6610 case 0x1bd: /* bsr */
6615 ot
= dflag
+ OT_WORD
;
6616 modrm
= cpu_ldub_code(env
, s
->pc
++);
6617 reg
= ((modrm
>> 3) & 7) | rex_r
;
6618 gen_ldst_modrm(env
, s
,modrm
, ot
, OR_TMP0
, 0);
6619 gen_extu(ot
, cpu_T
[0]);
6620 t0
= tcg_temp_local_new();
6621 tcg_gen_mov_tl(t0
, cpu_T
[0]);
6622 if ((b
& 1) && (prefixes
& PREFIX_REPZ
) &&
6623 (s
->cpuid_ext3_features
& CPUID_EXT3_ABM
)) {
6625 case OT_WORD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6626 tcg_const_i32(16)); break;
6627 case OT_LONG
: gen_helper_lzcnt(cpu_T
[0], t0
,
6628 tcg_const_i32(32)); break;
6629 case OT_QUAD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6630 tcg_const_i32(64)); break;
6632 gen_op_mov_reg_T0(ot
, reg
);
6634 label1
= gen_new_label();
6635 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6636 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, label1
);
6638 gen_helper_bsr(cpu_T
[0], t0
);
6640 gen_helper_bsf(cpu_T
[0], t0
);
6642 gen_op_mov_reg_T0(ot
, reg
);
6643 tcg_gen_movi_tl(cpu_cc_dst
, 1);
6644 gen_set_label(label1
);
6645 tcg_gen_discard_tl(cpu_cc_src
);
6646 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
6651 /************************/
6653 case 0x27: /* daa */
6656 gen_update_cc_op(s
);
6657 gen_helper_daa(cpu_env
);
6658 set_cc_op(s
, CC_OP_EFLAGS
);
6660 case 0x2f: /* das */
6663 gen_update_cc_op(s
);
6664 gen_helper_das(cpu_env
);
6665 set_cc_op(s
, CC_OP_EFLAGS
);
6667 case 0x37: /* aaa */
6670 gen_update_cc_op(s
);
6671 gen_helper_aaa(cpu_env
);
6672 set_cc_op(s
, CC_OP_EFLAGS
);
6674 case 0x3f: /* aas */
6677 gen_update_cc_op(s
);
6678 gen_helper_aas(cpu_env
);
6679 set_cc_op(s
, CC_OP_EFLAGS
);
6681 case 0xd4: /* aam */
6684 val
= cpu_ldub_code(env
, s
->pc
++);
6686 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6688 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
6689 set_cc_op(s
, CC_OP_LOGICB
);
6692 case 0xd5: /* aad */
6695 val
= cpu_ldub_code(env
, s
->pc
++);
6696 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
6697 set_cc_op(s
, CC_OP_LOGICB
);
6699 /************************/
6701 case 0x90: /* nop */
6702 /* XXX: correct lock test for all insn */
6703 if (prefixes
& PREFIX_LOCK
) {
6706 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6708 goto do_xchg_reg_eax
;
6710 if (prefixes
& PREFIX_REPZ
) {
6711 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
6714 case 0x9b: /* fwait */
6715 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6716 (HF_MP_MASK
| HF_TS_MASK
)) {
6717 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6719 gen_update_cc_op(s
);
6720 gen_jmp_im(pc_start
- s
->cs_base
);
6721 gen_helper_fwait(cpu_env
);
6724 case 0xcc: /* int3 */
6725 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6727 case 0xcd: /* int N */
6728 val
= cpu_ldub_code(env
, s
->pc
++);
6729 if (s
->vm86
&& s
->iopl
!= 3) {
6730 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6732 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6735 case 0xce: /* into */
6738 gen_update_cc_op(s
);
6739 gen_jmp_im(pc_start
- s
->cs_base
);
6740 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6743 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6744 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6746 gen_debug(s
, pc_start
- s
->cs_base
);
6750 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6754 case 0xfa: /* cli */
6756 if (s
->cpl
<= s
->iopl
) {
6757 gen_helper_cli(cpu_env
);
6759 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6763 gen_helper_cli(cpu_env
);
6765 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6769 case 0xfb: /* sti */
6771 if (s
->cpl
<= s
->iopl
) {
6773 gen_helper_sti(cpu_env
);
6774 /* interruptions are enabled only the first insn after sti */
6775 /* If several instructions disable interrupts, only the
6777 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
6778 gen_helper_set_inhibit_irq(cpu_env
);
6779 /* give a chance to handle pending irqs */
6780 gen_jmp_im(s
->pc
- s
->cs_base
);
6783 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6789 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6793 case 0x62: /* bound */
6796 ot
= dflag
? OT_LONG
: OT_WORD
;
6797 modrm
= cpu_ldub_code(env
, s
->pc
++);
6798 reg
= (modrm
>> 3) & 7;
6799 mod
= (modrm
>> 6) & 3;
6802 gen_op_mov_TN_reg(ot
, 0, reg
);
6803 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6804 gen_jmp_im(pc_start
- s
->cs_base
);
6805 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6806 if (ot
== OT_WORD
) {
6807 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6809 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6812 case 0x1c8 ... 0x1cf: /* bswap reg */
6813 reg
= (b
& 7) | REX_B(s
);
6814 #ifdef TARGET_X86_64
6816 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
6817 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
6818 gen_op_mov_reg_T0(OT_QUAD
, reg
);
6822 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
6823 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
6824 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
6825 gen_op_mov_reg_T0(OT_LONG
, reg
);
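    /* SALC (undocumented): set AL to 0xff if the carry flag is set and to
       0x00 otherwise, by negating the computed carry. */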
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, cpu_T[0]);
        tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
6835 case 0xe0: /* loopnz */
6836 case 0xe1: /* loopz */
6837 case 0xe2: /* loop */
6838 case 0xe3: /* jecxz */
6842 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6843 next_eip
= s
->pc
- s
->cs_base
;
6848 l1
= gen_new_label();
6849 l2
= gen_new_label();
6850 l3
= gen_new_label();
6853 case 0: /* loopnz */
6855 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6856 gen_op_jz_ecx(s
->aflag
, l3
);
6857 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
6860 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6861 gen_op_jnz_ecx(s
->aflag
, l1
);
6865 gen_op_jz_ecx(s
->aflag
, l1
);
6870 gen_jmp_im(next_eip
);
6879 case 0x130: /* wrmsr */
6880 case 0x132: /* rdmsr */
6882 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6884 gen_update_cc_op(s
);
6885 gen_jmp_im(pc_start
- s
->cs_base
);
6887 gen_helper_rdmsr(cpu_env
);
6889 gen_helper_wrmsr(cpu_env
);
6893 case 0x131: /* rdtsc */
6894 gen_update_cc_op(s
);
6895 gen_jmp_im(pc_start
- s
->cs_base
);
6898 gen_helper_rdtsc(cpu_env
);
6901 gen_jmp(s
, s
->pc
- s
->cs_base
);
6904 case 0x133: /* rdpmc */
6905 gen_update_cc_op(s
);
6906 gen_jmp_im(pc_start
- s
->cs_base
);
6907 gen_helper_rdpmc(cpu_env
);
6909 case 0x134: /* sysenter */
6910 /* For Intel SYSENTER is valid on 64-bit */
6911 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6914 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6916 gen_update_cc_op(s
);
6917 gen_jmp_im(pc_start
- s
->cs_base
);
6918 gen_helper_sysenter(cpu_env
);
6922 case 0x135: /* sysexit */
6923 /* For Intel SYSEXIT is valid on 64-bit */
6924 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6927 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6929 gen_update_cc_op(s
);
6930 gen_jmp_im(pc_start
- s
->cs_base
);
6931 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
));
6935 #ifdef TARGET_X86_64
6936 case 0x105: /* syscall */
6937 /* XXX: is it usable in real mode ? */
6938 gen_update_cc_op(s
);
6939 gen_jmp_im(pc_start
- s
->cs_base
);
6940 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6943 case 0x107: /* sysret */
6945 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6947 gen_update_cc_op(s
);
6948 gen_jmp_im(pc_start
- s
->cs_base
);
6949 gen_helper_sysret(cpu_env
, tcg_const_i32(s
->dflag
));
6950 /* condition codes are modified only in long mode */
6952 set_cc_op(s
, CC_OP_EFLAGS
);
6958 case 0x1a2: /* cpuid */
6959 gen_update_cc_op(s
);
6960 gen_jmp_im(pc_start
- s
->cs_base
);
6961 gen_helper_cpuid(cpu_env
);
6963 case 0xf4: /* hlt */
6965 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6967 gen_update_cc_op(s
);
6968 gen_jmp_im(pc_start
- s
->cs_base
);
6969 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6970 s
->is_jmp
= DISAS_TB_JUMP
;
6974 modrm
= cpu_ldub_code(env
, s
->pc
++);
6975 mod
= (modrm
>> 6) & 3;
6976 op
= (modrm
>> 3) & 7;
6979 if (!s
->pe
|| s
->vm86
)
6981 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
6982 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
6986 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
6989 if (!s
->pe
|| s
->vm86
)
6992 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6994 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
6995 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
6996 gen_jmp_im(pc_start
- s
->cs_base
);
6997 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6998 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7002 if (!s
->pe
|| s
->vm86
)
7004 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7005 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7009 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7012 if (!s
->pe
|| s
->vm86
)
7015 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7017 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7018 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7019 gen_jmp_im(pc_start
- s
->cs_base
);
7020 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7021 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7026 if (!s
->pe
|| s
->vm86
)
7028 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7029 gen_update_cc_op(s
);
7031 gen_helper_verr(cpu_env
, cpu_T
[0]);
7033 gen_helper_verw(cpu_env
, cpu_T
[0]);
7035 set_cc_op(s
, CC_OP_EFLAGS
);
7042 modrm
= cpu_ldub_code(env
, s
->pc
++);
7043 mod
= (modrm
>> 6) & 3;
7044 op
= (modrm
>> 3) & 7;
7050 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7051 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7052 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7053 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7054 gen_add_A0_im(s
, 2);
7055 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7057 gen_op_andl_T0_im(0xffffff);
7058 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7063 case 0: /* monitor */
7064 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7067 gen_update_cc_op(s
);
7068 gen_jmp_im(pc_start
- s
->cs_base
);
7069 #ifdef TARGET_X86_64
7070 if (s
->aflag
== 2) {
7071 gen_op_movq_A0_reg(R_EAX
);
7075 gen_op_movl_A0_reg(R_EAX
);
7077 gen_op_andl_A0_ffff();
7079 gen_add_A0_ds_seg(s
);
7080 gen_helper_monitor(cpu_env
, cpu_A0
);
7083 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7086 gen_update_cc_op(s
);
7087 gen_jmp_im(pc_start
- s
->cs_base
);
7088 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7092 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7096 gen_helper_clac(cpu_env
);
7097 gen_jmp_im(s
->pc
- s
->cs_base
);
7101 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7105 gen_helper_stac(cpu_env
);
7106 gen_jmp_im(s
->pc
- s
->cs_base
);
7113 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7114 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7115 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7116 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7117 gen_add_A0_im(s
, 2);
7118 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7120 gen_op_andl_T0_im(0xffffff);
7121 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7127 gen_update_cc_op(s
);
7128 gen_jmp_im(pc_start
- s
->cs_base
);
7131 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7134 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7137 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
),
7138 tcg_const_i32(s
->pc
- pc_start
));
7140 s
->is_jmp
= DISAS_TB_JUMP
;
7143 case 1: /* VMMCALL */
7144 if (!(s
->flags
& HF_SVME_MASK
))
7146 gen_helper_vmmcall(cpu_env
);
7148 case 2: /* VMLOAD */
7149 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7152 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7155 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
));
7158 case 3: /* VMSAVE */
7159 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7162 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7165 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
));
7169 if ((!(s
->flags
& HF_SVME_MASK
) &&
7170 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7174 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7177 gen_helper_stgi(cpu_env
);
7181 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7184 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7187 gen_helper_clgi(cpu_env
);
7190 case 6: /* SKINIT */
7191 if ((!(s
->flags
& HF_SVME_MASK
) &&
7192 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7195 gen_helper_skinit(cpu_env
);
7197 case 7: /* INVLPGA */
7198 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7201 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7204 gen_helper_invlpga(cpu_env
, tcg_const_i32(s
->aflag
));
7210 } else if (s
->cpl
!= 0) {
7211 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7213 gen_svm_check_intercept(s
, pc_start
,
7214 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7215 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7216 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7217 gen_add_A0_im(s
, 2);
7218 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7220 gen_op_andl_T0_im(0xffffff);
7222 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7223 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7225 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7226 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7231 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7232 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7233 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7235 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7237 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7241 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7243 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7244 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7245 gen_helper_lmsw(cpu_env
, cpu_T
[0]);
7246 gen_jmp_im(s
->pc
- s
->cs_base
);
7251 if (mod
!= 3) { /* invlpg */
7253 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7255 gen_update_cc_op(s
);
7256 gen_jmp_im(pc_start
- s
->cs_base
);
7257 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7258 gen_helper_invlpg(cpu_env
, cpu_A0
);
7259 gen_jmp_im(s
->pc
- s
->cs_base
);
7264 case 0: /* swapgs */
7265 #ifdef TARGET_X86_64
7268 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7270 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7271 offsetof(CPUX86State
,segs
[R_GS
].base
));
7272 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7273 offsetof(CPUX86State
,kernelgsbase
));
7274 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7275 offsetof(CPUX86State
,segs
[R_GS
].base
));
7276 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7277 offsetof(CPUX86State
,kernelgsbase
));
7285 case 1: /* rdtscp */
7286 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7288 gen_update_cc_op(s
);
7289 gen_jmp_im(pc_start
- s
->cs_base
);
7292 gen_helper_rdtscp(cpu_env
);
7295 gen_jmp(s
, s
->pc
- s
->cs_base
);
7307 case 0x108: /* invd */
7308 case 0x109: /* wbinvd */
7310 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7312 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7316 case 0x63: /* arpl or movslS (x86_64) */
7317 #ifdef TARGET_X86_64
7320 /* d_ot is the size of destination */
7321 d_ot
= dflag
+ OT_WORD
;
7323 modrm
= cpu_ldub_code(env
, s
->pc
++);
7324 reg
= ((modrm
>> 3) & 7) | rex_r
;
7325 mod
= (modrm
>> 6) & 3;
7326 rm
= (modrm
& 7) | REX_B(s
);
7329 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7331 if (d_ot
== OT_QUAD
)
7332 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7333 gen_op_mov_reg_T0(d_ot
, reg
);
7335 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7336 if (d_ot
== OT_QUAD
) {
7337 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7339 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7341 gen_op_mov_reg_T0(d_ot
, reg
);
7347 TCGv t0
, t1
, t2
, a0
;
7349 if (!s
->pe
|| s
->vm86
)
7351 t0
= tcg_temp_local_new();
7352 t1
= tcg_temp_local_new();
7353 t2
= tcg_temp_local_new();
7355 modrm
= cpu_ldub_code(env
, s
->pc
++);
7356 reg
= (modrm
>> 3) & 7;
7357 mod
= (modrm
>> 6) & 3;
7360 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7361 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7362 a0
= tcg_temp_local_new();
7363 tcg_gen_mov_tl(a0
, cpu_A0
);
7365 gen_op_mov_v_reg(ot
, t0
, rm
);
7368 gen_op_mov_v_reg(ot
, t1
, reg
);
7369 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7370 tcg_gen_andi_tl(t1
, t1
, 3);
7371 tcg_gen_movi_tl(t2
, 0);
7372 label1
= gen_new_label();
7373 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7374 tcg_gen_andi_tl(t0
, t0
, ~3);
7375 tcg_gen_or_tl(t0
, t0
, t1
);
7376 tcg_gen_movi_tl(t2
, CC_Z
);
7377 gen_set_label(label1
);
7379 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7382 gen_op_mov_reg_v(ot
, rm
, t0
);
7384 gen_compute_eflags(s
, cpu_cc_src
);
7385 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7386 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7392 case 0x102: /* lar */
7393 case 0x103: /* lsl */
7397 if (!s
->pe
|| s
->vm86
)
7399 ot
= dflag
? OT_LONG
: OT_WORD
;
7400 modrm
= cpu_ldub_code(env
, s
->pc
++);
7401 reg
= ((modrm
>> 3) & 7) | rex_r
;
7402 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7403 t0
= tcg_temp_local_new();
7404 gen_update_cc_op(s
);
7406 gen_helper_lar(t0
, cpu_env
, cpu_T
[0]);
7408 gen_helper_lsl(t0
, cpu_env
, cpu_T
[0]);
7410 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7411 label1
= gen_new_label();
7412 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7413 gen_op_mov_reg_v(ot
, reg
, t0
);
7414 gen_set_label(label1
);
7415 set_cc_op(s
, CC_OP_EFLAGS
);
7420 modrm
= cpu_ldub_code(env
, s
->pc
++);
7421 mod
= (modrm
>> 6) & 3;
7422 op
= (modrm
>> 3) & 7;
7424 case 0: /* prefetchnta */
7425 case 1: /* prefetchnt0 */
7426 case 2: /* prefetchnt0 */
7427 case 3: /* prefetchnt0 */
7430 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7431 /* nothing more to do */
7433 default: /* nop (multi byte) */
7434 gen_nop_modrm(env
, s
, modrm
);
7438 case 0x119 ... 0x11f: /* nop (multi byte) */
7439 modrm
= cpu_ldub_code(env
, s
->pc
++);
7440 gen_nop_modrm(env
, s
, modrm
);
7442 case 0x120: /* mov reg, crN */
7443 case 0x122: /* mov crN, reg */
7445 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7447 modrm
= cpu_ldub_code(env
, s
->pc
++);
7448 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7449 * AMD documentation (24594.pdf) and testing of
7450 * intel 386 and 486 processors all show that the mod bits
7451 * are assumed to be 1's, regardless of actual values.
7453 rm
= (modrm
& 7) | REX_B(s
);
7454 reg
= ((modrm
>> 3) & 7) | rex_r
;
7459 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
7460 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
7469 gen_update_cc_op(s
);
7470 gen_jmp_im(pc_start
- s
->cs_base
);
7472 gen_op_mov_TN_reg(ot
, 0, rm
);
7473 gen_helper_write_crN(cpu_env
, tcg_const_i32(reg
),
7475 gen_jmp_im(s
->pc
- s
->cs_base
);
7478 gen_helper_read_crN(cpu_T
[0], cpu_env
, tcg_const_i32(reg
));
7479 gen_op_mov_reg_T0(ot
, rm
);
7487 case 0x121: /* mov reg, drN */
7488 case 0x123: /* mov drN, reg */
7490 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7492 modrm
= cpu_ldub_code(env
, s
->pc
++);
7493 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7494 * AMD documentation (24594.pdf) and testing of
7495 * intel 386 and 486 processors all show that the mod bits
7496 * are assumed to be 1's, regardless of actual values.
7498 rm
= (modrm
& 7) | REX_B(s
);
7499 reg
= ((modrm
>> 3) & 7) | rex_r
;
7504 /* XXX: do it dynamically with CR4.DE bit */
7505 if (reg
== 4 || reg
== 5 || reg
>= 8)
7508 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
7509 gen_op_mov_TN_reg(ot
, 0, rm
);
7510 gen_helper_movl_drN_T0(cpu_env
, tcg_const_i32(reg
), cpu_T
[0]);
7511 gen_jmp_im(s
->pc
- s
->cs_base
);
7514 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
7515 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
7516 gen_op_mov_reg_T0(ot
, rm
);
7520 case 0x106: /* clts */
7522 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7524 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7525 gen_helper_clts(cpu_env
);
7526 /* abort block because static cpu state changed */
7527 gen_jmp_im(s
->pc
- s
->cs_base
);
7531 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7532 case 0x1c3: /* MOVNTI reg, mem */
7533 if (!(s
->cpuid_features
& CPUID_SSE2
))
7535 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
7536 modrm
= cpu_ldub_code(env
, s
->pc
++);
7537 mod
= (modrm
>> 6) & 3;
7540 reg
= ((modrm
>> 3) & 7) | rex_r
;
7541 /* generate a generic store */
7542 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
7545 modrm
= cpu_ldub_code(env
, s
->pc
++);
7546 mod
= (modrm
>> 6) & 3;
7547 op
= (modrm
>> 3) & 7;
7549 case 0: /* fxsave */
7550 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7551 (s
->prefix
& PREFIX_LOCK
))
7553 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7554 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7557 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7558 gen_update_cc_op(s
);
7559 gen_jmp_im(pc_start
- s
->cs_base
);
7560 gen_helper_fxsave(cpu_env
, cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7562 case 1: /* fxrstor */
7563 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7564 (s
->prefix
& PREFIX_LOCK
))
7566 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7567 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7570 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7571 gen_update_cc_op(s
);
7572 gen_jmp_im(pc_start
- s
->cs_base
);
7573 gen_helper_fxrstor(cpu_env
, cpu_A0
,
7574 tcg_const_i32((s
->dflag
== 2)));
7576 case 2: /* ldmxcsr */
7577 case 3: /* stmxcsr */
7578 if (s
->flags
& HF_TS_MASK
) {
7579 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7582 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
7585 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7587 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7588 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7589 gen_helper_ldmxcsr(cpu_env
, cpu_tmp2_i32
);
7591 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7592 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
7595 case 5: /* lfence */
7596 case 6: /* mfence */
7597 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE2
))
7600 case 7: /* sfence / clflush */
7601 if ((modrm
& 0xc7) == 0xc0) {
7603 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7604 if (!(s
->cpuid_features
& CPUID_SSE
))
7608 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
7610 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7617 case 0x10d: /* 3DNow! prefetch(w) */
7618 modrm
= cpu_ldub_code(env
, s
->pc
++);
7619 mod
= (modrm
>> 6) & 3;
7622 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7623 /* ignore for now */
7625 case 0x1aa: /* rsm */
7626 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
7627 if (!(s
->flags
& HF_SMM_MASK
))
7629 gen_update_cc_op(s
);
7630 gen_jmp_im(s
->pc
- s
->cs_base
);
7631 gen_helper_rsm(cpu_env
);
7634 case 0x1b8: /* SSE4.2 popcnt */
7635 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
7638 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
7641 modrm
= cpu_ldub_code(env
, s
->pc
++);
7642 reg
= ((modrm
>> 3) & 7) | rex_r
;
7644 if (s
->prefix
& PREFIX_DATA
)
7646 else if (s
->dflag
!= 2)
7651 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7652 gen_helper_popcnt(cpu_T
[0], cpu_env
, cpu_T
[0], tcg_const_i32(ot
));
7653 gen_op_mov_reg_T0(ot
, reg
);
7655 set_cc_op(s
, CC_OP_EFLAGS
);
7657 case 0x10e ... 0x10f:
7658 /* 3DNow! instructions, ignore prefixes */
7659 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
7660 case 0x110 ... 0x117:
7661 case 0x128 ... 0x12f:
7662 case 0x138 ... 0x13a:
7663 case 0x150 ... 0x179:
7664 case 0x17c ... 0x17f:
7666 case 0x1c4 ... 0x1c6:
7667 case 0x1d0 ... 0x1fe:
7668 gen_sse(env
, s
, b
, pc_start
, rex_r
);
7673 /* lock generation */
7674 if (s
->prefix
& PREFIX_LOCK
)
7675 gen_helper_unlock();
7678 if (s
->prefix
& PREFIX_LOCK
)
7679 gen_helper_unlock();
7680 /* XXX: ensure that no lock was generated */
7681 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
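/* Register the fixed TCG globals (env pointer, condition-code fields and
   the guest general purpose registers) used by the translator above. */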
7685 void optimize_flags_init(void)
7687 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
7688 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
,
7689 offsetof(CPUX86State
, cc_op
), "cc_op");
7690 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src
),
7692 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_dst
),
#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[15]), "r15");
#else
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUX86State *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
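    /* mem_index is added to the OT_* size code when a load/store op is
       picked (e.g. gen_op_ld_T0_A0 above): 0 selects user-mode accesses,
       otherwise it encodes (cpu_mmu_index + 1) << 2 for softmmu accesses. */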
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
    }
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
    dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");

    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
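
    /* Main translation loop: translate one guest instruction per iteration
       until the block ends (jump or CPU state change), the TB grows too
       large, or single-stepping/breakpoints force an early exit. */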
    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(env, dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation too */
        if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    /* don't forget to fill the last values */
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}
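
/* The two public entry points differ only in search_pc: the plain variant
   generates code for execution, while the _pc variant re-runs translation
   for an existing TB to rebuild the PC/cc_op side tables used when mapping
   a host fault back to guest state. */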
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
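
/* Called when an exception or interrupt is raised part-way through a TB:
   pc_pos indexes the side tables filled during the search_pc pass, from
   which the guest EIP and any pending cc_op are restored into env. */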
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;

    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for (i = 0; i <= pc_pos; i++) {
            if (tcg_ctx.gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
                         tcg_ctx.gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }

    env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}