 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "gen-icount.h"
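/* Nonzero once a REX prefix has been seen in the current instruction, i.e.
   byte registers 4..7 encode SPL/BPL/SIL/DIL rather than AH/CH/DH/BH
   (see byte_reg_is_xH below). */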
static int x86_64_hregs;
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
static inline void gen_op_movl_T0_0(void)
    tcg_gen_movi_tl(cpu_T[0], 0);

static inline void gen_op_movl_T0_im(int32_t val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_op_movl_T0_imu(uint32_t val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_op_movl_T1_im(int32_t val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_movl_T1_imu(uint32_t val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_movl_A0_im(uint32_t val)
    tcg_gen_movi_tl(cpu_A0, val);

static inline void gen_op_movq_A0_im(int64_t val)
    tcg_gen_movi_tl(cpu_A0, val);

static inline void gen_movtl_T0_im(target_ulong val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_movtl_T1_im(target_ulong val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_andl_T0_ffff(void)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);

static inline void gen_op_andl_T0_im(uint32_t val)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);

static inline void gen_op_movl_T0_T1(void)
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);

static inline void gen_op_andl_A0_ffff(void)
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
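/* The gen_op_* wrappers above emit TCG ops against the fixed temporaries
   cpu_T[0], cpu_T[1] and cpu_A0, which the translator composes to build up
   each guest instruction. */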
#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
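/* Byte offsets of the 8/16/32-bit sub-registers inside a target_ulong
   register slot, so partial-register loads and stores work on both
   little- and big-endian hosts. */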
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
    if (reg >= 8 || x86_64_hregs) {
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        tcg_gen_mov_tl(cpu_regs[reg], t0);
static inline void gen_op_mov_reg_T0(int ot, int reg)
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);

static inline void gen_op_mov_reg_T1(int ot, int reg)
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
static inline void gen_op_mov_reg_A0(int size, int reg)
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
        tcg_gen_mov_tl(t0, cpu_regs[reg]);

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
static inline void gen_op_movl_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addl_A0_im(int32_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);

static inline void gen_op_addq_A0_im(int64_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);

static void gen_add_A0_im(DisasContext *s, int val)
        gen_op_addq_A0_im(val);
        gen_op_addl_A0_im(val);
static inline void gen_op_addl_T0_T1(void)
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

static inline void gen_op_jmp_T0(void)
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
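/* In the register-add helpers below, 'size' is the address size:
   0 = 16 bit, 1 = 32 bit (result zero-extended), 2 = 64 bit. */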
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
static inline void gen_op_add_reg_T0(int size, int reg)
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
static inline void gen_op_set_cc_op(int32_t val)
    tcg_gen_movi_i32(cpu_cc_op, val);

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
static inline void gen_op_movl_A0_seg(int reg)
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));

static inline void gen_op_addq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
static inline void gen_op_lds_T0_A0(int idx)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        /* Should never happen on 32-bit targets.  */
        tcg_gen_qemu_ld64(t0, a0, mem_index);
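/* Callers pass ot + s->mem_index: the low two bits carry the operand size
   and (idx >> 2) - 1 recovers the MMU index used by the qemu_ld/st ops. */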
/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_ldu_T0_A0(int idx)
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_ld_T1_A0(int idx)
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_st8(t0, a0, mem_index);
        tcg_gen_qemu_st16(t0, a0, mem_index);
        tcg_gen_qemu_st32(t0, a0, mem_index);
        /* Should never happen on 32-bit targets.  */
        tcg_gen_qemu_st64(t0, a0, mem_index);

static inline void gen_op_st_T0_A0(int idx)
    gen_op_st_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_st_T1_A0(int idx)
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
static inline void gen_jmp_im(target_ulong pc)
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
static inline void gen_string_movl_A0_ESI(DisasContext *s)
    override = s->override;
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
            gen_op_movq_A0_reg(R_ESI);
        if (s->addseg && override < 0)
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            gen_op_movl_A0_reg(R_ESI);
        /* 16 address, always override */
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);

static inline void gen_string_movl_A0_EDI(DisasContext *s)
        gen_op_movq_A0_reg(R_EDI);
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            gen_op_movl_A0_reg(R_EDI);
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
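/* String destinations always use the ES segment; only the source address
   built from ESI honours a segment override prefix. */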
static inline void gen_op_movl_T0_Dshift(int ot)
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);

static void gen_extu(int ot, TCGv reg)
        tcg_gen_ext8u_tl(reg, reg);
        tcg_gen_ext16u_tl(reg, reg);
        tcg_gen_ext32u_tl(reg, reg);

static void gen_exts(int ot, TCGv reg)
        tcg_gen_ext8s_tl(reg, reg);
        tcg_gen_ext16s_tl(reg, reg);
        tcg_gen_ext32s_tl(reg, reg);
static inline void gen_op_jnz_ecx(int size, int label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);

static inline void gen_op_jz_ecx(int size, int label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
    if(s->flags & HF_SVMI_MASK) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
static inline void gen_movs(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
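/* One MOVS iteration: load from the source string, store to ES:[E]DI, then
   advance ESI and EDI by +/- operand size according to the direction flag. */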
static inline void gen_update_cc_op(DisasContext *s)
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;

static void gen_op_update1_cc(void)
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static void gen_op_update2_cc(void)
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static inline void gen_op_cmpl_T0_T1_cc(void)
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);

static inline void gen_op_testl_T0_T1_cc(void)
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);

static void gen_op_update_neg_cc(void)
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
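/* Condition codes are evaluated lazily: cc_op records the last flag-setting
   operation and cc_src/cc_dst its operands, so EFLAGS bits are only
   materialised by the helpers above when a consumer needs them. */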
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags_c(cpu_T[0]);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
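/* jcc_op selects which EFLAGS predicate is extracted: O, C, Z, BE (C|Z),
   S, P, L (S^O) and LE (Z|(S^O)); the shift/and sequences above isolate the
   corresponding bit(s) of the computed flags word in T0. */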
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
    jcc_op = (b >> 1) & 7;
        /* we optimize the cmp/jcc case */
        if (jcc_op == JCC_O || jcc_op == JCC_P)
        /* some jumps are easy to compute */
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
    int inv, jcc_op, size, cond;

    jcc_op = (b >> 1) & 7;
        /* we optimize the cmp/jcc case */
        size = cc_op - CC_OP_SUBB;
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
                cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
#ifdef TARGET_X86_64
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
                cond = inv ? TCG_COND_GE : TCG_COND_LT;
                cond = inv ? TCG_COND_GT : TCG_COND_LE;
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
#ifdef TARGET_X86_64
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
        /* some jumps are easy to compute */
            size = (cc_op - CC_OP_ADDB) & 3;
            size = (cc_op - CC_OP_ADDB) & 3;
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_jmp_tb(s, next_eip, 1);
static inline void gen_stos(DisasContext *s, int ot)
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_lods(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);

static inline void gen_scas(DisasContext *s, int ot)
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_cmps(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
static inline void gen_ins(DisasContext *s, int ot)
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_outs(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                       \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                \
                              target_ulong cur_eip, target_ulong next_eip) \
    gen_update_cc_op(s);                                                   \
    l2 = gen_jz_ecx_string(s, next_eip);                                   \
    gen_ ## op(s, ot);                                                     \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                \
    /* a loop would cause two single step exceptions if ECX = 1            \
       before rep string_insn */                                           \
    gen_op_jz_ecx(s->aflag, l2);                                           \
    gen_jmp(s, cur_eip);                                                   \

#define GEN_REPZ2(op)                                                      \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                \
                                   target_ulong cur_eip,                   \
                                   target_ulong next_eip,                  \
    gen_update_cc_op(s);                                                   \
    l2 = gen_jz_ecx_string(s, next_eip);                                   \
    gen_ ## op(s, ot);                                                     \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                     \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);             \
    gen_op_jz_ecx(s->aflag, l2);                                           \
    gen_jmp(s, cur_eip);                                                   \
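/* Each GEN_REPZ expansion performs a single iteration of the string op,
   decrements ECX and jumps back to the current instruction, so a REP loop is
   re-entered once per iteration; GEN_REPZ2 additionally tests ZF for
   REPZ/REPNZ CMPS and SCAS. */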
static void gen_helper_fp_arith_ST0_FT0(int op)
    case 0: gen_helper_fadd_ST0_FT0(); break;
    case 1: gen_helper_fmul_ST0_FT0(); break;
    case 2: gen_helper_fcom_ST0_FT0(); break;
    case 3: gen_helper_fcom_ST0_FT0(); break;
    case 4: gen_helper_fsub_ST0_FT0(); break;
    case 5: gen_helper_fsubr_ST0_FT0(); break;
    case 6: gen_helper_fdiv_ST0_FT0(); break;
    case 7: gen_helper_fdivr_ST0_FT0(); break;

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
    TCGv_i32 tmp = tcg_const_i32(opreg);
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
        gen_op_mov_TN_reg(ot, 0, d);
        gen_op_ld_T0_A0(ot + s1->mem_index);
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        gen_op_addl_T0_T1();
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            gen_op_mov_reg_T0(ot, d);
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
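/* gen_op dispatches on the OP_* arithmetic opcode: ADC and SBB fold in the
   current carry and leave cc_op dynamic, while ADD/SUB/AND/OR/XOR/CMP record
   a static CC_OP_* value so the flags can be recomputed lazily later. */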
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
        gen_op_mov_TN_reg(ot, 0, d);
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
        gen_op_mov_reg_T0(ot, d);
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
    if (ot == OT_QUAD) {
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, op1);

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();

    tcg_gen_andi_tl(t2, cpu_T[1], mask);
            gen_exts(ot, cpu_T[0]);
            tcg_gen_mov_tl(t0, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
            gen_extu(ot, cpu_T[0]);
            tcg_gen_mov_tl(t0, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
        tcg_gen_mov_tl(t0, cpu_T[0]);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);

    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);

    tcg_gen_mov_tl(t1, cpu_T[0]);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);

    tcg_gen_addi_tl(t2, t2, -1);
    tcg_gen_mov_tl(cpu_cc_dst, t1);
            tcg_gen_sar_tl(cpu_cc_src, t0, t2);
            tcg_gen_shr_tl(cpu_cc_src, t0, t2);
        tcg_gen_shl_tl(cpu_cc_src, t0, t2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, op1);
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
        tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
        tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            s->cc_op = CC_OP_SARB + ot;
            s->cc_op = CC_OP_SHLB + ot;
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
        tcg_gen_shli_tl(ret, arg1, arg2);
        tcg_gen_shri_tl(ret, arg1, -arg2);
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
        gen_op_mov_v_reg(ot, t0, op1);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
        tcg_gen_mov_tl(cpu_tmp0, t1);

    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);

    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
        gen_op_mov_reg_v(ot, op1, t0);

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
        gen_op_mov_v_reg(ot, t0, op1);

    tcg_gen_mov_tl(t1, t0);

    data_bits = 8 << ot;
        int shift = op2 & ((1 << (3 + ot)) - 1);
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
        tcg_gen_or_tl(t0, t0, cpu_tmp4);

    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
        gen_op_mov_reg_v(ot, op1, t0);

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t1, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
    s->cc_op = CC_OP_EFLAGS;
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, op1);
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, op1);

    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
        gen_op_mov_v_reg(ot, t0, op1);

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate */
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);

            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        data_bits = 8 << ot;
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);

    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
        gen_op_mov_reg_v(ot, op1, t0);

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
    gen_op_mov_TN_reg(ot, 1, s);
        gen_rot_rm_T1(s1, ot, d, 0);
        gen_rot_rm_T1(s1, ot, d, 1);
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        gen_rotc_rm_T1(s1, ot, d, 0);
        gen_rotc_rm_T1(s1, ot, d, 1);

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
        gen_rot_rm_im(s1, ot, d, c, 0);
        gen_rot_rm_im(s1, ot, d, c, 1);
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    mod = (modrm >> 6) & 3;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if ((base & 7) == 5) {
                disp = (int32_t)ldl_code(s->pc);
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
            disp = (int8_t)ldub_code(s->pc++);
            disp = (int32_t)ldl_code(s->pc);
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                    gen_op_addq_A0_im(disp);
                gen_op_movl_A0_reg(base);
                    gen_op_addl_A0_im(disp);
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
                gen_op_movl_A0_im(disp);
        /* index == 4 means no index */
        if (havesib && (index != 4)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
                gen_op_addl_A0_reg_sN(scale, index);
                if (base == R_EBP || base == R_ESP)
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
                gen_op_addl_A0_seg(s, override);
            disp = lduw_code(s->pc);
            gen_op_movl_A0_im(disp);
            rm = 0; /* avoid SS override */
            disp = (int8_t)ldub_code(s->pc++);
            disp = lduw_code(s->pc);
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            gen_op_movl_A0_reg(R_ESI);
            gen_op_movl_A0_reg(R_EDI);
            gen_op_movl_A0_reg(R_EBP);
            gen_op_movl_A0_reg(R_EBX);
        gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
            if (rm == 2 || rm == 3 || rm == 6)
            gen_op_addl_A0_seg(s, override);
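/* gen_lea_modrm decodes a ModRM/SIB effective address into cpu_A0:
   base + index*scale + displacement for 32/64-bit address sizes, or one of
   the fixed 16-bit register pairs (BX+SI, BX+DI, BP+SI, ...), adding the
   segment base when required. */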
static void gen_nop_modrm(DisasContext *s, int modrm)
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
            code = ldub_code(s->pc++);
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
    int override, must_add_seg;
    must_add_seg = s->addseg;
    if (s->override >= 0) {
        override = s->override;
#ifdef TARGET_X86_64
            gen_op_addq_A0_seg(override);
            gen_op_addl_A0_seg(s, override);
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
            gen_op_mov_TN_reg(ot, 0, rm);
                gen_op_mov_reg_T0(ot, reg);
        gen_lea_modrm(s, modrm, &opreg, &disp);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_ld_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, reg);
static inline uint32_t insn_get(DisasContext *s, int ot)
        ret = ldub_code(s->pc);
        ret = lduw_code(s->pc);
        ret = ldl_code(s->pc);

static inline int insn_const_size(unsigned int ot)
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
    TranslationBlock *tb;

    pc = s->cs_base + eip;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
        /* jump to another page: currently not optimized */
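/* Direct block chaining via goto_tb is only used when the jump target stays
   on the same guest page as the current translation block, hence the
   TARGET_PAGE_MASK comparison above. */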
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
        gen_update_cc_op(s);

        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;

        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
*s
, int b
)
2342 int inv
, jcc_op
, l1
;
2345 if (is_fast_jcc_case(s
, b
)) {
2346 /* nominal case: we use a jump */
2347 /* XXX: make it faster by adding new instructions in TCG */
2348 t0
= tcg_temp_local_new();
2349 tcg_gen_movi_tl(t0
, 0);
2350 l1
= gen_new_label();
2351 gen_jcc1(s
, s
->cc_op
, b
^ 1, l1
);
2352 tcg_gen_movi_tl(t0
, 1);
2354 tcg_gen_mov_tl(cpu_T
[0], t0
);
2357 /* slow case: it is more efficient not to generate a jump,
2358 although it is questionnable whether this optimization is
2361 jcc_op
= (b
>> 1) & 7;
2362 gen_setcc_slow_T0(s
, jcc_op
);
2364 tcg_gen_xori_tl(cpu_T
[0], cpu_T
[0], 1);
static inline void gen_op_movl_T0_seg(int seg_reg)
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State,segs[seg_reg].base));
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
static inline int svm_is_rep(int prefixes)
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);

gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(tcg_const_i32(type),
                                         tcg_const_i64(param));

gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
    gen_svm_check_intercept_param(s, pc_start, type, 0);
static inline void gen_stack_update(DisasContext *s, int addend)
#ifdef TARGET_X86_64
        gen_op_add_reg_im(2, R_ESP, addend);
        gen_op_add_reg_im(1, R_ESP, addend);
        gen_op_add_reg_im(0, R_ESP, addend);
/* generate a push. It depends on ss32, addseg and dflag */
static void gen_push_T0(DisasContext *s)
#ifdef TARGET_X86_64
        gen_op_movq_A0_reg(R_ESP);
            gen_op_addq_A0_im(-8);
            gen_op_st_T0_A0(OT_QUAD + s->mem_index);
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        gen_op_mov_reg_A0(2, R_ESP);
        gen_op_movl_A0_reg(R_ESP);
            gen_op_addl_A0_im(-2);
            gen_op_addl_A0_im(-4);
                tcg_gen_mov_tl(cpu_T[1], cpu_A0);
                gen_op_addl_A0_seg(s, R_SS);
            gen_op_andl_A0_ffff();
            tcg_gen_mov_tl(cpu_T[1], cpu_A0);
            gen_op_addl_A0_seg(s, R_SS);
        gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
            gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
#ifdef TARGET_X86_64
        gen_op_movq_A0_reg(R_ESP);
            gen_op_addq_A0_im(-8);
            gen_op_st_T1_A0(OT_QUAD + s->mem_index);
            gen_op_addq_A0_im(-2);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        gen_op_mov_reg_A0(2, R_ESP);
        gen_op_movl_A0_reg(R_ESP);
            gen_op_addl_A0_im(-2);
            gen_op_addl_A0_im(-4);
                gen_op_addl_A0_seg(s, R_SS);
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);

        if (s->ss32 && !s->addseg)
            gen_op_mov_reg_A0(1, R_ESP);
            gen_stack_update(s, (-2) << s->dflag);
/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
#ifdef TARGET_X86_64
        gen_op_movq_A0_reg(R_ESP);
        gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
        gen_op_movl_A0_reg(R_ESP);
                gen_op_addl_A0_seg(s, R_SS);
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_seg(s, R_SS);
        gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);

static void gen_pop_update(DisasContext *s)
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
        gen_stack_update(s, 2 << s->dflag);

static void gen_stack_A0(DisasContext *s)
    gen_op_movl_A0_reg(R_ESP);
        gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        gen_op_addl_A0_seg(s, R_SS);
2573 static void gen_pusha(DisasContext
*s
)
2576 gen_op_movl_A0_reg(R_ESP
);
2577 gen_op_addl_A0_im(-16 << s
->dflag
);
2579 gen_op_andl_A0_ffff();
2580 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2582 gen_op_addl_A0_seg(s
, R_SS
);
2583 for(i
= 0;i
< 8; i
++) {
2584 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2585 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2586 gen_op_addl_A0_im(2 << s
->dflag
);
2588 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2591 /* NOTE: wrap around in 16 bit not fully handled */
2592 static void gen_popa(DisasContext
*s
)
2595 gen_op_movl_A0_reg(R_ESP
);
2597 gen_op_andl_A0_ffff();
2598 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2599 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2601 gen_op_addl_A0_seg(s
, R_SS
);
2602 for(i
= 0;i
< 8; i
++) {
2603 /* ESP is not reloaded */
2605 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2606 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2608 gen_op_addl_A0_im(2 << s
->dflag
);
2610 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
static void gen_enter(DisasContext *s, int esp_addend, int level)
#ifdef TARGET_X86_64
        ot = s->dflag ? OT_QUAD : OT_WORD;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
            /* XXX: must save state */
            gen_helper_enter64_level(tcg_const_i32(level),
                                     tcg_const_i32((ot == OT_QUAD)),
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_QUAD, R_ESP);

        ot = s->dflag + OT_WORD;
        opsize = 2 << s->dflag;

        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
            gen_op_andl_A0_ffff();
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
            gen_op_addl_A0_seg(s, R_SS);

        gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
        gen_op_st_T0_A0(ot + s->mem_index);
            /* XXX: must save state */
            gen_helper_enter_level(tcg_const_i32(level),
                                   tcg_const_i32(s->dflag),
        gen_op_mov_reg_T1(ot, R_EBP);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;

/* an interrupt is different from an exception because of the
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = DISAS_TB_JUMP;

static void gen_debug(DisasContext *s, target_ulong cur_eip)
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    s->is_jmp = DISAS_TB_JUMP;
/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq();
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf();
    if (s->singlestep_enabled) {
        gen_helper_single_step();
    s->is_jmp = DISAS_TB_JUMP;
/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
        gen_update_cc_op(s);
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;

static void gen_jmp(DisasContext *s, target_ulong eip)
    gen_jmp_tb(s, eip, 0);
static inline void gen_ldq_env_A0(int idx, int offset)
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);

static inline void gen_stq_env_A0(int idx, int offset)
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);

static inline void gen_ldo_env_A0(int idx, int offset)
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));

static inline void gen_sto_env_A0(int idx, int offset)
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);

static inline void gen_op_movo(int d_offset, int s_offset)
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);

static inline void gen_op_movq(int d_offset, int s_offset)
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);

static inline void gen_op_movl(int d_offset, int s_offset)
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);

static inline void gen_op_movq_env_0(int d_offset)
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
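/* The gen_ldq/stq/ldo/sto_env_A0 and gen_op_mov{o,q,l} helpers move 128-bit
   XMM and 64-bit MMX values between guest memory (addressed by cpu_A0) and
   their slots in CPUX86State, one 64-bit half at a time. */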
typedef void (*SSEFunc_i_p)(TCGv_i32 val, TCGv_ptr reg);
typedef void (*SSEFunc_l_p)(TCGv_i64 val, TCGv_ptr reg);
typedef void (*SSEFunc_0_pi)(TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_pl)(TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_pp)(TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_ppt)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
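/* sse_op_table1 is indexed by the second opcode byte; the four columns are
   the variants selected by the mandatory prefix (none, 0x66, 0xF3, 0xF2).
   SSE_SPECIAL marks entries decoded by hand, SSE_DUMMY ops that only need a
   bare helper call (femms/emms). */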
static const SSEFunc_0_pp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_pp)gen_helper_shufps,
               (SSEFunc_0_pp)gen_helper_shufpd }, /* XXX: casts */

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
    [0x70] = { (SSEFunc_0_pp)gen_helper_pshufw_mmx,
               (SSEFunc_0_pp)gen_helper_pshufd_xmm,
               (SSEFunc_0_pp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_pp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_pp)gen_helper_maskmov_mmx,
               (SSEFunc_0_pp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};

static const SSEFunc_0_pp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};

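/* Scalar conversion helpers.  The table3a* tables convert an integer to a
   scalar float (indexed by (b >> 8) & 1); the table3b* tables convert a
   scalar float to an integer (indexed by ((b >> 7) & 2) | (b & 1)).  The
   *aq/*bq variants take 64-bit integers and are only built on x86_64. */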
static const SSEFunc_0_pi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_pl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_p sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_p sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif

static const SSEFunc_0_pp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};

static const SSEFunc_0_pp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};

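/* SSSE3/SSE4 helpers are paired with the CPUID feature bit that guards them,
   so gen_sse() can reject instructions the guest CPU does not advertise. */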
struct SSEOpHelper_pp {
    SSEFunc_0_pp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_ppi {
    SSEFunc_0_ppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }

static const struct SSEOpHelper_pp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};

static const struct SSEOpHelper_ppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};

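/* Decode and generate code for one MMX/SSE/3DNow! instruction.  b1 selects
   the mandatory-prefix column of the tables above (0: none, 1: 0x66,
   2: 0xf3, 3: 0xf2). */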
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;
    SSEFunc_0_pp sse_fn_pp;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_ppt sse_fn_ppt;
3124 if (s
->prefix
& PREFIX_DATA
)
3126 else if (s
->prefix
& PREFIX_REPZ
)
3128 else if (s
->prefix
& PREFIX_REPNZ
)
3132 sse_fn_pp
= sse_op_table1
[b
][b1
];
3136 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3146 /* simple MMX/SSE operation */
3147 if (s
->flags
& HF_TS_MASK
) {
3148 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3151 if (s
->flags
& HF_EM_MASK
) {
3153 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
3156 if (is_xmm
&& !(s
->flags
& HF_OSFXSR_MASK
))
3157 if ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))
3160 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
3171 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3172 the static cpu state) */
3174 gen_helper_enter_mmx();
3177 modrm
= ldub_code(s
->pc
++);
3178 reg
= ((modrm
>> 3) & 7);
3181 mod
= (modrm
>> 6) & 3;
3182 if (sse_fn_pp
== SSE_SPECIAL
) {
3185 case 0x0e7: /* movntq */
3188 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3189 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3191 case 0x1e7: /* movntdq */
3192 case 0x02b: /* movntps */
3193 case 0x12b: /* movntps */
3196 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3197 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3199 case 0x3f0: /* lddqu */
3202 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3203 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3205 case 0x22b: /* movntss */
3206 case 0x32b: /* movntsd */
3209 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3211 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,
3214 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3215 xmm_regs
[reg
].XMM_L(0)));
3216 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3219 case 0x6e: /* movd mm, ea */
3220 #ifdef TARGET_X86_64
3221 if (s
->dflag
== 2) {
3222 gen_ldst_modrm(s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3223 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3227 gen_ldst_modrm(s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3228 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3229 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3230 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3231 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3234 case 0x16e: /* movd xmm, ea */
3235 #ifdef TARGET_X86_64
3236 if (s
->dflag
== 2) {
3237 gen_ldst_modrm(s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3238 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3239 offsetof(CPUX86State
,xmm_regs
[reg
]));
3240 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3244 gen_ldst_modrm(s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3245 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3246 offsetof(CPUX86State
,xmm_regs
[reg
]));
3247 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3248 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3251 case 0x6f: /* movq mm, ea */
3253 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3254 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3257 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3258 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3259 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3260 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3263 case 0x010: /* movups */
3264 case 0x110: /* movupd */
3265 case 0x028: /* movaps */
3266 case 0x128: /* movapd */
3267 case 0x16f: /* movdqa xmm, ea */
3268 case 0x26f: /* movdqu xmm, ea */
3270 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3271 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3273 rm
= (modrm
& 7) | REX_B(s
);
3274 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3275 offsetof(CPUX86State
,xmm_regs
[rm
]));
3278 case 0x210: /* movss xmm, ea */
3280 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3281 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3282 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3284 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3285 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3286 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3288 rm
= (modrm
& 7) | REX_B(s
);
3289 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3290 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3293 case 0x310: /* movsd xmm, ea */
3295 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3296 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3298 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3299 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3301 rm
= (modrm
& 7) | REX_B(s
);
3302 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3303 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3306 case 0x012: /* movlps */
3307 case 0x112: /* movlpd */
3309 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3310 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3313 rm
= (modrm
& 7) | REX_B(s
);
3314 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3315 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3318 case 0x212: /* movsldup */
3320 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3321 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3323 rm
= (modrm
& 7) | REX_B(s
);
3324 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3325 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3326 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3327 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3329 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3330 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3331 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3332 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3334 case 0x312: /* movddup */
3336 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3337 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3339 rm
= (modrm
& 7) | REX_B(s
);
3340 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3341 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3343 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3344 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3346 case 0x016: /* movhps */
3347 case 0x116: /* movhpd */
3349 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3350 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3353 rm
= (modrm
& 7) | REX_B(s
);
3354 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3355 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3358 case 0x216: /* movshdup */
3360 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3361 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3363 rm
= (modrm
& 7) | REX_B(s
);
3364 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3365 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3366 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3367 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3369 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3370 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3371 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3372 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3377 int bit_index
, field_length
;
3379 if (b1
== 1 && reg
!= 0)
3381 field_length
= ldub_code(s
->pc
++) & 0x3F;
3382 bit_index
= ldub_code(s
->pc
++) & 0x3F;
3383 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3384 offsetof(CPUX86State
,xmm_regs
[reg
]));
3386 gen_helper_extrq_i(cpu_ptr0
, tcg_const_i32(bit_index
),
3387 tcg_const_i32(field_length
));
3389 gen_helper_insertq_i(cpu_ptr0
, tcg_const_i32(bit_index
),
3390 tcg_const_i32(field_length
));
3393 case 0x7e: /* movd ea, mm */
3394 #ifdef TARGET_X86_64
3395 if (s
->dflag
== 2) {
3396 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3397 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3398 gen_ldst_modrm(s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3402 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3403 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3404 gen_ldst_modrm(s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3407 case 0x17e: /* movd ea, xmm */
3408 #ifdef TARGET_X86_64
3409 if (s
->dflag
== 2) {
3410 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3411 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3412 gen_ldst_modrm(s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3416 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3417 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3418 gen_ldst_modrm(s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3421 case 0x27e: /* movq xmm, ea */
3423 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3424 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3426 rm
= (modrm
& 7) | REX_B(s
);
3427 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3428 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3430 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3432 case 0x7f: /* movq ea, mm */
3434 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3435 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3438 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3439 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3442 case 0x011: /* movups */
3443 case 0x111: /* movupd */
3444 case 0x029: /* movaps */
3445 case 0x129: /* movapd */
3446 case 0x17f: /* movdqa ea, xmm */
3447 case 0x27f: /* movdqu ea, xmm */
3449 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3450 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3452 rm
= (modrm
& 7) | REX_B(s
);
3453 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3454 offsetof(CPUX86State
,xmm_regs
[reg
]));
3457 case 0x211: /* movss ea, xmm */
3459 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3460 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3461 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3463 rm
= (modrm
& 7) | REX_B(s
);
3464 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3465 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3468 case 0x311: /* movsd ea, xmm */
3470 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3471 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3473 rm
= (modrm
& 7) | REX_B(s
);
3474 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3475 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3478 case 0x013: /* movlps */
3479 case 0x113: /* movlpd */
3481 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3482 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3487 case 0x017: /* movhps */
3488 case 0x117: /* movhpd */
3490 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3491 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3496 case 0x71: /* shift mm, im */
3499 case 0x171: /* shift xmm, im */
3505 val
= ldub_code(s
->pc
++);
3507 gen_op_movl_T0_im(val
);
3508 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3510 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3511 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3513 gen_op_movl_T0_im(val
);
3514 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3516 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3517 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3519 sse_fn_pp
= sse_op_table2
[((b
- 1) & 3) * 8 + (((modrm
>> 3)) & 7)][b1
];
3524 rm
= (modrm
& 7) | REX_B(s
);
3525 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3528 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3530 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3531 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3532 sse_fn_pp(cpu_ptr0
, cpu_ptr1
);
3534 case 0x050: /* movmskps */
3535 rm
= (modrm
& 7) | REX_B(s
);
3536 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3537 offsetof(CPUX86State
,xmm_regs
[rm
]));
3538 gen_helper_movmskps(cpu_tmp2_i32
, cpu_ptr0
);
3539 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3540 gen_op_mov_reg_T0(OT_LONG
, reg
);
3542 case 0x150: /* movmskpd */
3543 rm
= (modrm
& 7) | REX_B(s
);
3544 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3545 offsetof(CPUX86State
,xmm_regs
[rm
]));
3546 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_ptr0
);
3547 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3548 gen_op_mov_reg_T0(OT_LONG
, reg
);
3550 case 0x02a: /* cvtpi2ps */
3551 case 0x12a: /* cvtpi2pd */
3552 gen_helper_enter_mmx();
3554 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3555 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3556 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3559 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3561 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3562 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3563 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3566 gen_helper_cvtpi2ps(cpu_ptr0
, cpu_ptr1
);
3570 gen_helper_cvtpi2pd(cpu_ptr0
, cpu_ptr1
);
3574 case 0x22a: /* cvtsi2ss */
3575 case 0x32a: /* cvtsi2sd */
3576 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3577 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
3578 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3579 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3580 if (ot
== OT_LONG
) {
3581 SSEFunc_0_pi sse_fn_pi
= sse_op_table3ai
[(b
>> 8) & 1];
3582 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3583 sse_fn_pi(cpu_ptr0
, cpu_tmp2_i32
);
3585 #ifdef TARGET_X86_64
3586 SSEFunc_0_pl sse_fn_pl
= sse_op_table3aq
[(b
>> 8) & 1];
3587 sse_fn_pl(cpu_ptr0
, cpu_T
[0]);
3593 case 0x02c: /* cvttps2pi */
3594 case 0x12c: /* cvttpd2pi */
3595 case 0x02d: /* cvtps2pi */
3596 case 0x12d: /* cvtpd2pi */
3597 gen_helper_enter_mmx();
3599 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3600 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3601 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3603 rm
= (modrm
& 7) | REX_B(s
);
3604 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3606 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3607 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3608 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3611 gen_helper_cvttps2pi(cpu_ptr0
, cpu_ptr1
);
3614 gen_helper_cvttpd2pi(cpu_ptr0
, cpu_ptr1
);
3617 gen_helper_cvtps2pi(cpu_ptr0
, cpu_ptr1
);
3620 gen_helper_cvtpd2pi(cpu_ptr0
, cpu_ptr1
);
3624 case 0x22c: /* cvttss2si */
3625 case 0x32c: /* cvttsd2si */
3626 case 0x22d: /* cvtss2si */
3627 case 0x32d: /* cvtsd2si */
3628 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3630 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3632 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3634 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3635 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3637 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3639 rm
= (modrm
& 7) | REX_B(s
);
3640 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3642 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3643 if (ot
== OT_LONG
) {
3644 SSEFunc_i_p sse_fn_i_p
=
3645 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3646 sse_fn_i_p(cpu_tmp2_i32
, cpu_ptr0
);
3647 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3649 #ifdef TARGET_X86_64
3650 SSEFunc_l_p sse_fn_l_p
=
3651 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3652 sse_fn_l_p(cpu_T
[0], cpu_ptr0
);
3657 gen_op_mov_reg_T0(ot
, reg
);
3659 case 0xc4: /* pinsrw */
3662 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3663 val
= ldub_code(s
->pc
++);
3666 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3667 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3670 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3671 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3674 case 0xc5: /* pextrw */
3678 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3679 val
= ldub_code(s
->pc
++);
3682 rm
= (modrm
& 7) | REX_B(s
);
3683 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3684 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3688 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3689 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3691 reg
= ((modrm
>> 3) & 7) | rex_r
;
3692 gen_op_mov_reg_T0(ot
, reg
);
3694 case 0x1d6: /* movq ea, xmm */
3696 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3697 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3699 rm
= (modrm
& 7) | REX_B(s
);
3700 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3701 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3702 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3705 case 0x2d6: /* movq2dq */
3706 gen_helper_enter_mmx();
3708 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3709 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3710 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3712 case 0x3d6: /* movdq2q */
3713 gen_helper_enter_mmx();
3714 rm
= (modrm
& 7) | REX_B(s
);
3715 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3716 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3718 case 0xd7: /* pmovmskb */
3723 rm
= (modrm
& 7) | REX_B(s
);
3724 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3725 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_ptr0
);
3728 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3729 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_ptr0
);
3731 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3732 reg
= ((modrm
>> 3) & 7) | rex_r
;
3733 gen_op_mov_reg_T0(OT_LONG
, reg
);
3736 if (s
->prefix
& PREFIX_REPNZ
)
3740 modrm
= ldub_code(s
->pc
++);
3742 reg
= ((modrm
>> 3) & 7) | rex_r
;
3743 mod
= (modrm
>> 6) & 3;
3748 sse_fn_pp
= sse_op_table6
[b
].op
[b1
];
3752 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3756 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3758 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3760 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3761 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3763 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3764 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3765 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3766 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3767 offsetof(XMMReg
, XMM_Q(0)));
3769 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3770 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3771 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3772 (s
->mem_index
>> 2) - 1);
3773 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3774 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3775 offsetof(XMMReg
, XMM_L(0)));
3777 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3778 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3779 (s
->mem_index
>> 2) - 1);
3780 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3781 offsetof(XMMReg
, XMM_W(0)));
3783 case 0x2a: /* movntqda */
3784 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3787 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3791 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3793 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3795 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3796 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3797 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3800 if (sse_fn_pp
== SSE_SPECIAL
) {
3804 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3805 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3806 sse_fn_pp(cpu_ptr0
, cpu_ptr1
);
3809 s
->cc_op
= CC_OP_EFLAGS
;
3811 case 0x338: /* crc32 */
3814 modrm
= ldub_code(s
->pc
++);
3815 reg
= ((modrm
>> 3) & 7) | rex_r
;
3817 if (b
!= 0xf0 && b
!= 0xf1)
3819 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
))
3824 else if (b
== 0xf1 && s
->dflag
!= 2)
3825 if (s
->prefix
& PREFIX_DATA
)
3832 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3833 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3834 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
3835 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3836 cpu_T
[0], tcg_const_i32(8 << ot
));
3838 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3839 gen_op_mov_reg_T0(ot
, reg
);
3844 modrm
= ldub_code(s
->pc
++);
3846 reg
= ((modrm
>> 3) & 7) | rex_r
;
3847 mod
= (modrm
>> 6) & 3;
3852 sse_fn_ppi
= sse_op_table7
[b
].op
[b1
];
3856 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
3859 if (sse_fn_ppi
== SSE_SPECIAL
) {
3860 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3861 rm
= (modrm
& 7) | REX_B(s
);
3863 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3864 reg
= ((modrm
>> 3) & 7) | rex_r
;
3865 val
= ldub_code(s
->pc
++);
3867 case 0x14: /* pextrb */
3868 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3869 xmm_regs
[reg
].XMM_B(val
& 15)));
3871 gen_op_mov_reg_T0(ot
, rm
);
3873 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
3874 (s
->mem_index
>> 2) - 1);
3876 case 0x15: /* pextrw */
3877 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3878 xmm_regs
[reg
].XMM_W(val
& 7)));
3880 gen_op_mov_reg_T0(ot
, rm
);
3882 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
3883 (s
->mem_index
>> 2) - 1);
3886 if (ot
== OT_LONG
) { /* pextrd */
3887 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
3888 offsetof(CPUX86State
,
3889 xmm_regs
[reg
].XMM_L(val
& 3)));
3890 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3892 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
3894 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
3895 (s
->mem_index
>> 2) - 1);
3896 } else { /* pextrq */
3897 #ifdef TARGET_X86_64
3898 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3899 offsetof(CPUX86State
,
3900 xmm_regs
[reg
].XMM_Q(val
& 1)));
3902 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
3904 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
3905 (s
->mem_index
>> 2) - 1);
3911 case 0x17: /* extractps */
3912 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3913 xmm_regs
[reg
].XMM_L(val
& 3)));
3915 gen_op_mov_reg_T0(ot
, rm
);
3917 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
3918 (s
->mem_index
>> 2) - 1);
3920 case 0x20: /* pinsrb */
3922 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
3924 tcg_gen_qemu_ld8u(cpu_tmp0
, cpu_A0
,
3925 (s
->mem_index
>> 2) - 1);
3926 tcg_gen_st8_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
,
3927 xmm_regs
[reg
].XMM_B(val
& 15)));
3929 case 0x21: /* insertps */
3931 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
3932 offsetof(CPUX86State
,xmm_regs
[rm
]
3933 .XMM_L((val
>> 6) & 3)));
3935 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3936 (s
->mem_index
>> 2) - 1);
3937 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3939 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
3940 offsetof(CPUX86State
,xmm_regs
[reg
]
3941 .XMM_L((val
>> 4) & 3)));
3943 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3944 cpu_env
, offsetof(CPUX86State
,
3945 xmm_regs
[reg
].XMM_L(0)));
3947 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3948 cpu_env
, offsetof(CPUX86State
,
3949 xmm_regs
[reg
].XMM_L(1)));
3951 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3952 cpu_env
, offsetof(CPUX86State
,
3953 xmm_regs
[reg
].XMM_L(2)));
3955 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3956 cpu_env
, offsetof(CPUX86State
,
3957 xmm_regs
[reg
].XMM_L(3)));
3960 if (ot
== OT_LONG
) { /* pinsrd */
3962 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
3964 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3965 (s
->mem_index
>> 2) - 1);
3966 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3967 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
3968 offsetof(CPUX86State
,
3969 xmm_regs
[reg
].XMM_L(val
& 3)));
3970 } else { /* pinsrq */
3971 #ifdef TARGET_X86_64
3973 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
3975 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
3976 (s
->mem_index
>> 2) - 1);
3977 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3978 offsetof(CPUX86State
,
3979 xmm_regs
[reg
].XMM_Q(val
& 1)));
3990 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3992 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3994 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3995 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3996 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3999 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4001 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4003 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4004 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4005 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4008 val
= ldub_code(s
->pc
++);
4010 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4011 s
->cc_op
= CC_OP_EFLAGS
;
4014 /* The helper must use entire 64-bit gp registers */
4018 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4019 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4020 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4026 /* generic MMX or SSE operation */
4028 case 0x70: /* pshufx insn */
4029 case 0xc6: /* pshufx insn */
4030 case 0xc2: /* compare insns */
4037 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4039 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4040 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4041 if (b1
>= 2 && ((b
>= 0x50 && b
<= 0x5f && b
!= 0x5b) ||
4043 /* specific case for SSE single instructions */
4046 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
4047 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4050 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_D(0)));
4053 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4056 rm
= (modrm
& 7) | REX_B(s
);
4057 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4060 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4062 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4063 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4064 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4067 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4071 case 0x0f: /* 3DNow! data insns */
4072 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4074 val
= ldub_code(s
->pc
++);
4075 sse_fn_pp
= sse_op_table5
[val
];
4079 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4080 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4081 sse_fn_pp(cpu_ptr0
, cpu_ptr1
);
4083 case 0x70: /* pshufx insn */
4084 case 0xc6: /* pshufx insn */
4085 val
= ldub_code(s
->pc
++);
4086 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4087 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4088 /* XXX: introduce a new table? */
4089 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_pp
;
4090 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4094 val
= ldub_code(s
->pc
++);
4097 sse_fn_pp
= sse_op_table4
[val
][b1
];
4099 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4100 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4101 sse_fn_pp(cpu_ptr0
, cpu_ptr1
);
4104 /* maskmov : we must prepare A0 */
4107 #ifdef TARGET_X86_64
4108 if (s
->aflag
== 2) {
4109 gen_op_movq_A0_reg(R_EDI
);
4113 gen_op_movl_A0_reg(R_EDI
);
4115 gen_op_andl_A0_ffff();
4117 gen_add_A0_ds_seg(s
);
4119 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4120 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4121 /* XXX: introduce a new table? */
4122 sse_fn_ppt
= (SSEFunc_0_ppt
)sse_fn_pp
;
4123 sse_fn_ppt(cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4126 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4127 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4128 sse_fn_pp(cpu_ptr0
, cpu_ptr1
);
4131 if (b
== 0x2e || b
== 0x2f) {
4132 s
->cc_op
= CC_OP_EFLAGS
;
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
4147 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
)))
4148 tcg_gen_debug_insn_start(pc_start
);
4156 #ifdef TARGET_X86_64
4161 s
->rip_offset
= 0; /* for relative ip address */
4163 b
= ldub_code(s
->pc
);
4165 /* check prefixes */
4166 #ifdef TARGET_X86_64
4170 prefixes
|= PREFIX_REPZ
;
4173 prefixes
|= PREFIX_REPNZ
;
4176 prefixes
|= PREFIX_LOCK
;
4197 prefixes
|= PREFIX_DATA
;
4200 prefixes
|= PREFIX_ADR
;
4204 rex_w
= (b
>> 3) & 1;
4205 rex_r
= (b
& 0x4) << 1;
4206 s
->rex_x
= (b
& 0x2) << 2;
4207 REX_B(s
) = (b
& 0x1) << 3;
4208 x86_64_hregs
= 1; /* select uniform byte register addressing */
4212 /* 0x66 is ignored if rex.w is set */
4215 if (prefixes
& PREFIX_DATA
)
4218 if (!(prefixes
& PREFIX_ADR
))
4225 prefixes
|= PREFIX_REPZ
;
4228 prefixes
|= PREFIX_REPNZ
;
4231 prefixes
|= PREFIX_LOCK
;
4252 prefixes
|= PREFIX_DATA
;
4255 prefixes
|= PREFIX_ADR
;
4258 if (prefixes
& PREFIX_DATA
)
4260 if (prefixes
& PREFIX_ADR
)
4264 s
->prefix
= prefixes
;
4268 /* lock generation */
4269 if (prefixes
& PREFIX_LOCK
)
4272 /* now check op code */
4276 /**************************/
4277 /* extended op code */
4278 b
= ldub_code(s
->pc
++) | 0x100;
4281 /**************************/
4299 ot
= dflag
+ OT_WORD
;
4302 case 0: /* OP Ev, Gv */
4303 modrm
= ldub_code(s
->pc
++);
4304 reg
= ((modrm
>> 3) & 7) | rex_r
;
4305 mod
= (modrm
>> 6) & 3;
4306 rm
= (modrm
& 7) | REX_B(s
);
4308 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4310 } else if (op
== OP_XORL
&& rm
== reg
) {
4312 /* xor reg, reg optimisation */
4314 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4315 gen_op_mov_reg_T0(ot
, reg
);
4316 gen_op_update1_cc();
4321 gen_op_mov_TN_reg(ot
, 1, reg
);
4322 gen_op(s
, op
, ot
, opreg
);
4324 case 1: /* OP Gv, Ev */
4325 modrm
= ldub_code(s
->pc
++);
4326 mod
= (modrm
>> 6) & 3;
4327 reg
= ((modrm
>> 3) & 7) | rex_r
;
4328 rm
= (modrm
& 7) | REX_B(s
);
4330 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4331 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4332 } else if (op
== OP_XORL
&& rm
== reg
) {
4335 gen_op_mov_TN_reg(ot
, 1, rm
);
4337 gen_op(s
, op
, ot
, reg
);
4339 case 2: /* OP A, Iv */
4340 val
= insn_get(s
, ot
);
4341 gen_op_movl_T1_im(val
);
4342 gen_op(s
, op
, ot
, OR_EAX
);
4351 case 0x80: /* GRP1 */
4360 ot
= dflag
+ OT_WORD
;
4362 modrm
= ldub_code(s
->pc
++);
4363 mod
= (modrm
>> 6) & 3;
4364 rm
= (modrm
& 7) | REX_B(s
);
4365 op
= (modrm
>> 3) & 7;
4371 s
->rip_offset
= insn_const_size(ot
);
4372 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4383 val
= insn_get(s
, ot
);
4386 val
= (int8_t)insn_get(s
, OT_BYTE
);
4389 gen_op_movl_T1_im(val
);
4390 gen_op(s
, op
, ot
, opreg
);
4394 /**************************/
4395 /* inc, dec, and other misc arith */
4396 case 0x40 ... 0x47: /* inc Gv */
4397 ot
= dflag
? OT_LONG
: OT_WORD
;
4398 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4400 case 0x48 ... 0x4f: /* dec Gv */
4401 ot
= dflag
? OT_LONG
: OT_WORD
;
4402 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4404 case 0xf6: /* GRP3 */
4409 ot
= dflag
+ OT_WORD
;
4411 modrm
= ldub_code(s
->pc
++);
4412 mod
= (modrm
>> 6) & 3;
4413 rm
= (modrm
& 7) | REX_B(s
);
4414 op
= (modrm
>> 3) & 7;
4417 s
->rip_offset
= insn_const_size(ot
);
4418 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4419 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4421 gen_op_mov_TN_reg(ot
, 0, rm
);
4426 val
= insn_get(s
, ot
);
4427 gen_op_movl_T1_im(val
);
4428 gen_op_testl_T0_T1_cc();
4429 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4432 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4434 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4436 gen_op_mov_reg_T0(ot
, rm
);
4440 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
4442 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4444 gen_op_mov_reg_T0(ot
, rm
);
4446 gen_op_update_neg_cc();
4447 s
->cc_op
= CC_OP_SUBB
+ ot
;
4452 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4453 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
4454 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
4455 /* XXX: use 32 bit mul which could be faster */
4456 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4457 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4458 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4459 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
4460 s
->cc_op
= CC_OP_MULB
;
4463 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4464 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
4465 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
4466 /* XXX: use 32 bit mul which could be faster */
4467 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4468 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4469 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4470 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4471 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4472 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4473 s
->cc_op
= CC_OP_MULW
;
4477 #ifdef TARGET_X86_64
4478 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4479 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4480 tcg_gen_ext32u_tl(cpu_T
[1], cpu_T
[1]);
4481 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4482 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4483 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4484 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4485 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4486 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4490 t0
= tcg_temp_new_i64();
4491 t1
= tcg_temp_new_i64();
4492 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4493 tcg_gen_extu_i32_i64(t0
, cpu_T
[0]);
4494 tcg_gen_extu_i32_i64(t1
, cpu_T
[1]);
4495 tcg_gen_mul_i64(t0
, t0
, t1
);
4496 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4497 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4498 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4499 tcg_gen_shri_i64(t0
, t0
, 32);
4500 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4501 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4502 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4505 s
->cc_op
= CC_OP_MULL
;
4507 #ifdef TARGET_X86_64
4509 gen_helper_mulq_EAX_T0(cpu_T
[0]);
4510 s
->cc_op
= CC_OP_MULQ
;
4518 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4519 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4520 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
4521 /* XXX: use 32 bit mul which could be faster */
4522 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4523 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4524 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4525 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
4526 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4527 s
->cc_op
= CC_OP_MULB
;
4530 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4531 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4532 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4533 /* XXX: use 32 bit mul which could be faster */
4534 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4535 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4536 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4537 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4538 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4539 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4540 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4541 s
->cc_op
= CC_OP_MULW
;
4545 #ifdef TARGET_X86_64
4546 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4547 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4548 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4549 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4550 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4551 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4552 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4553 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4554 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4555 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4559 t0
= tcg_temp_new_i64();
4560 t1
= tcg_temp_new_i64();
4561 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4562 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4563 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4564 tcg_gen_mul_i64(t0
, t0
, t1
);
4565 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4566 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4567 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4568 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4569 tcg_gen_shri_i64(t0
, t0
, 32);
4570 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4571 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4572 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4575 s
->cc_op
= CC_OP_MULL
;
4577 #ifdef TARGET_X86_64
4579 gen_helper_imulq_EAX_T0(cpu_T
[0]);
4580 s
->cc_op
= CC_OP_MULQ
;
4588 gen_jmp_im(pc_start
- s
->cs_base
);
4589 gen_helper_divb_AL(cpu_T
[0]);
4592 gen_jmp_im(pc_start
- s
->cs_base
);
4593 gen_helper_divw_AX(cpu_T
[0]);
4597 gen_jmp_im(pc_start
- s
->cs_base
);
4598 gen_helper_divl_EAX(cpu_T
[0]);
4600 #ifdef TARGET_X86_64
4602 gen_jmp_im(pc_start
- s
->cs_base
);
4603 gen_helper_divq_EAX(cpu_T
[0]);
4611 gen_jmp_im(pc_start
- s
->cs_base
);
4612 gen_helper_idivb_AL(cpu_T
[0]);
4615 gen_jmp_im(pc_start
- s
->cs_base
);
4616 gen_helper_idivw_AX(cpu_T
[0]);
4620 gen_jmp_im(pc_start
- s
->cs_base
);
4621 gen_helper_idivl_EAX(cpu_T
[0]);
4623 #ifdef TARGET_X86_64
4625 gen_jmp_im(pc_start
- s
->cs_base
);
4626 gen_helper_idivq_EAX(cpu_T
[0]);
4636 case 0xfe: /* GRP4 */
4637 case 0xff: /* GRP5 */
4641 ot
= dflag
+ OT_WORD
;
4643 modrm
= ldub_code(s
->pc
++);
4644 mod
= (modrm
>> 6) & 3;
4645 rm
= (modrm
& 7) | REX_B(s
);
4646 op
= (modrm
>> 3) & 7;
4647 if (op
>= 2 && b
== 0xfe) {
4651 if (op
== 2 || op
== 4) {
4652 /* operand size for jumps is 64 bit */
4654 } else if (op
== 3 || op
== 5) {
4655 ot
= dflag
? OT_LONG
+ (rex_w
== 1) : OT_WORD
;
4656 } else if (op
== 6) {
4657 /* default push size is 64 bit */
4658 ot
= dflag
? OT_QUAD
: OT_WORD
;
4662 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4663 if (op
>= 2 && op
!= 3 && op
!= 5)
4664 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4666 gen_op_mov_TN_reg(ot
, 0, rm
);
4670 case 0: /* inc Ev */
4675 gen_inc(s
, ot
, opreg
, 1);
4677 case 1: /* dec Ev */
4682 gen_inc(s
, ot
, opreg
, -1);
4684 case 2: /* call Ev */
4685 /* XXX: optimize if memory (no 'and' is necessary) */
4687 gen_op_andl_T0_ffff();
4688 next_eip
= s
->pc
- s
->cs_base
;
4689 gen_movtl_T1_im(next_eip
);
4694 case 3: /* lcall Ev */
4695 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4696 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4697 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4699 if (s
->pe
&& !s
->vm86
) {
4700 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4701 gen_op_set_cc_op(s
->cc_op
);
4702 gen_jmp_im(pc_start
- s
->cs_base
);
4703 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4704 gen_helper_lcall_protected(cpu_tmp2_i32
, cpu_T
[1],
4705 tcg_const_i32(dflag
),
4706 tcg_const_i32(s
->pc
- pc_start
));
4708 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4709 gen_helper_lcall_real(cpu_tmp2_i32
, cpu_T
[1],
4710 tcg_const_i32(dflag
),
4711 tcg_const_i32(s
->pc
- s
->cs_base
));
4715 case 4: /* jmp Ev */
4717 gen_op_andl_T0_ffff();
4721 case 5: /* ljmp Ev */
4722 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4723 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4724 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4726 if (s
->pe
&& !s
->vm86
) {
4727 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4728 gen_op_set_cc_op(s
->cc_op
);
4729 gen_jmp_im(pc_start
- s
->cs_base
);
4730 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4731 gen_helper_ljmp_protected(cpu_tmp2_i32
, cpu_T
[1],
4732 tcg_const_i32(s
->pc
- pc_start
));
4734 gen_op_movl_seg_T0_vm(R_CS
);
4735 gen_op_movl_T0_T1();
4740 case 6: /* push Ev */
4748 case 0x84: /* test Ev, Gv */
4753 ot
= dflag
+ OT_WORD
;
4755 modrm
= ldub_code(s
->pc
++);
4756 reg
= ((modrm
>> 3) & 7) | rex_r
;
4758 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
4759 gen_op_mov_TN_reg(ot
, 1, reg
);
4760 gen_op_testl_T0_T1_cc();
4761 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4764 case 0xa8: /* test eAX, Iv */
4769 ot
= dflag
+ OT_WORD
;
4770 val
= insn_get(s
, ot
);
4772 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
4773 gen_op_movl_T1_im(val
);
4774 gen_op_testl_T0_T1_cc();
4775 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4778 case 0x98: /* CWDE/CBW */
4779 #ifdef TARGET_X86_64
4781 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4782 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4783 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
4787 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4788 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4789 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4791 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
4792 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4793 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4796 case 0x99: /* CDQ/CWD */
4797 #ifdef TARGET_X86_64
4799 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
4800 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
4801 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
4805 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4806 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4807 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
4808 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4810 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4811 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4812 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
4813 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4816 case 0x1af: /* imul Gv, Ev */
4817 case 0x69: /* imul Gv, Ev, I */
4819 ot
= dflag
+ OT_WORD
;
4820 modrm
= ldub_code(s
->pc
++);
4821 reg
= ((modrm
>> 3) & 7) | rex_r
;
4823 s
->rip_offset
= insn_const_size(ot
);
4826 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
4828 val
= insn_get(s
, ot
);
4829 gen_op_movl_T1_im(val
);
4830 } else if (b
== 0x6b) {
4831 val
= (int8_t)insn_get(s
, OT_BYTE
);
4832 gen_op_movl_T1_im(val
);
4834 gen_op_mov_TN_reg(ot
, 1, reg
);
4837 #ifdef TARGET_X86_64
4838 if (ot
== OT_QUAD
) {
4839 gen_helper_imulq_T0_T1(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4842 if (ot
== OT_LONG
) {
4843 #ifdef TARGET_X86_64
4844 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4845 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4846 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4847 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4848 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4849 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4853 t0
= tcg_temp_new_i64();
4854 t1
= tcg_temp_new_i64();
4855 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4856 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4857 tcg_gen_mul_i64(t0
, t0
, t1
);
4858 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4859 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4860 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4861 tcg_gen_shri_i64(t0
, t0
, 32);
4862 tcg_gen_trunc_i64_i32(cpu_T
[1], t0
);
4863 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[1], cpu_tmp0
);
4867 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4868 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4869 /* XXX: use 32 bit mul which could be faster */
4870 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4871 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4872 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4873 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4875 gen_op_mov_reg_T0(ot
, reg
);
4876 s
->cc_op
= CC_OP_MULB
+ ot
;
4879 case 0x1c1: /* xadd Ev, Gv */
4883 ot
= dflag
+ OT_WORD
;
4884 modrm
= ldub_code(s
->pc
++);
4885 reg
= ((modrm
>> 3) & 7) | rex_r
;
4886 mod
= (modrm
>> 6) & 3;
4888 rm
= (modrm
& 7) | REX_B(s
);
4889 gen_op_mov_TN_reg(ot
, 0, reg
);
4890 gen_op_mov_TN_reg(ot
, 1, rm
);
4891 gen_op_addl_T0_T1();
4892 gen_op_mov_reg_T1(ot
, reg
);
4893 gen_op_mov_reg_T0(ot
, rm
);
4895 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4896 gen_op_mov_TN_reg(ot
, 0, reg
);
4897 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4898 gen_op_addl_T0_T1();
4899 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4900 gen_op_mov_reg_T1(ot
, reg
);
4902 gen_op_update2_cc();
4903 s
->cc_op
= CC_OP_ADDB
+ ot
;
4906 case 0x1b1: /* cmpxchg Ev, Gv */
4909 TCGv t0
, t1
, t2
, a0
;
4914 ot
= dflag
+ OT_WORD
;
4915 modrm
= ldub_code(s
->pc
++);
4916 reg
= ((modrm
>> 3) & 7) | rex_r
;
4917 mod
= (modrm
>> 6) & 3;
4918 t0
= tcg_temp_local_new();
4919 t1
= tcg_temp_local_new();
4920 t2
= tcg_temp_local_new();
4921 a0
= tcg_temp_local_new();
4922 gen_op_mov_v_reg(ot
, t1
, reg
);
4924 rm
= (modrm
& 7) | REX_B(s
);
4925 gen_op_mov_v_reg(ot
, t0
, rm
);
4927 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4928 tcg_gen_mov_tl(a0
, cpu_A0
);
4929 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
4930 rm
= 0; /* avoid warning */
4932 label1
= gen_new_label();
4933 tcg_gen_sub_tl(t2
, cpu_regs
[R_EAX
], t0
);
4935 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
4936 label2
= gen_new_label();
4938 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
4940 gen_set_label(label1
);
4941 gen_op_mov_reg_v(ot
, rm
, t1
);
4943 /* perform no-op store cycle like physical cpu; must be
4944 before changing accumulator to ensure idempotency if
4945 the store faults and the instruction is restarted */
4946 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
4947 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
4949 gen_set_label(label1
);
4950 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
4952 gen_set_label(label2
);
4953 tcg_gen_mov_tl(cpu_cc_src
, t0
);
4954 tcg_gen_mov_tl(cpu_cc_dst
, t2
);
4955 s
->cc_op
= CC_OP_SUBB
+ ot
;
4962 case 0x1c7: /* cmpxchg8b */
4963 modrm
= ldub_code(s
->pc
++);
4964 mod
= (modrm
>> 6) & 3;
4965 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
4967 #ifdef TARGET_X86_64
4969 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
4971 gen_jmp_im(pc_start
- s
->cs_base
);
4972 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4973 gen_op_set_cc_op(s
->cc_op
);
4974 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4975 gen_helper_cmpxchg16b(cpu_A0
);
4979 if (!(s
->cpuid_features
& CPUID_CX8
))
4981 gen_jmp_im(pc_start
- s
->cs_base
);
4982 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4983 gen_op_set_cc_op(s
->cc_op
);
4984 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4985 gen_helper_cmpxchg8b(cpu_A0
);
4987 s
->cc_op
= CC_OP_EFLAGS
;
4990 /**************************/
4992 case 0x50 ... 0x57: /* push */
4993 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
4996 case 0x58 ... 0x5f: /* pop */
4998 ot
= dflag
? OT_QUAD
: OT_WORD
;
5000 ot
= dflag
+ OT_WORD
;
5003 /* NOTE: order is important for pop %sp */
5005 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5007 case 0x60: /* pusha */
5012 case 0x61: /* popa */
5017 case 0x68: /* push Iv */
5020 ot
= dflag
? OT_QUAD
: OT_WORD
;
5022 ot
= dflag
+ OT_WORD
;
5025 val
= insn_get(s
, ot
);
5027 val
= (int8_t)insn_get(s
, OT_BYTE
);
5028 gen_op_movl_T0_im(val
);
5031 case 0x8f: /* pop Ev */
5033 ot
= dflag
? OT_QUAD
: OT_WORD
;
5035 ot
= dflag
+ OT_WORD
;
5037 modrm
= ldub_code(s
->pc
++);
5038 mod
= (modrm
>> 6) & 3;
5041 /* NOTE: order is important for pop %sp */
5043 rm
= (modrm
& 7) | REX_B(s
);
5044 gen_op_mov_reg_T0(ot
, rm
);
5046 /* NOTE: order is important too for MMU exceptions */
5047 s
->popl_esp_hack
= 1 << ot
;
5048 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
5049 s
->popl_esp_hack
= 0;
5053 case 0xc8: /* enter */
5056 val
= lduw_code(s
->pc
);
5058 level
= ldub_code(s
->pc
++);
5059 gen_enter(s
, val
, level
);
5062 case 0xc9: /* leave */
5063 /* XXX: exception not precise (ESP is updated before potential exception) */
5065 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
5066 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
5067 } else if (s
->ss32
) {
5068 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
5069 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
5071 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
5072 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
5076 ot
= dflag
? OT_QUAD
: OT_WORD
;
5078 ot
= dflag
+ OT_WORD
;
5080 gen_op_mov_reg_T0(ot
, R_EBP
);
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        gen_op_movl_T0_seg(b >> 3);
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        /* if reg == SS, inhibit interrupts/trace. */
        /* If several instructions disable interrupts, only the
           _first_ does it */
        if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
            gen_helper_set_inhibit_irq();
        gen_jmp_im(s->pc - s->cs_base);
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
        gen_jmp_im(s->pc - s->cs_base);
        /**************************/
    case 0x89: /* mov Gv, Ev */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(s, modrm, ot, reg, 1);
    case 0xc7: /* mov Ev, Iv */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        s->rip_offset = insn_const_size(ot);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        val = insn_get(s, ot);
        gen_op_movl_T0_im(val);
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
    case 0x8b: /* mov Ev, Gv */
        ot = OT_WORD + dflag;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_T0(ot, reg);
    case 0x8e: /* mov seg, Gv */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
        gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        /* if reg == SS, inhibit interrupts/trace */
        /* If several instructions disable interrupts, only the
           _first_ does it */
        if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
            gen_helper_set_inhibit_irq();
        gen_jmp_im(s->pc - s->cs_base);
    case 0x8c: /* mov Gv, seg */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_op_movl_T0_seg(reg);
        ot = OT_WORD + dflag;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        /* d_ot is the size of destination */
        d_ot = dflag + OT_WORD;
        /* ot is the size of source */
        ot = (b & 1) + OT_BYTE;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_TN_reg(ot, 0, rm);
        switch(ot | (b & 8)) {
            tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(d_ot, reg);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_lds_T0_A0(ot + s->mem_index);
        gen_op_ldu_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(d_ot, reg);
    case 0x8d: /* lea */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_mov_reg_A0(ot - OT_WORD, reg);
    case 0xa0: /* mov EAX, Ov */
    case 0xa2: /* mov Ov, EAX */
        target_ulong offset_addr;
        ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            offset_addr = ldq_code(s->pc);
            gen_op_movq_A0_im(offset_addr);
        offset_addr = insn_get(s, OT_LONG);
        offset_addr = insn_get(s, OT_WORD);
        gen_op_movl_A0_im(offset_addr);
        gen_add_A0_ds_seg(s);
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, R_EAX);
        gen_op_mov_TN_reg(ot, 0, R_EAX);
        gen_op_st_T0_A0(ot + s->mem_index);
    case 0xd7: /* xlat */
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_movq_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
        gen_op_movl_A0_reg(R_EBX);
        gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
        gen_op_andl_A0_ffff();
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        gen_add_A0_ds_seg(s);
        gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        tmp = ldq_code(s->pc);
        reg = (b & 7) | REX_B(s);
        gen_movtl_T0_im(tmp);
        gen_op_mov_reg_T0(OT_QUAD, reg);
        ot = dflag ? OT_LONG : OT_WORD;
        val = insn_get(s, ot);
        reg = (b & 7) | REX_B(s);
        gen_op_movl_T0_im(val);
        gen_op_mov_reg_T0(ot, reg);
    case 0x91 ... 0x97: /* xchg R, EAX */
        ot = dflag + OT_WORD;
        reg = (b & 7) | REX_B(s);
    case 0x87: /* xchg Ev, Gv */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_TN_reg(ot, 0, reg);
        gen_op_mov_TN_reg(ot, 1, rm);
        gen_op_mov_reg_T0(ot, rm);
        gen_op_mov_reg_T1(ot, reg);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_mov_TN_reg(ot, 0, reg);
        /* for xchg, lock is implicit */
        if (!(prefixes & PREFIX_LOCK))
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_op_st_T0_A0(ot + s->mem_index);
        if (!(prefixes & PREFIX_LOCK))
            gen_helper_unlock();
        gen_op_mov_reg_T1(ot, reg);
    case 0xc4: /* les Gv */
    case 0xc5: /* lds Gv */
    case 0x1b2: /* lss Gv */
    case 0x1b4: /* lfs Gv */
    case 0x1b5: /* lgs Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
        /* load the segment first to handle exceptions properly */
        gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_T1(ot, reg);
        gen_jmp_im(s->pc - s->cs_base);
        /************************/
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        opreg = (modrm & 7) | REX_B(s);
        gen_shift(s, op, ot, opreg, OR_ECX);
        shift = ldub_code(s->pc++);
        gen_shifti(s, op, ot, opreg, shift);
    case 0x1a4: /* shld imm */
    case 0x1a5: /* shld cl */
    case 0x1ac: /* shrd imm */
    case 0x1ad: /* shrd cl */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_mov_TN_reg(ot, 1, reg);
        val = ldub_code(s->pc++);
        tcg_gen_movi_tl(cpu_T3, val);
        tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
        gen_shiftd_rm_T1_T3(s, ot, opreg, op);
        /************************/
        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
            /* XXX: what to do if illegal op ? */
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        case 0x00 ... 0x07: /* fxxxs */
        case 0x10 ... 0x17: /* fixxxl */
        case 0x20 ... 0x27: /* fxxxl */
        case 0x30 ... 0x37: /* fixxx */
            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_flds_FT0(cpu_tmp2_i32);
            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_fildl_FT0(cpu_tmp2_i32);
            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                              (s->mem_index >> 2) - 1);
            gen_helper_fldl_FT0(cpu_tmp1_i64);
            gen_op_lds_T0_A0(OT_WORD + s->mem_index);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_fildl_FT0(cpu_tmp2_i32);
            gen_helper_fp_arith_ST0_FT0(op1);
            /* fcomp needs pop */
        case 0x08: /* flds */
        case 0x0a: /* fsts */
        case 0x0b: /* fstps */
        case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
        case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
        case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_flds_ST0(cpu_tmp2_i32);
            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_fildl_ST0(cpu_tmp2_i32);
            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                              (s->mem_index >> 2) - 1);
            gen_helper_fldl_ST0(cpu_tmp1_i64);
            gen_op_lds_T0_A0(OT_WORD + s->mem_index);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_fildl_ST0(cpu_tmp2_i32);
            /* XXX: the corresponding CPUID bit must be tested ! */
            gen_helper_fisttl_ST0(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_st_T0_A0(OT_LONG + s->mem_index);
            gen_helper_fisttll_ST0(cpu_tmp1_i64);
            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                              (s->mem_index >> 2) - 1);
            gen_helper_fistt_ST0(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_helper_fsts_ST0(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_st_T0_A0(OT_LONG + s->mem_index);
            gen_helper_fistl_ST0(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_st_T0_A0(OT_LONG + s->mem_index);
            gen_helper_fstl_ST0(cpu_tmp1_i64);
            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                              (s->mem_index >> 2) - 1);
            gen_helper_fist_ST0(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        case 0x0c: /* fldenv mem */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fldenv(cpu_A0, tcg_const_i32(s->dflag));
        case 0x0d: /* fldcw mem */
            gen_op_ld_T0_A0(OT_WORD + s->mem_index);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_helper_fldcw(cpu_tmp2_i32);
        case 0x0e: /* fnstenv mem */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
        case 0x0f: /* fnstcw mem */
            gen_helper_fnstcw(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        case 0x1d: /* fldt mem */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fldt_ST0(cpu_A0);
        case 0x1f: /* fstpt mem */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fstt_ST0(cpu_A0);
        case 0x2c: /* frstor mem */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
        case 0x2e: /* fnsave mem */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
        case 0x2f: /* fnstsw mem */
            gen_helper_fnstsw(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
        case 0x3c: /* fbld */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fbld_ST0(cpu_A0);
        case 0x3e: /* fbstp */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fbst_ST0(cpu_A0);
        case 0x3d: /* fildll */
            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                              (s->mem_index >> 2) - 1);
            gen_helper_fildll_ST0(cpu_tmp1_i64);
        case 0x3f: /* fistpll */
            gen_helper_fistll_ST0(cpu_tmp1_i64);
            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                              (s->mem_index >> 2) - 1);
        /* register float ops */
        case 0x08: /* fld sti */
            gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
        case 0x09: /* fxchg sti */
        case 0x29: /* fxchg4 sti, undocumented op */
        case 0x39: /* fxchg7 sti, undocumented op */
            gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
        case 0x0a: /* grp d9/2 */
            /* check exceptions (FreeBSD FPU probe) */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
        case 0x0c: /* grp d9/4 */
            gen_helper_fchs_ST0();
            gen_helper_fabs_ST0();
            gen_helper_fldz_FT0();
            gen_helper_fcom_ST0_FT0();
            gen_helper_fxam_ST0();
        case 0x0d: /* grp d9/5 */
            gen_helper_fld1_ST0();
            gen_helper_fldl2t_ST0();
            gen_helper_fldl2e_ST0();
            gen_helper_fldpi_ST0();
            gen_helper_fldlg2_ST0();
            gen_helper_fldln2_ST0();
            gen_helper_fldz_ST0();
        case 0x0e: /* grp d9/6 */
            case 3: /* fpatan */
                gen_helper_fpatan();
            case 4: /* fxtract */
                gen_helper_fxtract();
            case 5: /* fprem1 */
                gen_helper_fprem1();
            case 6: /* fdecstp */
                gen_helper_fdecstp();
            case 7: /* fincstp */
                gen_helper_fincstp();
        case 0x0f: /* grp d9/7 */
            case 1: /* fyl2xp1 */
                gen_helper_fyl2xp1();
            case 3: /* fsincos */
                gen_helper_fsincos();
            case 5: /* fscale */
                gen_helper_fscale();
            case 4: /* frndint */
                gen_helper_frndint();
        case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
        case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
        case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
            gen_helper_fp_arith_STN_ST0(op1, opreg);
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fp_arith_ST0_FT0(op1);
        case 0x02: /* fcom */
        case 0x22: /* fcom2, undocumented op */
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fcom_ST0_FT0();
        case 0x03: /* fcomp */
        case 0x23: /* fcomp3, undocumented op */
        case 0x32: /* fcomp5, undocumented op */
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fcom_ST0_FT0();
        case 0x15: /* da/5 */
            case 1: /* fucompp */
                gen_helper_fmov_FT0_STN(tcg_const_i32(1));
                gen_helper_fucom_ST0_FT0();
            case 0: /* feni (287 only, just do nop here) */
            case 1: /* fdisi (287 only, just do nop here) */
            case 3: /* fninit */
                gen_helper_fninit();
            case 4: /* fsetpm (287 only, just do nop here) */
        case 0x1d: /* fucomi */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fucomi_ST0_FT0();
            s->cc_op = CC_OP_EFLAGS;
        case 0x1e: /* fcomi */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fcomi_ST0_FT0();
            s->cc_op = CC_OP_EFLAGS;
        case 0x28: /* ffree sti */
            gen_helper_ffree_STN(tcg_const_i32(opreg));
        case 0x2a: /* fst sti */
            gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
        case 0x2b: /* fstp sti */
        case 0x0b: /* fstp1 sti, undocumented op */
        case 0x3a: /* fstp8 sti, undocumented op */
        case 0x3b: /* fstp9 sti, undocumented op */
            gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
        case 0x2c: /* fucom st(i) */
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fucom_ST0_FT0();
        case 0x2d: /* fucomp st(i) */
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fucom_ST0_FT0();
        case 0x33: /* de/3 */
            case 1: /* fcompp */
                gen_helper_fmov_FT0_STN(tcg_const_i32(1));
                gen_helper_fcom_ST0_FT0();
        case 0x38: /* ffreep sti, undocumented op */
            gen_helper_ffree_STN(tcg_const_i32(opreg));
        case 0x3c: /* df/4 */
            gen_helper_fnstsw(cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        case 0x3d: /* fucomip */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fucomi_ST0_FT0();
            s->cc_op = CC_OP_EFLAGS;
        case 0x3e: /* fcomip */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
            gen_helper_fcomi_ST0_FT0();
            s->cc_op = CC_OP_EFLAGS;
        case 0x10 ... 0x13: /* fcmovxx */
            static const uint8_t fcmov_cc[8] = {
            op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
            l1 = gen_new_label();
            gen_jcc1(s, s->cc_op, op1, l1);
            gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
        /************************/
    case 0xa4: /* movsS */
        ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xaa: /* stosS */
        ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xac: /* lodsS */
        ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xae: /* scasS */
        ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        s->cc_op = CC_OP_SUBB + ot;
    case 0xa6: /* cmpsS */
        ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        s->cc_op = CC_OP_SUBB + ot;
    case 0x6c: /* insS */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        gen_jmp(s, s->pc - s->cs_base);
    case 0x6e: /* outsS */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        gen_jmp(s, s->pc - s->cs_base);
        /************************/
        ot = dflag ? OT_LONG : OT_WORD;
        val = ldub_code(s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        gen_jmp(s, s->pc - s->cs_base);
        ot = dflag ? OT_LONG : OT_WORD;
        val = ldub_code(s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        gen_jmp(s, s->pc - s->cs_base);
        ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
        gen_jmp(s, s->pc - s->cs_base);
        ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        gen_jmp(s, s->pc - s->cs_base);
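        /* Note: before the in/out helper is emitted, gen_check_io() validates
           the port access against IOPL / the TSS I/O permission bitmap and
           against the SVM I/O intercept bitmap; the SVM_IOIO_TYPE_MASK bit in
           the flags argument appears to mark the access as an IN (read) in the
           intercept exit information. */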
        /************************/
    case 0xc2: /* ret im */
        val = ldsw_code(s->pc);
        if (CODE64(s) && s->dflag)
        gen_stack_update(s, val + (2 << s->dflag));
        gen_op_andl_T0_ffff();
    case 0xc3: /* ret */
        gen_op_andl_T0_ffff();
    case 0xca: /* lret im */
        val = ldsw_code(s->pc);
        if (s->pe && !s->vm86) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_lret_protected(tcg_const_i32(s->dflag),
                                      tcg_const_i32(val));
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            gen_op_andl_T0_ffff();
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_addl_A0_im(2 << s->dflag);
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            gen_op_movl_seg_T0_vm(R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (4 << s->dflag));
    case 0xcb: /* lret */
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
        gen_helper_iret_real(tcg_const_i32(s->dflag));
        s->cc_op = CC_OP_EFLAGS;
        } else if (s->vm86) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_helper_iret_real(tcg_const_i32(s->dflag));
            s->cc_op = CC_OP_EFLAGS;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_iret_protected(tcg_const_i32(s->dflag),
                                  tcg_const_i32(s->pc - s->cs_base));
        s->cc_op = CC_OP_EFLAGS;
    case 0xe8: /* call im */
        tval = (int32_t)insn_get(s, OT_LONG);
        tval = (int16_t)insn_get(s, OT_WORD);
        next_eip = s->pc - s->cs_base;
        gen_movtl_T0_im(next_eip);
    case 0x9a: /* lcall im */
        unsigned int selector, offset;
        ot = dflag ? OT_LONG : OT_WORD;
        offset = insn_get(s, ot);
        selector = insn_get(s, OT_WORD);
        gen_op_movl_T0_im(selector);
        gen_op_movl_T1_imu(offset);
    case 0xe9: /* jmp im */
        tval = (int32_t)insn_get(s, OT_LONG);
        tval = (int16_t)insn_get(s, OT_WORD);
        tval += s->pc - s->cs_base;
    case 0xea: /* ljmp im */
        unsigned int selector, offset;
        ot = dflag ? OT_LONG : OT_WORD;
        offset = insn_get(s, ot);
        selector = insn_get(s, OT_WORD);
        gen_op_movl_T0_im(selector);
        gen_op_movl_T1_imu(offset);
    case 0xeb: /* jmp Jb */
        tval = (int8_t)insn_get(s, OT_BYTE);
        tval += s->pc - s->cs_base;
    case 0x70 ... 0x7f: /* jcc Jb */
        tval = (int8_t)insn_get(s, OT_BYTE);
    case 0x180 ... 0x18f: /* jcc Jv */
        tval = (int32_t)insn_get(s, OT_LONG);
        tval = (int16_t)insn_get(s, OT_WORD);
        next_eip = s->pc - s->cs_base;
        gen_jcc(s, b, tval, next_eip);
    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = ldub_code(s->pc++);
        gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        t0 = tcg_temp_local_new();
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(ot, t0, rm);
#ifdef TARGET_X86_64
        if (ot == OT_LONG) {
            /* XXX: specific Intel behaviour ? */
            l1 = gen_new_label();
            gen_jcc1(s, s->cc_op, b ^ 1, l1);
            tcg_gen_mov_tl(cpu_regs[reg], t0);
            tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        gen_op_mov_reg_v(ot, reg, t0);
        /************************/
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_read_eflags(cpu_T[0]);
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_write_eflags(cpu_T[0],
            tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
        gen_helper_write_eflags(cpu_T[0],
            tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
        if (s->cpl <= s->iopl) {
            gen_helper_write_eflags(cpu_T[0],
                tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
            gen_helper_write_eflags(cpu_T[0],
                tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
        gen_helper_write_eflags(cpu_T[0],
            tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
        gen_helper_write_eflags(cpu_T[0],
            tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
        s->cc_op = CC_OP_EFLAGS;
        /* abort translation because TF flag may change */
        gen_jmp_im(s->pc - s->cs_base);
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        s->cc_op = CC_OP_EFLAGS;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_T[0]);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
    case 0xf5: /* cmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
    case 0xf8: /* clc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        s->cc_op = CC_OP_EFLAGS;
    case 0xf9: /* stc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
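        /* Note: cmc/clc/stc above work by materializing the current flags into
           cpu_cc_src with gen_compute_eflags(), editing just the CC_C bit, and
           then switching cc_op to CC_OP_EFLAGS so that cpu_cc_src is treated as
           the literal flags value from that point on. */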
        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, rm);
        val = ldub_code(s->pc++);
        gen_op_movl_T1_im(val);
    case 0x1a3: /* bt Gv, Ev */
    case 0x1ab: /* bts */
    case 0x1b3: /* btr */
    case 0x1bb: /* btc */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_TN_reg(OT_LONG, 1, reg);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        /* specific case: we need to add a displacement */
        gen_exts(ot, cpu_T[1]);
        tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, rm);
        tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
        tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_cc_dst, 0);
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
        tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        s->cc_op = CC_OP_SARB + ot;
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, rm);
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_movi_tl(cpu_cc_dst, 0);
    case 0x1bc: /* bsf */
    case 0x1bd: /* bsr */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T[0]);
        t0 = tcg_temp_local_new();
        tcg_gen_mov_tl(t0, cpu_T[0]);
        if ((b & 1) && (prefixes & PREFIX_REPZ) &&
            (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
            case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
                                           tcg_const_i32(16)); break;
            case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
                                           tcg_const_i32(32)); break;
            case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
                                           tcg_const_i32(64)); break;
            gen_op_mov_reg_T0(ot, reg);
            label1 = gen_new_label();
            tcg_gen_movi_tl(cpu_cc_dst, 0);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
            gen_helper_bsr(cpu_T[0], t0);
            gen_helper_bsf(cpu_T[0], t0);
            gen_op_mov_reg_T0(ot, reg);
            tcg_gen_movi_tl(cpu_cc_dst, 1);
            gen_set_label(label1);
            tcg_gen_discard_tl(cpu_cc_src);
            s->cc_op = CC_OP_LOGICB + ot;
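            /* Note: for bsf/bsr the ZF result goes through the lazy-flags
               machinery: cpu_cc_dst is preset to 0 (source was zero, ZF set)
               and only overwritten with 1 when the brcond above falls through,
               with CC_OP_LOGICB + ot selected so ZF is derived from cc_dst. */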
        /************************/
    case 0x27: /* daa */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_EFLAGS;
    case 0x2f: /* das */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_EFLAGS;
    case 0x37: /* aaa */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_EFLAGS;
    case 0x3f: /* aas */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_EFLAGS;
    case 0xd4: /* aam */
        val = ldub_code(s->pc++);
        gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        gen_helper_aam(tcg_const_i32(val));
        s->cc_op = CC_OP_LOGICB;
    case 0xd5: /* aad */
        val = ldub_code(s->pc++);
        gen_helper_aad(tcg_const_i32(val));
        s->cc_op = CC_OP_LOGICB;
6722 /************************/
6724 case 0x90: /* nop */
6725 /* XXX: correct lock test for all insn */
6726 if (prefixes
& PREFIX_LOCK
) {
6729 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6731 goto do_xchg_reg_eax
;
6733 if (prefixes
& PREFIX_REPZ
) {
6734 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
6737 case 0x9b: /* fwait */
6738 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6739 (HF_MP_MASK
| HF_TS_MASK
)) {
6740 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6742 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6743 gen_op_set_cc_op(s
->cc_op
);
6744 gen_jmp_im(pc_start
- s
->cs_base
);
6748 case 0xcc: /* int3 */
6749 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6751 case 0xcd: /* int N */
6752 val
= ldub_code(s
->pc
++);
6753 if (s
->vm86
&& s
->iopl
!= 3) {
6754 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6756 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6759 case 0xce: /* into */
6762 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6763 gen_op_set_cc_op(s
->cc_op
);
6764 gen_jmp_im(pc_start
- s
->cs_base
);
6765 gen_helper_into(tcg_const_i32(s
->pc
- pc_start
));
6768 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6769 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6771 gen_debug(s
, pc_start
- s
->cs_base
);
6774 tb_flush(cpu_single_env
);
6775 cpu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6779 case 0xfa: /* cli */
6781 if (s
->cpl
<= s
->iopl
) {
6784 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6790 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6794 case 0xfb: /* sti */
6796 if (s
->cpl
<= s
->iopl
) {
6799 /* interruptions are enabled only the first insn after sti */
6800 /* If several instructions disable interrupts, only the
6802 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
6803 gen_helper_set_inhibit_irq();
6804 /* give a chance to handle pending irqs */
6805 gen_jmp_im(s
->pc
- s
->cs_base
);
6808 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6814 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6818 case 0x62: /* bound */
6821 ot
= dflag
? OT_LONG
: OT_WORD
;
6822 modrm
= ldub_code(s
->pc
++);
6823 reg
= (modrm
>> 3) & 7;
6824 mod
= (modrm
>> 6) & 3;
6827 gen_op_mov_TN_reg(ot
, 0, reg
);
6828 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6829 gen_jmp_im(pc_start
- s
->cs_base
);
6830 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6832 gen_helper_boundw(cpu_A0
, cpu_tmp2_i32
);
6834 gen_helper_boundl(cpu_A0
, cpu_tmp2_i32
);
6836 case 0x1c8 ... 0x1cf: /* bswap reg */
6837 reg
= (b
& 7) | REX_B(s
);
6838 #ifdef TARGET_X86_64
6840 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
6841 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
6842 gen_op_mov_reg_T0(OT_QUAD
, reg
);
6846 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
6847 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
6848 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
6849 gen_op_mov_reg_T0(OT_LONG
, reg
);
6852 case 0xd6: /* salc */
6855 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6856 gen_op_set_cc_op(s
->cc_op
);
6857 gen_compute_eflags_c(cpu_T
[0]);
6858 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
6859 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
6861 case 0xe0: /* loopnz */
6862 case 0xe1: /* loopz */
6863 case 0xe2: /* loop */
6864 case 0xe3: /* jecxz */
6868 tval
= (int8_t)insn_get(s
, OT_BYTE
);
6869 next_eip
= s
->pc
- s
->cs_base
;
6874 l1
= gen_new_label();
6875 l2
= gen_new_label();
6876 l3
= gen_new_label();
6879 case 0: /* loopnz */
6881 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6882 gen_op_set_cc_op(s
->cc_op
);
6883 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6884 gen_op_jz_ecx(s
->aflag
, l3
);
6885 gen_compute_eflags(cpu_tmp0
);
6886 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_Z
);
6888 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, l1
);
6890 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, l1
);
6894 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6895 gen_op_jnz_ecx(s
->aflag
, l1
);
6899 gen_op_jz_ecx(s
->aflag
, l1
);
6904 gen_jmp_im(next_eip
);
6913 case 0x130: /* wrmsr */
6914 case 0x132: /* rdmsr */
6916 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6918 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6919 gen_op_set_cc_op(s
->cc_op
);
6920 gen_jmp_im(pc_start
- s
->cs_base
);
6928 case 0x131: /* rdtsc */
6929 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6930 gen_op_set_cc_op(s
->cc_op
);
6931 gen_jmp_im(pc_start
- s
->cs_base
);
6937 gen_jmp(s
, s
->pc
- s
->cs_base
);
6940 case 0x133: /* rdpmc */
6941 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6942 gen_op_set_cc_op(s
->cc_op
);
6943 gen_jmp_im(pc_start
- s
->cs_base
);
6946 case 0x134: /* sysenter */
6947 /* For Intel SYSENTER is valid on 64-bit */
6948 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6951 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6953 gen_update_cc_op(s
);
6954 gen_jmp_im(pc_start
- s
->cs_base
);
6955 gen_helper_sysenter();
6959 case 0x135: /* sysexit */
6960 /* For Intel SYSEXIT is valid on 64-bit */
6961 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6964 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6966 gen_update_cc_op(s
);
6967 gen_jmp_im(pc_start
- s
->cs_base
);
6968 gen_helper_sysexit(tcg_const_i32(dflag
));
6972 #ifdef TARGET_X86_64
6973 case 0x105: /* syscall */
6974 /* XXX: is it usable in real mode ? */
6975 gen_update_cc_op(s
);
6976 gen_jmp_im(pc_start
- s
->cs_base
);
6977 gen_helper_syscall(tcg_const_i32(s
->pc
- pc_start
));
6980 case 0x107: /* sysret */
6982 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6984 gen_update_cc_op(s
);
6985 gen_jmp_im(pc_start
- s
->cs_base
);
6986 gen_helper_sysret(tcg_const_i32(s
->dflag
));
6987 /* condition codes are modified only in long mode */
6989 s
->cc_op
= CC_OP_EFLAGS
;
6994 case 0x1a2: /* cpuid */
6995 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6996 gen_op_set_cc_op(s
->cc_op
);
6997 gen_jmp_im(pc_start
- s
->cs_base
);
7000 case 0xf4: /* hlt */
7002 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7004 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7005 gen_op_set_cc_op(s
->cc_op
);
7006 gen_jmp_im(pc_start
- s
->cs_base
);
7007 gen_helper_hlt(tcg_const_i32(s
->pc
- pc_start
));
7008 s
->is_jmp
= DISAS_TB_JUMP
;
7012 modrm
= ldub_code(s
->pc
++);
7013 mod
= (modrm
>> 6) & 3;
7014 op
= (modrm
>> 3) & 7;
7017 if (!s
->pe
|| s
->vm86
)
7019 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7020 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
7024 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
7027 if (!s
->pe
|| s
->vm86
)
7030 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7032 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7033 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7034 gen_jmp_im(pc_start
- s
->cs_base
);
7035 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7036 gen_helper_lldt(cpu_tmp2_i32
);
7040 if (!s
->pe
|| s
->vm86
)
7042 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7043 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7047 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
7050 if (!s
->pe
|| s
->vm86
)
7053 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7055 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7056 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7057 gen_jmp_im(pc_start
- s
->cs_base
);
7058 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7059 gen_helper_ltr(cpu_tmp2_i32
);
7064 if (!s
->pe
|| s
->vm86
)
7066 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7067 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7068 gen_op_set_cc_op(s
->cc_op
);
7070 gen_helper_verr(cpu_T
[0]);
7072 gen_helper_verw(cpu_T
[0]);
7073 s
->cc_op
= CC_OP_EFLAGS
;
7080 modrm
= ldub_code(s
->pc
++);
7081 mod
= (modrm
>> 6) & 3;
7082 op
= (modrm
>> 3) & 7;
7088 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7089 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7090 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7091 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7092 gen_add_A0_im(s
, 2);
7093 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7095 gen_op_andl_T0_im(0xffffff);
7096 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7101 case 0: /* monitor */
7102 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7105 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7106 gen_op_set_cc_op(s
->cc_op
);
7107 gen_jmp_im(pc_start
- s
->cs_base
);
7108 #ifdef TARGET_X86_64
7109 if (s
->aflag
== 2) {
7110 gen_op_movq_A0_reg(R_EAX
);
7114 gen_op_movl_A0_reg(R_EAX
);
7116 gen_op_andl_A0_ffff();
7118 gen_add_A0_ds_seg(s
);
7119 gen_helper_monitor(cpu_A0
);
7122 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7125 gen_update_cc_op(s
);
7126 gen_jmp_im(pc_start
- s
->cs_base
);
7127 gen_helper_mwait(tcg_const_i32(s
->pc
- pc_start
));
7134 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7135 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7136 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7137 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7138 gen_add_A0_im(s
, 2);
7139 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7141 gen_op_andl_T0_im(0xffffff);
7142 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7148 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7149 gen_op_set_cc_op(s
->cc_op
);
7150 gen_jmp_im(pc_start
- s
->cs_base
);
7153 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7156 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7159 gen_helper_vmrun(tcg_const_i32(s
->aflag
),
7160 tcg_const_i32(s
->pc
- pc_start
));
7162 s
->is_jmp
= DISAS_TB_JUMP
;
7165 case 1: /* VMMCALL */
7166 if (!(s
->flags
& HF_SVME_MASK
))
7168 gen_helper_vmmcall();
7170 case 2: /* VMLOAD */
7171 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7174 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7177 gen_helper_vmload(tcg_const_i32(s
->aflag
));
7180 case 3: /* VMSAVE */
7181 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7184 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7187 gen_helper_vmsave(tcg_const_i32(s
->aflag
));
7191 if ((!(s
->flags
& HF_SVME_MASK
) &&
7192 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7196 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7203 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7206 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7212 case 6: /* SKINIT */
7213 if ((!(s
->flags
& HF_SVME_MASK
) &&
7214 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7217 gen_helper_skinit();
7219 case 7: /* INVLPGA */
7220 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7223 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7226 gen_helper_invlpga(tcg_const_i32(s
->aflag
));
7232 } else if (s
->cpl
!= 0) {
7233 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7235 gen_svm_check_intercept(s
, pc_start
,
7236 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7237 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7238 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7239 gen_add_A0_im(s
, 2);
7240 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7242 gen_op_andl_T0_im(0xffffff);
7244 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7245 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7247 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7248 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7253 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7254 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7255 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7257 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7259 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7263 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7265 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7266 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7267 gen_helper_lmsw(cpu_T
[0]);
7268 gen_jmp_im(s
->pc
- s
->cs_base
);
7273 if (mod
!= 3) { /* invlpg */
7275 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7277 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7278 gen_op_set_cc_op(s
->cc_op
);
7279 gen_jmp_im(pc_start
- s
->cs_base
);
7280 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7281 gen_helper_invlpg(cpu_A0
);
7282 gen_jmp_im(s
->pc
- s
->cs_base
);
7287 case 0: /* swapgs */
7288 #ifdef TARGET_X86_64
7291 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7293 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7294 offsetof(CPUX86State
,segs
[R_GS
].base
));
7295 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7296 offsetof(CPUX86State
,kernelgsbase
));
7297 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7298 offsetof(CPUX86State
,segs
[R_GS
].base
));
7299 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7300 offsetof(CPUX86State
,kernelgsbase
));
7308 case 1: /* rdtscp */
7309 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7311 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7312 gen_op_set_cc_op(s
->cc_op
);
7313 gen_jmp_im(pc_start
- s
->cs_base
);
7316 gen_helper_rdtscp();
7319 gen_jmp(s
, s
->pc
- s
->cs_base
);
7331 case 0x108: /* invd */
7332 case 0x109: /* wbinvd */
7334 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7336 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7340 case 0x63: /* arpl or movslS (x86_64) */
7341 #ifdef TARGET_X86_64
7344 /* d_ot is the size of destination */
7345 d_ot
= dflag
+ OT_WORD
;
7347 modrm
= ldub_code(s
->pc
++);
7348 reg
= ((modrm
>> 3) & 7) | rex_r
;
7349 mod
= (modrm
>> 6) & 3;
7350 rm
= (modrm
& 7) | REX_B(s
);
7353 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7355 if (d_ot
== OT_QUAD
)
7356 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7357 gen_op_mov_reg_T0(d_ot
, reg
);
7359 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7360 if (d_ot
== OT_QUAD
) {
7361 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7363 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7365 gen_op_mov_reg_T0(d_ot
, reg
);
7371 TCGv t0
, t1
, t2
, a0
;
7373 if (!s
->pe
|| s
->vm86
)
7375 t0
= tcg_temp_local_new();
7376 t1
= tcg_temp_local_new();
7377 t2
= tcg_temp_local_new();
7379 modrm
= ldub_code(s
->pc
++);
7380 reg
= (modrm
>> 3) & 7;
7381 mod
= (modrm
>> 6) & 3;
7384 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7385 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7386 a0
= tcg_temp_local_new();
7387 tcg_gen_mov_tl(a0
, cpu_A0
);
7389 gen_op_mov_v_reg(ot
, t0
, rm
);
7392 gen_op_mov_v_reg(ot
, t1
, reg
);
7393 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7394 tcg_gen_andi_tl(t1
, t1
, 3);
7395 tcg_gen_movi_tl(t2
, 0);
7396 label1
= gen_new_label();
7397 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7398 tcg_gen_andi_tl(t0
, t0
, ~3);
7399 tcg_gen_or_tl(t0
, t0
, t1
);
7400 tcg_gen_movi_tl(t2
, CC_Z
);
7401 gen_set_label(label1
);
7403 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7406 gen_op_mov_reg_v(ot
, rm
, t0
);
7408 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7409 gen_op_set_cc_op(s
->cc_op
);
7410 gen_compute_eflags(cpu_cc_src
);
7411 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7412 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7413 s
->cc_op
= CC_OP_EFLAGS
;
7419 case 0x102: /* lar */
7420 case 0x103: /* lsl */
7424 if (!s
->pe
|| s
->vm86
)
7426 ot
= dflag
? OT_LONG
: OT_WORD
;
7427 modrm
= ldub_code(s
->pc
++);
7428 reg
= ((modrm
>> 3) & 7) | rex_r
;
7429 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7430 t0
= tcg_temp_local_new();
7431 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7432 gen_op_set_cc_op(s
->cc_op
);
7434 gen_helper_lar(t0
, cpu_T
[0]);
7436 gen_helper_lsl(t0
, cpu_T
[0]);
7437 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7438 label1
= gen_new_label();
7439 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7440 gen_op_mov_reg_v(ot
, reg
, t0
);
7441 gen_set_label(label1
);
7442 s
->cc_op
= CC_OP_EFLAGS
;
7447 modrm
= ldub_code(s
->pc
++);
7448 mod
= (modrm
>> 6) & 3;
7449 op
= (modrm
>> 3) & 7;
7451 case 0: /* prefetchnta */
7452 case 1: /* prefetchnt0 */
7453 case 2: /* prefetchnt0 */
7454 case 3: /* prefetchnt0 */
7457 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7458 /* nothing more to do */
7460 default: /* nop (multi byte) */
7461 gen_nop_modrm(s
, modrm
);
7465 case 0x119 ... 0x11f: /* nop (multi byte) */
7466 modrm
= ldub_code(s
->pc
++);
7467 gen_nop_modrm(s
, modrm
);
7469 case 0x120: /* mov reg, crN */
7470 case 0x122: /* mov crN, reg */
7472 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7474 modrm
= ldub_code(s
->pc
++);
7475 if ((modrm
& 0xc0) != 0xc0)
7477 rm
= (modrm
& 7) | REX_B(s
);
7478 reg
= ((modrm
>> 3) & 7) | rex_r
;
7483 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
7484 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
7493 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7494 gen_op_set_cc_op(s
->cc_op
);
7495 gen_jmp_im(pc_start
- s
->cs_base
);
7497 gen_op_mov_TN_reg(ot
, 0, rm
);
7498 gen_helper_write_crN(tcg_const_i32(reg
), cpu_T
[0]);
7499 gen_jmp_im(s
->pc
- s
->cs_base
);
7502 gen_helper_read_crN(cpu_T
[0], tcg_const_i32(reg
));
7503 gen_op_mov_reg_T0(ot
, rm
);
7511 case 0x121: /* mov reg, drN */
7512 case 0x123: /* mov drN, reg */
7514 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7516 modrm
= ldub_code(s
->pc
++);
7517 if ((modrm
& 0xc0) != 0xc0)
7519 rm
= (modrm
& 7) | REX_B(s
);
7520 reg
= ((modrm
>> 3) & 7) | rex_r
;
7525 /* XXX: do it dynamically with CR4.DE bit */
7526 if (reg
== 4 || reg
== 5 || reg
>= 8)
7529 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
7530 gen_op_mov_TN_reg(ot
, 0, rm
);
7531 gen_helper_movl_drN_T0(tcg_const_i32(reg
), cpu_T
[0]);
7532 gen_jmp_im(s
->pc
- s
->cs_base
);
7535 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
7536 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
7537 gen_op_mov_reg_T0(ot
, rm
);
7541 case 0x106: /* clts */
7543 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7545 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7547 /* abort block because static cpu state changed */
7548 gen_jmp_im(s
->pc
- s
->cs_base
);
7552 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7553 case 0x1c3: /* MOVNTI reg, mem */
7554 if (!(s
->cpuid_features
& CPUID_SSE2
))
7556 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
7557 modrm
= ldub_code(s
->pc
++);
7558 mod
= (modrm
>> 6) & 3;
7561 reg
= ((modrm
>> 3) & 7) | rex_r
;
7562 /* generate a generic store */
7563 gen_ldst_modrm(s
, modrm
, ot
, reg
, 1);
7566 modrm
= ldub_code(s
->pc
++);
7567 mod
= (modrm
>> 6) & 3;
7568 op
= (modrm
>> 3) & 7;
7570 case 0: /* fxsave */
7571 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7572 (s
->prefix
& PREFIX_LOCK
))
7574 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7575 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7578 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7579 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7580 gen_op_set_cc_op(s
->cc_op
);
7581 gen_jmp_im(pc_start
- s
->cs_base
);
7582 gen_helper_fxsave(cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7584 case 1: /* fxrstor */
7585 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7586 (s
->prefix
& PREFIX_LOCK
))
7588 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7589 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7592 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7593 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7594 gen_op_set_cc_op(s
->cc_op
);
7595 gen_jmp_im(pc_start
- s
->cs_base
);
7596 gen_helper_fxrstor(cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7598 case 2: /* ldmxcsr */
7599 case 3: /* stmxcsr */
7600 if (s
->flags
& HF_TS_MASK
) {
7601 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7604 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
7607 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7609 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7610 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7611 gen_helper_ldmxcsr(cpu_tmp2_i32
);
7613 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7614 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
7617 case 5: /* lfence */
7618 case 6: /* mfence */
7619 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE2
))
7622 case 7: /* sfence / clflush */
7623 if ((modrm
& 0xc7) == 0xc0) {
7625 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7626 if (!(s
->cpuid_features
& CPUID_SSE
))
7630 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
7632 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7639 case 0x10d: /* 3DNow! prefetch(w) */
7640 modrm
= ldub_code(s
->pc
++);
7641 mod
= (modrm
>> 6) & 3;
7644 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7645 /* ignore for now */
7647 case 0x1aa: /* rsm */
7648 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
7649 if (!(s
->flags
& HF_SMM_MASK
))
7651 gen_update_cc_op(s
);
7652 gen_jmp_im(s
->pc
- s
->cs_base
);
7656 case 0x1b8: /* SSE4.2 popcnt */
7657 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
7660 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
7663 modrm
= ldub_code(s
->pc
++);
7664 reg
= ((modrm
>> 3) & 7);
7666 if (s
->prefix
& PREFIX_DATA
)
7668 else if (s
->dflag
!= 2)
7673 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
7674 gen_helper_popcnt(cpu_T
[0], cpu_T
[0], tcg_const_i32(ot
));
7675 gen_op_mov_reg_T0(ot
, reg
);
7677 s
->cc_op
= CC_OP_EFLAGS
;
7679 case 0x10e ... 0x10f:
7680 /* 3DNow! instructions, ignore prefixes */
7681 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
7682 case 0x110 ... 0x117:
7683 case 0x128 ... 0x12f:
7684 case 0x138 ... 0x13a:
7685 case 0x150 ... 0x179:
7686 case 0x17c ... 0x17f:
7688 case 0x1c4 ... 0x1c6:
7689 case 0x1d0 ... 0x1fe:
7690 gen_sse(s
, b
, pc_start
, rex_r
);
7695 /* lock generation */
7696 if (s
->prefix
& PREFIX_LOCK
)
7697 gen_helper_unlock();
7700 if (s
->prefix
& PREFIX_LOCK
)
7701 gen_helper_unlock();
7702 /* XXX: ensure that no lock was generated */
7703 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
void optimize_flags_init(void)
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUX86State, regs[15]), "r15");
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUX86State, regs[R_EDI]), "edi");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUX86State *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif
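
    /* allocate the TCG temporaries shared by the micro-op generators;
       they are re-created for every translation block */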
    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
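
        /* when search_pc is set, record per-op guest PC, cc_op and insn
           count so the CPU state can be reconstructed at any op later */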
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* in single step mode, we generate only one instruction and
           generate an exception */
        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation too */
        if (gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
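    /* translation loop done: flush icount bookkeeping, terminate the op
       stream and pad the remaining search_pc slots */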
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    /* don't forget to fill in the last values */
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}
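
/* entry points: translate one TB without or with per-op PC tracking
   (the latter is used when restoring state after a fault or interrupt) */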
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
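
/* called after a search_pc translation: rebuild eip and (when it is not
   CC_OP_DYNAMIC) cc_op from the values recorded for op index pc_pos */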
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;

    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for (i = 0; i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }

    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)