/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(...) __VA_ARGS__
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#define BUGGY_64(x) NULL
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(...)
#endif

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5, cpu_tmp6;

#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
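
/*
 * Condition codes are evaluated lazily: arithmetic ops only record their
 * operands/result in cpu_cc_src/cpu_cc_dst and the operation kind in
 * cpu_cc_op; the EFLAGS bits are only materialised on demand by the
 * cc_compute_c/cc_compute_all helpers (see gen_compute_eflags* below).
 */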
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);

/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
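
/*
 * The REG_*_OFFSET values select the byte lane of a target_ulong sized
 * register slot that holds the 8/16/32-bit sub-register, so that partial
 * register accesses work on both little-endian and big-endian hosts.
 */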
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
        } else {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    case OT_WORD:
        tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case OT_QUAD:
        tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case 2:
        tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    default:
    std_case:
        tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}

static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}
static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}
static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
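
/*
 * All string instructions follow the same pattern: point A0 at the ESI/EDI
 * side of the transfer, perform the access, then advance ESI/EDI by the
 * direction-dependent element size loaded by gen_op_movl_T0_Dshift().
 */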
static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;

    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}
939 /* generate a conditional jump to label 'l1' according to jump opcode
940 value 'b'. In the fast case, T0 is guaranted not to be used. */
941 static inline void gen_jcc1(DisasContext
*s
, int cc_op
, int b
, int l1
)
943 int inv
, jcc_op
, size
, cond
;
947 jcc_op
= (b
>> 1) & 7;
950 /* we optimize the cmp/jcc case */
956 size
= cc_op
- CC_OP_SUBB
;
962 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0xff);
966 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0xffff);
971 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0xffffffff);
979 tcg_gen_brcondi_tl(inv
? TCG_COND_NE
: TCG_COND_EQ
, t0
, 0, l1
);
985 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0x80);
986 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, cpu_tmp0
,
990 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0x8000);
991 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, cpu_tmp0
,
996 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0x80000000);
997 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, cpu_tmp0
,
1002 tcg_gen_brcondi_tl(inv
? TCG_COND_GE
: TCG_COND_LT
, cpu_cc_dst
,
1009 cond
= inv
? TCG_COND_GEU
: TCG_COND_LTU
;
1012 cond
= inv
? TCG_COND_GTU
: TCG_COND_LEU
;
1014 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1018 tcg_gen_andi_tl(cpu_tmp4
, cpu_tmp4
, 0xff);
1019 tcg_gen_andi_tl(t0
, cpu_cc_src
, 0xff);
1023 tcg_gen_andi_tl(cpu_tmp4
, cpu_tmp4
, 0xffff);
1024 tcg_gen_andi_tl(t0
, cpu_cc_src
, 0xffff);
1026 #ifdef TARGET_X86_64
1029 tcg_gen_andi_tl(cpu_tmp4
, cpu_tmp4
, 0xffffffff);
1030 tcg_gen_andi_tl(t0
, cpu_cc_src
, 0xffffffff);
1037 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1041 cond
= inv
? TCG_COND_GE
: TCG_COND_LT
;
1044 cond
= inv
? TCG_COND_GT
: TCG_COND_LE
;
1046 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1050 tcg_gen_ext8s_tl(cpu_tmp4
, cpu_tmp4
);
1051 tcg_gen_ext8s_tl(t0
, cpu_cc_src
);
1055 tcg_gen_ext16s_tl(cpu_tmp4
, cpu_tmp4
);
1056 tcg_gen_ext16s_tl(t0
, cpu_cc_src
);
1058 #ifdef TARGET_X86_64
1061 tcg_gen_ext32s_tl(cpu_tmp4
, cpu_tmp4
);
1062 tcg_gen_ext32s_tl(t0
, cpu_cc_src
);
1069 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1077 /* some jumps are easy to compute */
1119 size
= (cc_op
- CC_OP_ADDB
) & 3;
1122 size
= (cc_op
- CC_OP_ADDB
) & 3;
1130 gen_setcc_slow_T0(s
, jcc_op
);
1131 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
,
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_outs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                  \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                     \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}
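
/*
 * A REP-prefixed string instruction is translated as a single iteration plus
 * a jump back to the current instruction (or forward to the next one once
 * ECX reaches zero), rather than as a loop inside the TB.  This keeps the
 * translation restartable after a page fault and keeps single-stepping sane.
 */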
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0: gen_helper_fadd_ST0_FT0(); break;
    case 1: gen_helper_fmul_ST0_FT0(); break;
    case 2: gen_helper_fcom_ST0_FT0(); break;
    case 3: gen_helper_fcom_ST0_FT0(); break;
    case 4: gen_helper_fsub_ST0_FT0(); break;
    case 5: gen_helper_fsubr_ST0_FT0(); break;
    case 6: gen_helper_fdiv_ST0_FT0(); break;
    case 7: gen_helper_fdivr_ST0_FT0(); break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
    }
}
1303 /* if d == OR_TMP0, it means memory operand (address in A0) */
1304 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1307 gen_op_mov_TN_reg(ot
, 0, d
);
1309 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1313 if (s1
->cc_op
!= CC_OP_DYNAMIC
)
1314 gen_op_set_cc_op(s1
->cc_op
);
1315 gen_compute_eflags_c(cpu_tmp4
);
1316 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1317 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1319 gen_op_mov_reg_T0(ot
, d
);
1321 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1322 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1323 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1324 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1325 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1326 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_ADDB
+ ot
);
1327 s1
->cc_op
= CC_OP_DYNAMIC
;
1330 if (s1
->cc_op
!= CC_OP_DYNAMIC
)
1331 gen_op_set_cc_op(s1
->cc_op
);
1332 gen_compute_eflags_c(cpu_tmp4
);
1333 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1334 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1336 gen_op_mov_reg_T0(ot
, d
);
1338 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1339 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1340 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1341 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1342 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1343 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_SUBB
+ ot
);
1344 s1
->cc_op
= CC_OP_DYNAMIC
;
1347 gen_op_addl_T0_T1();
1349 gen_op_mov_reg_T0(ot
, d
);
1351 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1352 gen_op_update2_cc();
1353 s1
->cc_op
= CC_OP_ADDB
+ ot
;
1356 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1358 gen_op_mov_reg_T0(ot
, d
);
1360 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1361 gen_op_update2_cc();
1362 s1
->cc_op
= CC_OP_SUBB
+ ot
;
1366 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1368 gen_op_mov_reg_T0(ot
, d
);
1370 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1371 gen_op_update1_cc();
1372 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1375 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1377 gen_op_mov_reg_T0(ot
, d
);
1379 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1380 gen_op_update1_cc();
1381 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1384 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1386 gen_op_mov_reg_T0(ot
, d
);
1388 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1389 gen_op_update1_cc();
1390 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1393 gen_op_cmpl_T0_T1_cc();
1394 s1
->cc_op
= CC_OP_SUBB
+ ot
;
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
1423 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1424 int is_right
, int is_arith
)
1437 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1439 gen_op_mov_TN_reg(ot
, 0, op1
);
1441 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1443 tcg_gen_addi_tl(cpu_tmp5
, cpu_T
[1], -1);
1447 gen_exts(ot
, cpu_T
[0]);
1448 tcg_gen_sar_tl(cpu_T3
, cpu_T
[0], cpu_tmp5
);
1449 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1451 gen_extu(ot
, cpu_T
[0]);
1452 tcg_gen_shr_tl(cpu_T3
, cpu_T
[0], cpu_tmp5
);
1453 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1456 tcg_gen_shl_tl(cpu_T3
, cpu_T
[0], cpu_tmp5
);
1457 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1462 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1464 gen_op_mov_reg_T0(ot
, op1
);
1466 /* update eflags if non zero shift */
1467 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1468 gen_op_set_cc_op(s
->cc_op
);
1470 /* XXX: inefficient */
1471 t0
= tcg_temp_local_new();
1472 t1
= tcg_temp_local_new();
1474 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1475 tcg_gen_mov_tl(t1
, cpu_T3
);
1477 shift_label
= gen_new_label();
1478 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_T
[1], 0, shift_label
);
1480 tcg_gen_mov_tl(cpu_cc_src
, t1
);
1481 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
1483 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1485 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1487 gen_set_label(shift_label
);
1488 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1494 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1495 int is_right
, int is_arith
)
1506 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1508 gen_op_mov_TN_reg(ot
, 0, op1
);
1514 gen_exts(ot
, cpu_T
[0]);
1515 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1516 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1518 gen_extu(ot
, cpu_T
[0]);
1519 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1520 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1523 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1524 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1530 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1532 gen_op_mov_reg_T0(ot
, op1
);
1534 /* update eflags if non zero shift */
1536 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1537 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1539 s
->cc_op
= CC_OP_SARB
+ ot
;
1541 s
->cc_op
= CC_OP_SHLB
+ ot
;
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
1553 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
,
1557 int label1
, label2
, data_bits
;
1558 TCGv t0
, t1
, t2
, a0
;
1560 /* XXX: inefficient, but we must use local temps */
1561 t0
= tcg_temp_local_new();
1562 t1
= tcg_temp_local_new();
1563 t2
= tcg_temp_local_new();
1564 a0
= tcg_temp_local_new();
1572 if (op1
== OR_TMP0
) {
1573 tcg_gen_mov_tl(a0
, cpu_A0
);
1574 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1576 gen_op_mov_v_reg(ot
, t0
, op1
);
1579 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1581 tcg_gen_andi_tl(t1
, t1
, mask
);
1583 /* Must test zero case to avoid using undefined behaviour in TCG
1585 label1
= gen_new_label();
1586 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label1
);
1589 tcg_gen_andi_tl(cpu_tmp0
, t1
, (1 << (3 + ot
)) - 1);
1591 tcg_gen_mov_tl(cpu_tmp0
, t1
);
1594 tcg_gen_mov_tl(t2
, t0
);
1596 data_bits
= 8 << ot
;
1597 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1598 fix TCG definition) */
1600 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1601 tcg_gen_sub_tl(cpu_tmp0
, tcg_const_tl(data_bits
), cpu_tmp0
);
1602 tcg_gen_shl_tl(t0
, t0
, cpu_tmp0
);
1604 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1605 tcg_gen_sub_tl(cpu_tmp0
, tcg_const_tl(data_bits
), cpu_tmp0
);
1606 tcg_gen_shr_tl(t0
, t0
, cpu_tmp0
);
1608 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1610 gen_set_label(label1
);
1612 if (op1
== OR_TMP0
) {
1613 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1615 gen_op_mov_reg_v(ot
, op1
, t0
);
1619 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1620 gen_op_set_cc_op(s
->cc_op
);
1622 label2
= gen_new_label();
1623 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label2
);
1625 gen_compute_eflags(cpu_cc_src
);
1626 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1627 tcg_gen_xor_tl(cpu_tmp0
, t2
, t0
);
1628 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1629 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1630 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1632 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1634 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1635 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1637 tcg_gen_discard_tl(cpu_cc_dst
);
1638 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_EFLAGS
);
1640 gen_set_label(label2
);
1641 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1649 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1656 /* XXX: inefficient, but we must use local temps */
1657 t0
= tcg_temp_local_new();
1658 t1
= tcg_temp_local_new();
1659 a0
= tcg_temp_local_new();
1667 if (op1
== OR_TMP0
) {
1668 tcg_gen_mov_tl(a0
, cpu_A0
);
1669 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1671 gen_op_mov_v_reg(ot
, t0
, op1
);
1675 tcg_gen_mov_tl(t1
, t0
);
1678 data_bits
= 8 << ot
;
1680 int shift
= op2
& ((1 << (3 + ot
)) - 1);
1682 tcg_gen_shri_tl(cpu_tmp4
, t0
, shift
);
1683 tcg_gen_shli_tl(t0
, t0
, data_bits
- shift
);
1686 tcg_gen_shli_tl(cpu_tmp4
, t0
, shift
);
1687 tcg_gen_shri_tl(t0
, t0
, data_bits
- shift
);
1689 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1693 if (op1
== OR_TMP0
) {
1694 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1696 gen_op_mov_reg_v(ot
, op1
, t0
);
1701 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1702 gen_op_set_cc_op(s
->cc_op
);
1704 gen_compute_eflags(cpu_cc_src
);
1705 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1706 tcg_gen_xor_tl(cpu_tmp0
, t1
, t0
);
1707 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1708 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1709 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1711 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1713 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1714 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1716 tcg_gen_discard_tl(cpu_cc_dst
);
1717 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_EFLAGS
);
1718 s
->cc_op
= CC_OP_EFLAGS
;
1726 /* XXX: add faster immediate = 1 case */
1727 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1732 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1733 gen_op_set_cc_op(s
->cc_op
);
1737 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1739 gen_op_mov_TN_reg(ot
, 0, op1
);
1743 case 0: gen_helper_rcrb(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1744 case 1: gen_helper_rcrw(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1745 case 2: gen_helper_rcrl(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1746 #ifdef TARGET_X86_64
1747 case 3: gen_helper_rcrq(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1752 case 0: gen_helper_rclb(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1753 case 1: gen_helper_rclw(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1754 case 2: gen_helper_rcll(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1755 #ifdef TARGET_X86_64
1756 case 3: gen_helper_rclq(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1762 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1764 gen_op_mov_reg_T0(ot
, op1
);
1767 label1
= gen_new_label();
1768 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cc_tmp
, -1, label1
);
1770 tcg_gen_mov_tl(cpu_cc_src
, cpu_cc_tmp
);
1771 tcg_gen_discard_tl(cpu_cc_dst
);
1772 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_EFLAGS
);
1774 gen_set_label(label1
);
1775 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1778 /* XXX: add faster immediate case */
1779 static void gen_shiftd_rm_T1_T3(DisasContext
*s
, int ot
, int op1
,
1782 int label1
, label2
, data_bits
;
1784 TCGv t0
, t1
, t2
, a0
;
1786 t0
= tcg_temp_local_new();
1787 t1
= tcg_temp_local_new();
1788 t2
= tcg_temp_local_new();
1789 a0
= tcg_temp_local_new();
1797 if (op1
== OR_TMP0
) {
1798 tcg_gen_mov_tl(a0
, cpu_A0
);
1799 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1801 gen_op_mov_v_reg(ot
, t0
, op1
);
1804 tcg_gen_andi_tl(cpu_T3
, cpu_T3
, mask
);
1806 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1807 tcg_gen_mov_tl(t2
, cpu_T3
);
1809 /* Must test zero case to avoid using undefined behaviour in TCG
1811 label1
= gen_new_label();
1812 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
1814 tcg_gen_addi_tl(cpu_tmp5
, t2
, -1);
1815 if (ot
== OT_WORD
) {
1816 /* Note: we implement the Intel behaviour for shift count > 16 */
1818 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1819 tcg_gen_shli_tl(cpu_tmp0
, t1
, 16);
1820 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1821 tcg_gen_ext32u_tl(t0
, t0
);
1823 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1825 /* only needed if count > 16, but a test would complicate */
1826 tcg_gen_sub_tl(cpu_tmp5
, tcg_const_tl(32), t2
);
1827 tcg_gen_shl_tl(cpu_tmp0
, t0
, cpu_tmp5
);
1829 tcg_gen_shr_tl(t0
, t0
, t2
);
1831 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1833 /* XXX: not optimal */
1834 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1835 tcg_gen_shli_tl(t1
, t1
, 16);
1836 tcg_gen_or_tl(t1
, t1
, t0
);
1837 tcg_gen_ext32u_tl(t1
, t1
);
1839 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1840 tcg_gen_sub_tl(cpu_tmp0
, tcg_const_tl(32), cpu_tmp5
);
1841 tcg_gen_shr_tl(cpu_tmp6
, t1
, cpu_tmp0
);
1842 tcg_gen_or_tl(cpu_tmp4
, cpu_tmp4
, cpu_tmp6
);
1844 tcg_gen_shl_tl(t0
, t0
, t2
);
1845 tcg_gen_sub_tl(cpu_tmp5
, tcg_const_tl(32), t2
);
1846 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1847 tcg_gen_or_tl(t0
, t0
, t1
);
1850 data_bits
= 8 << ot
;
1853 tcg_gen_ext32u_tl(t0
, t0
);
1855 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1857 tcg_gen_shr_tl(t0
, t0
, t2
);
1858 tcg_gen_sub_tl(cpu_tmp5
, tcg_const_tl(data_bits
), t2
);
1859 tcg_gen_shl_tl(t1
, t1
, cpu_tmp5
);
1860 tcg_gen_or_tl(t0
, t0
, t1
);
1864 tcg_gen_ext32u_tl(t1
, t1
);
1866 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1868 tcg_gen_shl_tl(t0
, t0
, t2
);
1869 tcg_gen_sub_tl(cpu_tmp5
, tcg_const_tl(data_bits
), t2
);
1870 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1871 tcg_gen_or_tl(t0
, t0
, t1
);
1874 tcg_gen_mov_tl(t1
, cpu_tmp4
);
1876 gen_set_label(label1
);
1878 if (op1
== OR_TMP0
) {
1879 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1881 gen_op_mov_reg_v(ot
, op1
, t0
);
1885 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1886 gen_op_set_cc_op(s
->cc_op
);
1888 label2
= gen_new_label();
1889 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label2
);
1891 tcg_gen_mov_tl(cpu_cc_src
, t1
);
1892 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
1894 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1896 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1898 gen_set_label(label2
);
1899 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
1964 static void gen_lea_modrm(DisasContext
*s
, int modrm
, int *reg_ptr
, int *offset_ptr
)
1972 int mod
, rm
, code
, override
, must_add_seg
;
1974 override
= s
->override
;
1975 must_add_seg
= s
->addseg
;
1978 mod
= (modrm
>> 6) & 3;
1990 code
= ldub_code(s
->pc
++);
1991 scale
= (code
>> 6) & 3;
1992 index
= ((code
>> 3) & 7) | REX_X(s
);
1999 if ((base
& 7) == 5) {
2001 disp
= (int32_t)ldl_code(s
->pc
);
2003 if (CODE64(s
) && !havesib
) {
2004 disp
+= s
->pc
+ s
->rip_offset
;
2011 disp
= (int8_t)ldub_code(s
->pc
++);
2015 disp
= ldl_code(s
->pc
);
2021 /* for correct popl handling with esp */
2022 if (base
== 4 && s
->popl_esp_hack
)
2023 disp
+= s
->popl_esp_hack
;
2024 #ifdef TARGET_X86_64
2025 if (s
->aflag
== 2) {
2026 gen_op_movq_A0_reg(base
);
2028 gen_op_addq_A0_im(disp
);
2033 gen_op_movl_A0_reg(base
);
2035 gen_op_addl_A0_im(disp
);
2038 #ifdef TARGET_X86_64
2039 if (s
->aflag
== 2) {
2040 gen_op_movq_A0_im(disp
);
2044 gen_op_movl_A0_im(disp
);
2047 /* XXX: index == 4 is always invalid */
2048 if (havesib
&& (index
!= 4 || scale
!= 0)) {
2049 #ifdef TARGET_X86_64
2050 if (s
->aflag
== 2) {
2051 gen_op_addq_A0_reg_sN(scale
, index
);
2055 gen_op_addl_A0_reg_sN(scale
, index
);
2060 if (base
== R_EBP
|| base
== R_ESP
)
2065 #ifdef TARGET_X86_64
2066 if (s
->aflag
== 2) {
2067 gen_op_addq_A0_seg(override
);
2071 gen_op_addl_A0_seg(override
);
2078 disp
= lduw_code(s
->pc
);
2080 gen_op_movl_A0_im(disp
);
2081 rm
= 0; /* avoid SS override */
2088 disp
= (int8_t)ldub_code(s
->pc
++);
2092 disp
= lduw_code(s
->pc
);
2098 gen_op_movl_A0_reg(R_EBX
);
2099 gen_op_addl_A0_reg_sN(0, R_ESI
);
2102 gen_op_movl_A0_reg(R_EBX
);
2103 gen_op_addl_A0_reg_sN(0, R_EDI
);
2106 gen_op_movl_A0_reg(R_EBP
);
2107 gen_op_addl_A0_reg_sN(0, R_ESI
);
2110 gen_op_movl_A0_reg(R_EBP
);
2111 gen_op_addl_A0_reg_sN(0, R_EDI
);
2114 gen_op_movl_A0_reg(R_ESI
);
2117 gen_op_movl_A0_reg(R_EDI
);
2120 gen_op_movl_A0_reg(R_EBP
);
2124 gen_op_movl_A0_reg(R_EBX
);
2128 gen_op_addl_A0_im(disp
);
2129 gen_op_andl_A0_ffff();
2133 if (rm
== 2 || rm
== 3 || rm
== 6)
2138 gen_op_addl_A0_seg(override
);
2148 static void gen_nop_modrm(DisasContext
*s
, int modrm
)
2150 int mod
, rm
, base
, code
;
2152 mod
= (modrm
>> 6) & 3;
2162 code
= ldub_code(s
->pc
++);
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;

    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
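
/*
 * Direct block chaining (tcg_gen_goto_tb followed by tcg_gen_exit_tb with a
 * non-zero return value) is only used when the jump target lies in one of
 * the guest pages already covered by this TB, since TB invalidation works
 * per page; cross-page jumps fall back to exiting to the execution loop.
 */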
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    if (s->jmp_opt) {
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }

        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op, l1;
    TCGv t0;

    if (is_fast_jcc_case(s, b)) {
        /* nominal case: we use a jump */
        /* XXX: make it faster by adding new instructions in TCG */
        t0 = tcg_temp_local_new();
        tcg_gen_movi_tl(t0, 0);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l1);
        tcg_gen_mov_tl(cpu_T[0], t0);
    } else {
        /* slow case: it is more efficient not to generate a jump,
           although it is questionnable whether this optimization is
           worth to */
        inv = b & 1;
        jcc_op = (b >> 1) & 7;
        gen_setcc_slow_T0(s, jcc_op);
        if (inv)
            tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
    }
}
static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State,segs[seg_reg].base));
}

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = 3;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = 3;
    }
}
static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(2, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(1, R_ESP, addend);
    } else {
        gen_op_add_reg_im(0, R_ESP, addend);
    }
}
2448 /* generate a push. It depends on ss32, addseg and dflag */
2449 static void gen_push_T0(DisasContext
*s
)
2451 #ifdef TARGET_X86_64
2453 gen_op_movq_A0_reg(R_ESP
);
2455 gen_op_addq_A0_im(-8);
2456 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2458 gen_op_addq_A0_im(-2);
2459 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2461 gen_op_mov_reg_A0(2, R_ESP
);
2465 gen_op_movl_A0_reg(R_ESP
);
2467 gen_op_addl_A0_im(-2);
2469 gen_op_addl_A0_im(-4);
2472 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2473 gen_op_addl_A0_seg(R_SS
);
2476 gen_op_andl_A0_ffff();
2477 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2478 gen_op_addl_A0_seg(R_SS
);
2480 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2481 if (s
->ss32
&& !s
->addseg
)
2482 gen_op_mov_reg_A0(1, R_ESP
);
2484 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2488 /* generate a push. It depends on ss32, addseg and dflag */
2489 /* slower version for T1, only used for call Ev */
2490 static void gen_push_T1(DisasContext
*s
)
2492 #ifdef TARGET_X86_64
2494 gen_op_movq_A0_reg(R_ESP
);
2496 gen_op_addq_A0_im(-8);
2497 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2499 gen_op_addq_A0_im(-2);
2500 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2502 gen_op_mov_reg_A0(2, R_ESP
);
2506 gen_op_movl_A0_reg(R_ESP
);
2508 gen_op_addl_A0_im(-2);
2510 gen_op_addl_A0_im(-4);
2513 gen_op_addl_A0_seg(R_SS
);
2516 gen_op_andl_A0_ffff();
2517 gen_op_addl_A0_seg(R_SS
);
2519 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2521 if (s
->ss32
&& !s
->addseg
)
2522 gen_op_mov_reg_A0(1, R_ESP
);
2524 gen_stack_update(s
, (-2) << s
->dflag
);
2528 /* two step pop is necessary for precise exceptions */
2529 static void gen_pop_T0(DisasContext
*s
)
2531 #ifdef TARGET_X86_64
2533 gen_op_movq_A0_reg(R_ESP
);
2534 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2538 gen_op_movl_A0_reg(R_ESP
);
2541 gen_op_addl_A0_seg(R_SS
);
2543 gen_op_andl_A0_ffff();
2544 gen_op_addl_A0_seg(R_SS
);
2546 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2550 static void gen_pop_update(DisasContext
*s
)
2552 #ifdef TARGET_X86_64
2553 if (CODE64(s
) && s
->dflag
) {
2554 gen_stack_update(s
, 8);
2558 gen_stack_update(s
, 2 << s
->dflag
);
2562 static void gen_stack_A0(DisasContext
*s
)
2564 gen_op_movl_A0_reg(R_ESP
);
2566 gen_op_andl_A0_ffff();
2567 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2569 gen_op_addl_A0_seg(R_SS
);
2572 /* NOTE: wrap around in 16 bit not fully handled */
2573 static void gen_pusha(DisasContext
*s
)
2576 gen_op_movl_A0_reg(R_ESP
);
2577 gen_op_addl_A0_im(-16 << s
->dflag
);
2579 gen_op_andl_A0_ffff();
2580 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2582 gen_op_addl_A0_seg(R_SS
);
2583 for(i
= 0;i
< 8; i
++) {
2584 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2585 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2586 gen_op_addl_A0_im(2 << s
->dflag
);
2588 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2591 /* NOTE: wrap around in 16 bit not fully handled */
2592 static void gen_popa(DisasContext
*s
)
2595 gen_op_movl_A0_reg(R_ESP
);
2597 gen_op_andl_A0_ffff();
2598 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2599 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2601 gen_op_addl_A0_seg(R_SS
);
2602 for(i
= 0;i
< 8; i
++) {
2603 /* ESP is not reloaded */
2605 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2606 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2608 gen_op_addl_A0_im(2 << s
->dflag
);
2610 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2613 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2618 #ifdef TARGET_X86_64
2620 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2623 gen_op_movl_A0_reg(R_ESP
);
2624 gen_op_addq_A0_im(-opsize
);
2625 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2628 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2629 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2631 /* XXX: must save state */
2632 gen_helper_enter64_level(tcg_const_i32(level
),
2633 tcg_const_i32((ot
== OT_QUAD
)),
2636 gen_op_mov_reg_T1(ot
, R_EBP
);
2637 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2638 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2642 ot
= s
->dflag
+ OT_WORD
;
2643 opsize
= 2 << s
->dflag
;
2645 gen_op_movl_A0_reg(R_ESP
);
2646 gen_op_addl_A0_im(-opsize
);
2648 gen_op_andl_A0_ffff();
2649 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2651 gen_op_addl_A0_seg(R_SS
);
2653 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2654 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2656 /* XXX: must save state */
2657 gen_helper_enter_level(tcg_const_i32(level
),
2658 tcg_const_i32(s
->dflag
),
2661 gen_op_mov_reg_T1(ot
, R_EBP
);
2662 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2663 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(tcg_const_i32(trapno));
    s->is_jmp = 3;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = 3;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_helper_debug();
    s->is_jmp = 3;
}
/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq();
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf();
    }
    if (s->singlestep_enabled) {
        gen_helper_debug();
    } else if (s->tf) {
        gen_helper_single_step();
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = 3;
}

/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    if (s->jmp_opt) {
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = 3;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
static inline void gen_ldq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
}

static inline void gen_ldo_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
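
/*
 * 128-bit XMM loads/stores (gen_ldo_env_A0/gen_sto_env_A0) and register
 * copies (gen_op_movo) are emitted as two 64-bit TCG env/memory accesses,
 * since TCG has no native 128-bit type.
 */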
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
static void *sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { gen_helper_shufps, gen_helper_shufpd },

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
    [0x70] = { gen_helper_pshufw_mmx,
               gen_helper_pshufd_xmm,
               gen_helper_pshufhw_xmm,
               gen_helper_pshuflw_xmm },
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2897 [0xd8] = MMX_OP2(psubusb
),
2898 [0xd9] = MMX_OP2(psubusw
),
2899 [0xda] = MMX_OP2(pminub
),
2900 [0xdb] = MMX_OP2(pand
),
2901 [0xdc] = MMX_OP2(paddusb
),
2902 [0xdd] = MMX_OP2(paddusw
),
2903 [0xde] = MMX_OP2(pmaxub
),
2904 [0xdf] = MMX_OP2(pandn
),
2905 [0xe0] = MMX_OP2(pavgb
),
2906 [0xe1] = MMX_OP2(psraw
),
2907 [0xe2] = MMX_OP2(psrad
),
2908 [0xe3] = MMX_OP2(pavgw
),
2909 [0xe4] = MMX_OP2(pmulhuw
),
2910 [0xe5] = MMX_OP2(pmulhw
),
2911 [0xe6] = { NULL
, gen_helper_cvttpd2dq
, gen_helper_cvtdq2pd
, gen_helper_cvtpd2dq
},
2912 [0xe7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movntq, movntq */
2913 [0xe8] = MMX_OP2(psubsb
),
2914 [0xe9] = MMX_OP2(psubsw
),
2915 [0xea] = MMX_OP2(pminsw
),
2916 [0xeb] = MMX_OP2(por
),
2917 [0xec] = MMX_OP2(paddsb
),
2918 [0xed] = MMX_OP2(paddsw
),
2919 [0xee] = MMX_OP2(pmaxsw
),
2920 [0xef] = MMX_OP2(pxor
),
2921 [0xf0] = { NULL
, NULL
, NULL
, SSE_SPECIAL
}, /* lddqu */
2922 [0xf1] = MMX_OP2(psllw
),
2923 [0xf2] = MMX_OP2(pslld
),
2924 [0xf3] = MMX_OP2(psllq
),
2925 [0xf4] = MMX_OP2(pmuludq
),
2926 [0xf5] = MMX_OP2(pmaddwd
),
2927 [0xf6] = MMX_OP2(psadbw
),
2928 [0xf7] = MMX_OP2(maskmov
),
2929 [0xf8] = MMX_OP2(psubb
),
2930 [0xf9] = MMX_OP2(psubw
),
2931 [0xfa] = MMX_OP2(psubl
),
2932 [0xfb] = MMX_OP2(psubq
),
2933 [0xfc] = MMX_OP2(paddb
),
2934 [0xfd] = MMX_OP2(paddw
),
2935 [0xfe] = MMX_OP2(paddl
),
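
/* sse_op_table2 serves the immediate-count shift group (opcodes 0x71/0x72/
   0x73): gen_sse() picks the row from the opcode and the modrm reg field
   (2 = shift right logical, 4 = shift right arithmetic, 6 = shift left;
   rows 16+3 and 16+7 are the xmm-only psrldq/pslldq byte shifts), and the
   column again from the 0x66 prefix. */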
static void *sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};

static void *sse_op_table3[4 * 3] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd,
    X86_64_ONLY(gen_helper_cvtsq2ss),
    X86_64_ONLY(gen_helper_cvtsq2sd),

    gen_helper_cvttss2si,
    gen_helper_cvttsd2si,
    X86_64_ONLY(gen_helper_cvttss2sq),
    X86_64_ONLY(gen_helper_cvttsd2sq),

    gen_helper_cvtss2si,
    gen_helper_cvtsd2si,
    X86_64_ONLY(gen_helper_cvtss2sq),
    X86_64_ONLY(gen_helper_cvtsd2sq),
};
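
/* sse_op_table3 holds the scalar integer<->float conversions in groups of
   four: {32-bit ss, 32-bit sd, 64-bit ss, 64-bit sd}, the 64-bit entries
   disappearing via X86_64_ONLY() when 64-bit support is not built in.  The
   three groups are cvtsi2*, cvtt*2si and cvt*2si; gen_sse() selects within
   a group from dflag and the repz/repnz prefix. */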
static void *sse_op_table4[8][4] = {

static void *sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
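
/* sse_op_table5 is indexed directly by the 3DNow! suffix byte that follows
   the modrm/displacement of a 0x0F 0x0F instruction (see the 0x0f case in
   gen_sse()). */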
struct sse_op_helper_s {
    void *op[2]; uint32_t ext_mask;
};
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
static struct sse_op_helper_s sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};
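
/* sse_op_table6 covers the 0x0F 0x38 opcode space and sse_op_table7 the
   0x0F 0x3A (immediate-byte) space; before an entry is used, gen_sse()
   checks its ext_mask against the guest's reported CPUID extended
   features. */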
static struct sse_op_helper_s sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};
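
/* Note on the case labels inside gen_sse(): the mandatory prefix is folded
   into bits 8-9 of the switch value, so e.g. 0x210 and 0x310 are the 0xF3
   and 0xF2 forms of opcode 0x10 (movss/movsd) and 0x1xx the 0x66 forms. */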
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;

    if (s->prefix & PREFIX_DATA)
    else if (s->prefix & PREFIX_REPZ)
    else if (s->prefix & PREFIX_REPNZ)
    sse_op2 = sse_op_table1[b][b1];
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
    if (s->flags & HF_EM_MASK) {
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
    if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    gen_helper_enter_mmx();
    modrm = ldub_code(s->pc++);
    reg = ((modrm >> 3) & 7);
    mod = (modrm >> 6) & 3;
    if (sse_op2 == SSE_SPECIAL) {
        case 0x0e7: /* movntq */
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
        case 0x3f0: /* lddqu */
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
        case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
        case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
        case 0x6f: /* movq mm, ea */
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                           offsetof(CPUX86State,fpregs[rm].mmx));
            tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                           offsetof(CPUX86State,fpregs[reg].mmx));
        case 0x010: /* movups */
        case 0x110: /* movupd */
        case 0x028: /* movaps */
        case 0x128: /* movapd */
        case 0x16f: /* movdqa xmm, ea */
        case 0x26f: /* movdqu xmm, ea */
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            rm = (modrm & 7) | REX_B(s);
            gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
                        offsetof(CPUX86State,xmm_regs[rm]));
        case 0x210: /* movss xmm, ea */
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            rm = (modrm & 7) | REX_B(s);
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
        case 0x310: /* movsd xmm, ea */
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3256 case 0x012: /* movlps */
3257 case 0x112: /* movlpd */
3259 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3260 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3263 rm
= (modrm
& 7) | REX_B(s
);
3264 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3265 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3268 case 0x212: /* movsldup */
3270 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3271 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3273 rm
= (modrm
& 7) | REX_B(s
);
3274 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3275 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3276 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3277 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3279 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3280 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3281 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3282 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3284 case 0x312: /* movddup */
3286 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3287 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3289 rm
= (modrm
& 7) | REX_B(s
);
3290 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3291 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3293 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3294 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3296 case 0x016: /* movhps */
3297 case 0x116: /* movhpd */
3299 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3300 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3303 rm
= (modrm
& 7) | REX_B(s
);
3304 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3305 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3308 case 0x216: /* movshdup */
3310 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3311 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3313 rm
= (modrm
& 7) | REX_B(s
);
3314 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3315 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3316 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3317 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3319 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3320 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3321 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3322 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3324 case 0x7e: /* movd ea, mm */
3325 #ifdef TARGET_X86_64
3326 if (s
->dflag
== 2) {
3327 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3328 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3329 gen_ldst_modrm(s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3333 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3334 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3335 gen_ldst_modrm(s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3338 case 0x17e: /* movd ea, xmm */
3339 #ifdef TARGET_X86_64
3340 if (s
->dflag
== 2) {
3341 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3342 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3343 gen_ldst_modrm(s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3347 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3348 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3349 gen_ldst_modrm(s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3352 case 0x27e: /* movq xmm, ea */
3354 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3355 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3357 rm
= (modrm
& 7) | REX_B(s
);
3358 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3359 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3361 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3363 case 0x7f: /* movq ea, mm */
3365 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3366 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3369 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3370 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3373 case 0x011: /* movups */
3374 case 0x111: /* movupd */
3375 case 0x029: /* movaps */
3376 case 0x129: /* movapd */
3377 case 0x17f: /* movdqa ea, xmm */
3378 case 0x27f: /* movdqu ea, xmm */
3380 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3381 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3383 rm
= (modrm
& 7) | REX_B(s
);
3384 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3385 offsetof(CPUX86State
,xmm_regs
[reg
]));
3388 case 0x211: /* movss ea, xmm */
3390 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3391 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3392 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3394 rm
= (modrm
& 7) | REX_B(s
);
3395 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3396 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3399 case 0x311: /* movsd ea, xmm */
3401 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3402 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3404 rm
= (modrm
& 7) | REX_B(s
);
3405 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3406 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3409 case 0x013: /* movlps */
3410 case 0x113: /* movlpd */
3412 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3413 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3418 case 0x017: /* movhps */
3419 case 0x117: /* movhpd */
3421 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3422 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3427 case 0x71: /* shift mm, im */
3430 case 0x171: /* shift xmm, im */
3433 val
= ldub_code(s
->pc
++);
3435 gen_op_movl_T0_im(val
);
3436 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3438 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3439 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3441 gen_op_movl_T0_im(val
);
3442 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3444 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3445 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3447 sse_op2
= sse_op_table2
[((b
- 1) & 3) * 8 + (((modrm
>> 3)) & 7)][b1
];
3451 rm
= (modrm
& 7) | REX_B(s
);
3452 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3455 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3457 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3458 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3459 ((void (*)(TCGv_ptr
, TCGv_ptr
))sse_op2
)(cpu_ptr0
, cpu_ptr1
);
3461 case 0x050: /* movmskps */
3462 rm
= (modrm
& 7) | REX_B(s
);
3463 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3464 offsetof(CPUX86State
,xmm_regs
[rm
]));
3465 gen_helper_movmskps(cpu_tmp2_i32
, cpu_ptr0
);
3466 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3467 gen_op_mov_reg_T0(OT_LONG
, reg
);
3469 case 0x150: /* movmskpd */
3470 rm
= (modrm
& 7) | REX_B(s
);
3471 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3472 offsetof(CPUX86State
,xmm_regs
[rm
]));
3473 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_ptr0
);
3474 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3475 gen_op_mov_reg_T0(OT_LONG
, reg
);
3477 case 0x02a: /* cvtpi2ps */
3478 case 0x12a: /* cvtpi2pd */
3479 gen_helper_enter_mmx();
3481 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3482 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3483 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3486 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3488 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3489 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3490 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3493 gen_helper_cvtpi2ps(cpu_ptr0
, cpu_ptr1
);
3497 gen_helper_cvtpi2pd(cpu_ptr0
, cpu_ptr1
);
3501 case 0x22a: /* cvtsi2ss */
3502 case 0x32a: /* cvtsi2sd */
3503 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3504 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
3505 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3506 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3507 sse_op2
= sse_op_table3
[(s
->dflag
== 2) * 2 + ((b
>> 8) - 2)];
3508 if (ot
== OT_LONG
) {
3509 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3510 ((void (*)(TCGv_ptr
, TCGv_i32
))sse_op2
)(cpu_ptr0
, cpu_tmp2_i32
);
3512 ((void (*)(TCGv_ptr
, TCGv
))sse_op2
)(cpu_ptr0
, cpu_T
[0]);
3515 case 0x02c: /* cvttps2pi */
3516 case 0x12c: /* cvttpd2pi */
3517 case 0x02d: /* cvtps2pi */
3518 case 0x12d: /* cvtpd2pi */
3519 gen_helper_enter_mmx();
3521 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3522 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3523 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3525 rm
= (modrm
& 7) | REX_B(s
);
3526 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3528 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3529 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3530 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3533 gen_helper_cvttps2pi(cpu_ptr0
, cpu_ptr1
);
3536 gen_helper_cvttpd2pi(cpu_ptr0
, cpu_ptr1
);
3539 gen_helper_cvtps2pi(cpu_ptr0
, cpu_ptr1
);
3542 gen_helper_cvtpd2pi(cpu_ptr0
, cpu_ptr1
);
3546 case 0x22c: /* cvttss2si */
3547 case 0x32c: /* cvttsd2si */
3548 case 0x22d: /* cvtss2si */
3549 case 0x32d: /* cvtsd2si */
3550 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3552 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3554 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3556 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3557 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3559 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3561 rm
= (modrm
& 7) | REX_B(s
);
3562 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3564 sse_op2
= sse_op_table3
[(s
->dflag
== 2) * 2 + ((b
>> 8) - 2) + 4 +
3566 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3567 if (ot
== OT_LONG
) {
3568 ((void (*)(TCGv_i32
, TCGv_ptr
))sse_op2
)(cpu_tmp2_i32
, cpu_ptr0
);
3569 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3571 ((void (*)(TCGv
, TCGv_ptr
))sse_op2
)(cpu_T
[0], cpu_ptr0
);
3573 gen_op_mov_reg_T0(ot
, reg
);
3575 case 0xc4: /* pinsrw */
3578 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3579 val
= ldub_code(s
->pc
++);
3582 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3583 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3586 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3587 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3590 case 0xc5: /* pextrw */
3594 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3595 val
= ldub_code(s
->pc
++);
3598 rm
= (modrm
& 7) | REX_B(s
);
3599 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3600 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3604 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3605 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3607 reg
= ((modrm
>> 3) & 7) | rex_r
;
3608 gen_op_mov_reg_T0(ot
, reg
);
3610 case 0x1d6: /* movq ea, xmm */
3612 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3613 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3615 rm
= (modrm
& 7) | REX_B(s
);
3616 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3617 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3618 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3621 case 0x2d6: /* movq2dq */
3622 gen_helper_enter_mmx();
3624 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3625 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3626 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3628 case 0x3d6: /* movdq2q */
3629 gen_helper_enter_mmx();
3630 rm
= (modrm
& 7) | REX_B(s
);
3631 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3632 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3634 case 0xd7: /* pmovmskb */
3639 rm
= (modrm
& 7) | REX_B(s
);
3640 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3641 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_ptr0
);
3644 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3645 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_ptr0
);
3647 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3648 reg
= ((modrm
>> 3) & 7) | rex_r
;
3649 gen_op_mov_reg_T0(OT_LONG
, reg
);
3652 if (s
->prefix
& PREFIX_REPNZ
)
3656 modrm
= ldub_code(s
->pc
++);
3658 reg
= ((modrm
>> 3) & 7) | rex_r
;
3659 mod
= (modrm
>> 6) & 3;
3661 sse_op2
= sse_op_table6
[b
].op
[b1
];
3664 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3668 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3670 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3672 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3673 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3675 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3676 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3677 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3678 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3679 offsetof(XMMReg
, XMM_Q(0)));
3681 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3682 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3683 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3684 (s
->mem_index
>> 2) - 1);
3685 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3686 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3687 offsetof(XMMReg
, XMM_L(0)));
3689 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3690 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3691 (s
->mem_index
>> 2) - 1);
3692 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3693 offsetof(XMMReg
, XMM_W(0)));
3695 case 0x2a: /* movntqda */
3696 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3699 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3703 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3705 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3707 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3708 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3709 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3712 if (sse_op2
== SSE_SPECIAL
)
3715 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3716 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3717 ((void (*)(TCGv_ptr
, TCGv_ptr
))sse_op2
)(cpu_ptr0
, cpu_ptr1
);
3720 s
->cc_op
= CC_OP_EFLAGS
;
3722 case 0x338: /* crc32 */
3725 modrm
= ldub_code(s
->pc
++);
3726 reg
= ((modrm
>> 3) & 7) | rex_r
;
3728 if (b
!= 0xf0 && b
!= 0xf1)
3730 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
))
3735 else if (b
== 0xf1 && s
->dflag
!= 2)
3736 if (s
->prefix
& PREFIX_DATA
)
3743 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3744 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3745 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
3746 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3747 cpu_T
[0], tcg_const_i32(8 << ot
));
3749 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3750 gen_op_mov_reg_T0(ot
, reg
);
3755 modrm
= ldub_code(s
->pc
++);
3757 reg
= ((modrm
>> 3) & 7) | rex_r
;
3758 mod
= (modrm
>> 6) & 3;
3760 sse_op2
= sse_op_table7
[b
].op
[b1
];
3763 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
3766 if (sse_op2
== SSE_SPECIAL
) {
3767 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3768 rm
= (modrm
& 7) | REX_B(s
);
3770 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3771 reg
= ((modrm
>> 3) & 7) | rex_r
;
3772 val
= ldub_code(s
->pc
++);
3774 case 0x14: /* pextrb */
3775 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3776 xmm_regs
[reg
].XMM_B(val
& 15)));
3778 gen_op_mov_reg_T0(ot
, rm
);
3780 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
3781 (s
->mem_index
>> 2) - 1);
3783 case 0x15: /* pextrw */
3784 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3785 xmm_regs
[reg
].XMM_W(val
& 7)));
3787 gen_op_mov_reg_T0(ot
, rm
);
3789 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
3790 (s
->mem_index
>> 2) - 1);
3793 if (ot
== OT_LONG
) { /* pextrd */
3794 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
3795 offsetof(CPUX86State
,
3796 xmm_regs
[reg
].XMM_L(val
& 3)));
3797 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3799 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
3801 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
3802 (s
->mem_index
>> 2) - 1);
3803 } else { /* pextrq */
3804 #ifdef TARGET_X86_64
3805 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3806 offsetof(CPUX86State
,
3807 xmm_regs
[reg
].XMM_Q(val
& 1)));
3809 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
3811 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
3812 (s
->mem_index
>> 2) - 1);
3818 case 0x17: /* extractps */
3819 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3820 xmm_regs
[reg
].XMM_L(val
& 3)));
3822 gen_op_mov_reg_T0(ot
, rm
);
3824 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
3825 (s
->mem_index
>> 2) - 1);
3827 case 0x20: /* pinsrb */
3829 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
3831 tcg_gen_qemu_ld8u(cpu_tmp0
, cpu_A0
,
3832 (s
->mem_index
>> 2) - 1);
3833 tcg_gen_st8_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
,
3834 xmm_regs
[reg
].XMM_B(val
& 15)));
3836 case 0x21: /* insertps */
3838 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
3839 offsetof(CPUX86State
,xmm_regs
[rm
]
3840 .XMM_L((val
>> 6) & 3)));
3842 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3843 (s
->mem_index
>> 2) - 1);
3844 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3846 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
3847 offsetof(CPUX86State
,xmm_regs
[reg
]
3848 .XMM_L((val
>> 4) & 3)));
3850 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3851 cpu_env
, offsetof(CPUX86State
,
3852 xmm_regs
[reg
].XMM_L(0)));
3854 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3855 cpu_env
, offsetof(CPUX86State
,
3856 xmm_regs
[reg
].XMM_L(1)));
3858 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3859 cpu_env
, offsetof(CPUX86State
,
3860 xmm_regs
[reg
].XMM_L(2)));
3862 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3863 cpu_env
, offsetof(CPUX86State
,
3864 xmm_regs
[reg
].XMM_L(3)));
3867 if (ot
== OT_LONG
) { /* pinsrd */
3869 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
3871 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3872 (s
->mem_index
>> 2) - 1);
3873 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3874 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
3875 offsetof(CPUX86State
,
3876 xmm_regs
[reg
].XMM_L(val
& 3)));
3877 } else { /* pinsrq */
3878 #ifdef TARGET_X86_64
3880 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
3882 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
3883 (s
->mem_index
>> 2) - 1);
3884 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3885 offsetof(CPUX86State
,
3886 xmm_regs
[reg
].XMM_Q(val
& 1)));
3897 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3899 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3901 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3902 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3903 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3906 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3908 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3910 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3911 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
3912 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3915 val
= ldub_code(s
->pc
++);
3917 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
3918 s
->cc_op
= CC_OP_EFLAGS
;
3921 /* The helper must use entire 64-bit gp registers */
3925 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3926 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3927 ((void (*)(TCGv_ptr
, TCGv_ptr
, TCGv_i32
))sse_op2
)(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
    /* generic MMX or SSE operation */
    case 0x70: /* pshufx insn */
    case 0xc6: /* pshufx insn */
    case 0xc2: /* compare insns */
        op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        op2_offset = offsetof(CPUX86State,xmm_t0);
        if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
            /* specific case for SSE single instructions */
            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
            gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
            gen_ldo_env_A0(s->mem_index, op2_offset);
        rm = (modrm & 7) | REX_B(s);
        op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
        op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        op2_offset = offsetof(CPUX86State,mmx_t0);
        gen_ldq_env_A0(s->mem_index, op2_offset);
        op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
    case 0x0f: /* 3DNow! data insns */
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
        val = ldub_code(s->pc++);
        sse_op2 = sse_op_table5[val];
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
    case 0x70: /* pshufx insn */
    case 0xc6: /* pshufx insn */
        val = ldub_code(s->pc++);
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
        val = ldub_code(s->pc++);
        sse_op2 = sse_op_table4[val][b1];
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
        /* maskmov : we must prepare A0 */
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_movq_A0_reg(R_EDI);
            gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_add_A0_ds_seg(s);
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
    if (b == 0x2e || b == 0x2f) {
        s->cc_op = CC_OP_EFLAGS;
    }
}
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(pc_start);
4057 #ifdef TARGET_X86_64
4062 s
->rip_offset
= 0; /* for relative ip address */
4064 b
= ldub_code(s
->pc
);
4066 /* check prefixes */
4067 #ifdef TARGET_X86_64
4071 prefixes
|= PREFIX_REPZ
;
4074 prefixes
|= PREFIX_REPNZ
;
4077 prefixes
|= PREFIX_LOCK
;
4098 prefixes
|= PREFIX_DATA
;
4101 prefixes
|= PREFIX_ADR
;
4105 rex_w
= (b
>> 3) & 1;
4106 rex_r
= (b
& 0x4) << 1;
4107 s
->rex_x
= (b
& 0x2) << 2;
4108 REX_B(s
) = (b
& 0x1) << 3;
4109 x86_64_hregs
= 1; /* select uniform byte register addressing */
4113 /* 0x66 is ignored if rex.w is set */
4116 if (prefixes
& PREFIX_DATA
)
4119 if (!(prefixes
& PREFIX_ADR
))
4126 prefixes
|= PREFIX_REPZ
;
4129 prefixes
|= PREFIX_REPNZ
;
4132 prefixes
|= PREFIX_LOCK
;
4153 prefixes
|= PREFIX_DATA
;
4156 prefixes
|= PREFIX_ADR
;
4159 if (prefixes
& PREFIX_DATA
)
4161 if (prefixes
& PREFIX_ADR
)
4165 s
->prefix
= prefixes
;
4169 /* lock generation */
4170 if (prefixes
& PREFIX_LOCK
)
4173 /* now check op code */
4177 /**************************/
4178 /* extended op code */
4179 b
= ldub_code(s
->pc
++) | 0x100;
4182 /**************************/
4200 ot
= dflag
+ OT_WORD
;
4203 case 0: /* OP Ev, Gv */
4204 modrm
= ldub_code(s
->pc
++);
4205 reg
= ((modrm
>> 3) & 7) | rex_r
;
4206 mod
= (modrm
>> 6) & 3;
4207 rm
= (modrm
& 7) | REX_B(s
);
4209 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4211 } else if (op
== OP_XORL
&& rm
== reg
) {
4213 /* xor reg, reg optimisation */
4215 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4216 gen_op_mov_reg_T0(ot
, reg
);
4217 gen_op_update1_cc();
4222 gen_op_mov_TN_reg(ot
, 1, reg
);
4223 gen_op(s
, op
, ot
, opreg
);
4225 case 1: /* OP Gv, Ev */
4226 modrm
= ldub_code(s
->pc
++);
4227 mod
= (modrm
>> 6) & 3;
4228 reg
= ((modrm
>> 3) & 7) | rex_r
;
4229 rm
= (modrm
& 7) | REX_B(s
);
4231 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4232 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4233 } else if (op
== OP_XORL
&& rm
== reg
) {
4236 gen_op_mov_TN_reg(ot
, 1, rm
);
4238 gen_op(s
, op
, ot
, reg
);
4240 case 2: /* OP A, Iv */
4241 val
= insn_get(s
, ot
);
4242 gen_op_movl_T1_im(val
);
4243 gen_op(s
, op
, ot
, OR_EAX
);
4252 case 0x80: /* GRP1 */
4261 ot
= dflag
+ OT_WORD
;
4263 modrm
= ldub_code(s
->pc
++);
4264 mod
= (modrm
>> 6) & 3;
4265 rm
= (modrm
& 7) | REX_B(s
);
4266 op
= (modrm
>> 3) & 7;
4272 s
->rip_offset
= insn_const_size(ot
);
4273 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4284 val
= insn_get(s
, ot
);
4287 val
= (int8_t)insn_get(s
, OT_BYTE
);
4290 gen_op_movl_T1_im(val
);
4291 gen_op(s
, op
, ot
, opreg
);
4295 /**************************/
4296 /* inc, dec, and other misc arith */
4297 case 0x40 ... 0x47: /* inc Gv */
4298 ot
= dflag
? OT_LONG
: OT_WORD
;
4299 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4301 case 0x48 ... 0x4f: /* dec Gv */
4302 ot
= dflag
? OT_LONG
: OT_WORD
;
4303 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4305 case 0xf6: /* GRP3 */
4310 ot
= dflag
+ OT_WORD
;
4312 modrm
= ldub_code(s
->pc
++);
4313 mod
= (modrm
>> 6) & 3;
4314 rm
= (modrm
& 7) | REX_B(s
);
4315 op
= (modrm
>> 3) & 7;
4318 s
->rip_offset
= insn_const_size(ot
);
4319 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4320 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4322 gen_op_mov_TN_reg(ot
, 0, rm
);
4327 val
= insn_get(s
, ot
);
4328 gen_op_movl_T1_im(val
);
4329 gen_op_testl_T0_T1_cc();
4330 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4333 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4335 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4337 gen_op_mov_reg_T0(ot
, rm
);
4341 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
4343 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4345 gen_op_mov_reg_T0(ot
, rm
);
4347 gen_op_update_neg_cc();
4348 s
->cc_op
= CC_OP_SUBB
+ ot
;
4353 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4354 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
4355 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
4356 /* XXX: use 32 bit mul which could be faster */
4357 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4358 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4359 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4360 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
4361 s
->cc_op
= CC_OP_MULB
;
4364 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4365 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
4366 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
4367 /* XXX: use 32 bit mul which could be faster */
4368 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4369 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4370 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4371 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4372 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4373 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4374 s
->cc_op
= CC_OP_MULW
;
4378 #ifdef TARGET_X86_64
4379 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4380 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4381 tcg_gen_ext32u_tl(cpu_T
[1], cpu_T
[1]);
4382 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4383 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4384 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4385 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4386 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4387 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4391 t0
= tcg_temp_new_i64();
4392 t1
= tcg_temp_new_i64();
4393 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4394 tcg_gen_extu_i32_i64(t0
, cpu_T
[0]);
4395 tcg_gen_extu_i32_i64(t1
, cpu_T
[1]);
4396 tcg_gen_mul_i64(t0
, t0
, t1
);
4397 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4398 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4399 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4400 tcg_gen_shri_i64(t0
, t0
, 32);
4401 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4402 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4403 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4406 s
->cc_op
= CC_OP_MULL
;
4408 #ifdef TARGET_X86_64
4410 gen_helper_mulq_EAX_T0(cpu_T
[0]);
4411 s
->cc_op
= CC_OP_MULQ
;
4419 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4420 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4421 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
4422 /* XXX: use 32 bit mul which could be faster */
4423 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4424 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4425 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4426 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
4427 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4428 s
->cc_op
= CC_OP_MULB
;
4431 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4432 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4433 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4434 /* XXX: use 32 bit mul which could be faster */
4435 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4436 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4437 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4438 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4439 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4440 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4441 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4442 s
->cc_op
= CC_OP_MULW
;
4446 #ifdef TARGET_X86_64
4447 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4448 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4449 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4450 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4451 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4452 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4453 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4454 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4455 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4456 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4460 t0
= tcg_temp_new_i64();
4461 t1
= tcg_temp_new_i64();
4462 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4463 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4464 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4465 tcg_gen_mul_i64(t0
, t0
, t1
);
4466 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4467 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4468 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4469 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4470 tcg_gen_shri_i64(t0
, t0
, 32);
4471 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4472 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4473 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4476 s
->cc_op
= CC_OP_MULL
;
4478 #ifdef TARGET_X86_64
4480 gen_helper_imulq_EAX_T0(cpu_T
[0]);
4481 s
->cc_op
= CC_OP_MULQ
;
4489 gen_jmp_im(pc_start
- s
->cs_base
);
4490 gen_helper_divb_AL(cpu_T
[0]);
4493 gen_jmp_im(pc_start
- s
->cs_base
);
4494 gen_helper_divw_AX(cpu_T
[0]);
4498 gen_jmp_im(pc_start
- s
->cs_base
);
4499 gen_helper_divl_EAX(cpu_T
[0]);
4501 #ifdef TARGET_X86_64
4503 gen_jmp_im(pc_start
- s
->cs_base
);
4504 gen_helper_divq_EAX(cpu_T
[0]);
4512 gen_jmp_im(pc_start
- s
->cs_base
);
4513 gen_helper_idivb_AL(cpu_T
[0]);
4516 gen_jmp_im(pc_start
- s
->cs_base
);
4517 gen_helper_idivw_AX(cpu_T
[0]);
4521 gen_jmp_im(pc_start
- s
->cs_base
);
4522 gen_helper_idivl_EAX(cpu_T
[0]);
4524 #ifdef TARGET_X86_64
4526 gen_jmp_im(pc_start
- s
->cs_base
);
4527 gen_helper_idivq_EAX(cpu_T
[0]);
4537 case 0xfe: /* GRP4 */
4538 case 0xff: /* GRP5 */
4542 ot
= dflag
+ OT_WORD
;
4544 modrm
= ldub_code(s
->pc
++);
4545 mod
= (modrm
>> 6) & 3;
4546 rm
= (modrm
& 7) | REX_B(s
);
4547 op
= (modrm
>> 3) & 7;
4548 if (op
>= 2 && b
== 0xfe) {
4552 if (op
== 2 || op
== 4) {
4553 /* operand size for jumps is 64 bit */
4555 } else if (op
== 3 || op
== 5) {
4556 /* for call calls, the operand is 16 or 32 bit, even
4558 ot
= dflag
? OT_LONG
: OT_WORD
;
4559 } else if (op
== 6) {
4560 /* default push size is 64 bit */
4561 ot
= dflag
? OT_QUAD
: OT_WORD
;
4565 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4566 if (op
>= 2 && op
!= 3 && op
!= 5)
4567 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4569 gen_op_mov_TN_reg(ot
, 0, rm
);
4573 case 0: /* inc Ev */
4578 gen_inc(s
, ot
, opreg
, 1);
4580 case 1: /* dec Ev */
4585 gen_inc(s
, ot
, opreg
, -1);
4587 case 2: /* call Ev */
4588 /* XXX: optimize if memory (no 'and' is necessary) */
4590 gen_op_andl_T0_ffff();
4591 next_eip
= s
->pc
- s
->cs_base
;
4592 gen_movtl_T1_im(next_eip
);
4597 case 3: /* lcall Ev */
4598 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4599 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4600 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4602 if (s
->pe
&& !s
->vm86
) {
4603 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4604 gen_op_set_cc_op(s
->cc_op
);
4605 gen_jmp_im(pc_start
- s
->cs_base
);
4606 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4607 gen_helper_lcall_protected(cpu_tmp2_i32
, cpu_T
[1],
4608 tcg_const_i32(dflag
),
4609 tcg_const_i32(s
->pc
- pc_start
));
4611 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4612 gen_helper_lcall_real(cpu_tmp2_i32
, cpu_T
[1],
4613 tcg_const_i32(dflag
),
4614 tcg_const_i32(s
->pc
- s
->cs_base
));
4618 case 4: /* jmp Ev */
4620 gen_op_andl_T0_ffff();
4624 case 5: /* ljmp Ev */
4625 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4626 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4627 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4629 if (s
->pe
&& !s
->vm86
) {
4630 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4631 gen_op_set_cc_op(s
->cc_op
);
4632 gen_jmp_im(pc_start
- s
->cs_base
);
4633 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4634 gen_helper_ljmp_protected(cpu_tmp2_i32
, cpu_T
[1],
4635 tcg_const_i32(s
->pc
- pc_start
));
4637 gen_op_movl_seg_T0_vm(R_CS
);
4638 gen_op_movl_T0_T1();
4643 case 6: /* push Ev */
4651 case 0x84: /* test Ev, Gv */
4656 ot
= dflag
+ OT_WORD
;
4658 modrm
= ldub_code(s
->pc
++);
4659 mod
= (modrm
>> 6) & 3;
4660 rm
= (modrm
& 7) | REX_B(s
);
4661 reg
= ((modrm
>> 3) & 7) | rex_r
;
4663 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
4664 gen_op_mov_TN_reg(ot
, 1, reg
);
4665 gen_op_testl_T0_T1_cc();
4666 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4669 case 0xa8: /* test eAX, Iv */
4674 ot
= dflag
+ OT_WORD
;
4675 val
= insn_get(s
, ot
);
4677 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
4678 gen_op_movl_T1_im(val
);
4679 gen_op_testl_T0_T1_cc();
4680 s
->cc_op
= CC_OP_LOGICB
+ ot
;
4683 case 0x98: /* CWDE/CBW */
4684 #ifdef TARGET_X86_64
4686 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4687 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4688 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
4692 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4693 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4694 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4696 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
4697 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4698 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4701 case 0x99: /* CDQ/CWD */
4702 #ifdef TARGET_X86_64
4704 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
4705 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
4706 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
4710 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
4711 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4712 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
4713 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4715 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
4716 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4717 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
4718 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4721 case 0x1af: /* imul Gv, Ev */
4722 case 0x69: /* imul Gv, Ev, I */
4724 ot
= dflag
+ OT_WORD
;
4725 modrm
= ldub_code(s
->pc
++);
4726 reg
= ((modrm
>> 3) & 7) | rex_r
;
4728 s
->rip_offset
= insn_const_size(ot
);
4731 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
4733 val
= insn_get(s
, ot
);
4734 gen_op_movl_T1_im(val
);
4735 } else if (b
== 0x6b) {
4736 val
= (int8_t)insn_get(s
, OT_BYTE
);
4737 gen_op_movl_T1_im(val
);
4739 gen_op_mov_TN_reg(ot
, 1, reg
);
4742 #ifdef TARGET_X86_64
4743 if (ot
== OT_QUAD
) {
4744 gen_helper_imulq_T0_T1(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4747 if (ot
== OT_LONG
) {
4748 #ifdef TARGET_X86_64
4749 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4750 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4751 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4752 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4753 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4754 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4758 t0
= tcg_temp_new_i64();
4759 t1
= tcg_temp_new_i64();
4760 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4761 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4762 tcg_gen_mul_i64(t0
, t0
, t1
);
4763 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4764 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4765 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4766 tcg_gen_shri_i64(t0
, t0
, 32);
4767 tcg_gen_trunc_i64_i32(cpu_T
[1], t0
);
4768 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[1], cpu_tmp0
);
4772 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4773 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4774 /* XXX: use 32 bit mul which could be faster */
4775 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4776 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4777 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4778 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4780 gen_op_mov_reg_T0(ot
, reg
);
4781 s
->cc_op
= CC_OP_MULB
+ ot
;
4784 case 0x1c1: /* xadd Ev, Gv */
4788 ot
= dflag
+ OT_WORD
;
4789 modrm
= ldub_code(s
->pc
++);
4790 reg
= ((modrm
>> 3) & 7) | rex_r
;
4791 mod
= (modrm
>> 6) & 3;
4793 rm
= (modrm
& 7) | REX_B(s
);
4794 gen_op_mov_TN_reg(ot
, 0, reg
);
4795 gen_op_mov_TN_reg(ot
, 1, rm
);
4796 gen_op_addl_T0_T1();
4797 gen_op_mov_reg_T1(ot
, reg
);
4798 gen_op_mov_reg_T0(ot
, rm
);
4800 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4801 gen_op_mov_TN_reg(ot
, 0, reg
);
4802 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4803 gen_op_addl_T0_T1();
4804 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4805 gen_op_mov_reg_T1(ot
, reg
);
4807 gen_op_update2_cc();
4808 s
->cc_op
= CC_OP_ADDB
+ ot
;
4811 case 0x1b1: /* cmpxchg Ev, Gv */
4814 TCGv t0
, t1
, t2
, a0
;
4819 ot
= dflag
+ OT_WORD
;
4820 modrm
= ldub_code(s
->pc
++);
4821 reg
= ((modrm
>> 3) & 7) | rex_r
;
4822 mod
= (modrm
>> 6) & 3;
4823 t0
= tcg_temp_local_new();
4824 t1
= tcg_temp_local_new();
4825 t2
= tcg_temp_local_new();
4826 a0
= tcg_temp_local_new();
4827 gen_op_mov_v_reg(ot
, t1
, reg
);
4829 rm
= (modrm
& 7) | REX_B(s
);
4830 gen_op_mov_v_reg(ot
, t0
, rm
);
4832 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4833 tcg_gen_mov_tl(a0
, cpu_A0
);
4834 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
4835 rm
= 0; /* avoid warning */
4837 label1
= gen_new_label();
4838 tcg_gen_ld_tl(t2
, cpu_env
, offsetof(CPUState
, regs
[R_EAX
]));
4839 tcg_gen_sub_tl(t2
, t2
, t0
);
4841 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
4843 label2
= gen_new_label();
4844 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
4846 gen_set_label(label1
);
4847 gen_op_mov_reg_v(ot
, rm
, t1
);
4848 gen_set_label(label2
);
4850 tcg_gen_mov_tl(t1
, t0
);
4851 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
4852 gen_set_label(label1
);
4854 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
4856 tcg_gen_mov_tl(cpu_cc_src
, t0
);
4857 tcg_gen_mov_tl(cpu_cc_dst
, t2
);
4858 s
->cc_op
= CC_OP_SUBB
+ ot
;
4865 case 0x1c7: /* cmpxchg8b */
4866 modrm
= ldub_code(s
->pc
++);
4867 mod
= (modrm
>> 6) & 3;
4868 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
4870 #ifdef TARGET_X86_64
4872 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
4874 gen_jmp_im(pc_start
- s
->cs_base
);
4875 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4876 gen_op_set_cc_op(s
->cc_op
);
4877 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4878 gen_helper_cmpxchg16b(cpu_A0
);
4882 if (!(s
->cpuid_features
& CPUID_CX8
))
4884 gen_jmp_im(pc_start
- s
->cs_base
);
4885 if (s
->cc_op
!= CC_OP_DYNAMIC
)
4886 gen_op_set_cc_op(s
->cc_op
);
4887 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
4888 gen_helper_cmpxchg8b(cpu_A0
);
4890 s
->cc_op
= CC_OP_EFLAGS
;
4893 /**************************/
4895 case 0x50 ... 0x57: /* push */
4896 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
4899 case 0x58 ... 0x5f: /* pop */
4901 ot
= dflag
? OT_QUAD
: OT_WORD
;
4903 ot
= dflag
+ OT_WORD
;
4906 /* NOTE: order is important for pop %sp */
4908 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
4910 case 0x60: /* pusha */
4915 case 0x61: /* popa */
4920 case 0x68: /* push Iv */
4923 ot
= dflag
? OT_QUAD
: OT_WORD
;
4925 ot
= dflag
+ OT_WORD
;
4928 val
= insn_get(s
, ot
);
4930 val
= (int8_t)insn_get(s
, OT_BYTE
);
4931 gen_op_movl_T0_im(val
);
4934 case 0x8f: /* pop Ev */
4936 ot
= dflag
? OT_QUAD
: OT_WORD
;
4938 ot
= dflag
+ OT_WORD
;
4940 modrm
= ldub_code(s
->pc
++);
4941 mod
= (modrm
>> 6) & 3;
4944 /* NOTE: order is important for pop %sp */
4946 rm
= (modrm
& 7) | REX_B(s
);
4947 gen_op_mov_reg_T0(ot
, rm
);
4949 /* NOTE: order is important too for MMU exceptions */
4950 s
->popl_esp_hack
= 1 << ot
;
4951 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
4952 s
->popl_esp_hack
= 0;
4956 case 0xc8: /* enter */
4959 val
= lduw_code(s
->pc
);
4961 level
= ldub_code(s
->pc
++);
4962 gen_enter(s
, val
, level
);
4965 case 0xc9: /* leave */
4966 /* XXX: exception not precise (ESP is updated before potential exception) */
4968 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
4969 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
4970 } else if (s
->ss32
) {
4971 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
4972 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
4974 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
4975 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
4979 ot
= dflag
? OT_QUAD
: OT_WORD
;
4981 ot
= dflag
+ OT_WORD
;
4983 gen_op_mov_reg_T0(ot
, R_EBP
);
4986 case 0x06: /* push es */
4987 case 0x0e: /* push cs */
4988 case 0x16: /* push ss */
4989 case 0x1e: /* push ds */
4992 gen_op_movl_T0_seg(b
>> 3);
4995 case 0x1a0: /* push fs */
4996 case 0x1a8: /* push gs */
4997 gen_op_movl_T0_seg((b
>> 3) & 7);
5000 case 0x07: /* pop es */
5001 case 0x17: /* pop ss */
5002 case 0x1f: /* pop ds */
5007 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5010 /* if reg == SS, inhibit interrupts/trace. */
5011 /* If several instructions disable interrupts, only the
5013 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5014 gen_helper_set_inhibit_irq();
5018 gen_jmp_im(s
->pc
- s
->cs_base
);
5022 case 0x1a1: /* pop fs */
5023 case 0x1a9: /* pop gs */
5025 gen_movl_seg_T0(s
, (b
>> 3) & 7, pc_start
- s
->cs_base
);
5028 gen_jmp_im(s
->pc
- s
->cs_base
);
5033 /**************************/
5036 case 0x89: /* mov Gv, Ev */
5040 ot
= dflag
+ OT_WORD
;
5041 modrm
= ldub_code(s
->pc
++);
5042 reg
= ((modrm
>> 3) & 7) | rex_r
;
5044 /* generate a generic store */
5045 gen_ldst_modrm(s
, modrm
, ot
, reg
, 1);
5048 case 0xc7: /* mov Ev, Iv */
5052 ot
= dflag
+ OT_WORD
;
5053 modrm
= ldub_code(s
->pc
++);
5054 mod
= (modrm
>> 6) & 3;
5056 s
->rip_offset
= insn_const_size(ot
);
5057 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5059 val
= insn_get(s
, ot
);
5060 gen_op_movl_T0_im(val
);
5062 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5064 gen_op_mov_reg_T0(ot
, (modrm
& 7) | REX_B(s
));
5067 case 0x8b: /* mov Ev, Gv */
5071 ot
= OT_WORD
+ dflag
;
5072 modrm
= ldub_code(s
->pc
++);
5073 reg
= ((modrm
>> 3) & 7) | rex_r
;
5075 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
5076 gen_op_mov_reg_T0(ot
, reg
);
    case 0x8e: /* mov seg, Gv */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq();
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        if (mod == 3)
            ot = OT_WORD + dflag;
        else
            ot = OT_WORD;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
        break;
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of source */
            ot = (b & 1) + OT_BYTE;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(ot, 0, rm);
                switch(ot | (b & 8)) {
                case OT_BYTE:
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_BYTE | 8:
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_WORD:
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                case OT_WORD | 8:
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (b & 8) {
                    gen_op_lds_T0_A0(ot + s->mem_index);
                } else {
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        }
        break;
    case 0x8d: /* lea */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        s->override = -1;
        val = s->addseg;
        s->addseg = 0;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        s->addseg = val;
        gen_op_mov_reg_A0(ot - OT_WORD, reg);
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                offset_addr = ldq_code(s->pc);
                s->pc += 8;
                gen_op_movq_A0_im(offset_addr);
            } else
#endif
            {
                if (s->aflag) {
                    offset_addr = insn_get(s, OT_LONG);
                } else {
                    offset_addr = insn_get(s, OT_WORD);
                }
                gen_op_movl_A0_im(offset_addr);
            }
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, R_EAX);
            } else {
                gen_op_mov_TN_reg(ot, 0, R_EAX);
                gen_op_st_T0_A0(ot + s->mem_index);
            }
        }
        break;
    case 0xd7: /* xlat */
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_movq_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
        } else
#endif
        {
            gen_op_movl_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
            if (!s->aflag)
                gen_op_andl_A0_ffff();
            else
                tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        }
        gen_add_A0_ds_seg(s);
        gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
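        /* xlat: AL = [seg:(E/R)BX + unsigned AL].  AL is masked to 8 bits
           and added to the table base before the DS-relative (or prefix
           overridden) segment base is applied. */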
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = ldq_code(s->pc);
            s->pc += 8;
            reg = (b & 7) | REX_B(s);
            gen_movtl_T0_im(tmp);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            ot = dflag ? OT_LONG : OT_WORD;
            val = insn_get(s, ot);
            reg = (b & 7) | REX_B(s);
            gen_op_movl_T0_im(val);
            gen_op_mov_reg_T0(ot, reg);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
        ot = dflag + OT_WORD;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_mov_reg_T0(ot, rm);
            gen_op_mov_reg_T1(ot, reg);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            /* for xchg, lock is implicit */
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_lock();
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_st_T0_A0(ot + s->mem_index);
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_unlock();
            gen_op_mov_reg_T1(ot, reg);
        }
        break;
    case 0xc4: /* les Gv */
        if (CODE64(s))
            goto illegal_op;
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        if (CODE64(s))
            goto illegal_op;
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
        /* load the segment first to handle exceptions properly */
        gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_T1(ot, reg);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
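        /* A far pointer in memory is laid out offset first, then selector:
           the offset is 2 or 4 bytes (1 << (ot - OT_WORD + 1)), so A0 is
           advanced past it before the 16-bit selector is fetched. */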
5331 /************************/
5342 ot
= dflag
+ OT_WORD
;
5344 modrm
= ldub_code(s
->pc
++);
5345 mod
= (modrm
>> 6) & 3;
5346 op
= (modrm
>> 3) & 7;
5352 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5355 opreg
= (modrm
& 7) | REX_B(s
);
5360 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5363 shift
= ldub_code(s
->pc
++);
5365 gen_shifti(s
, op
, ot
, opreg
, shift
);
5380 case 0x1a4: /* shld imm */
5384 case 0x1a5: /* shld cl */
5388 case 0x1ac: /* shrd imm */
5392 case 0x1ad: /* shrd cl */
5396 ot
= dflag
+ OT_WORD
;
5397 modrm
= ldub_code(s
->pc
++);
5398 mod
= (modrm
>> 6) & 3;
5399 rm
= (modrm
& 7) | REX_B(s
);
5400 reg
= ((modrm
>> 3) & 7) | rex_r
;
5402 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5407 gen_op_mov_TN_reg(ot
, 1, reg
);
5410 val
= ldub_code(s
->pc
++);
5411 tcg_gen_movi_tl(cpu_T3
, val
);
5413 tcg_gen_ld_tl(cpu_T3
, cpu_env
, offsetof(CPUState
, regs
[R_ECX
]));
5415 gen_shiftd_rm_T1_T3(s
, ot
, opreg
, op
);
5418 /************************/
5421 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5422 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5423 /* XXX: what to do if illegal op ? */
5424 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5427 modrm
= ldub_code(s
->pc
++);
5428 mod
= (modrm
>> 6) & 3;
5430 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5433 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5435 case 0x00 ... 0x07: /* fxxxs */
5436 case 0x10 ... 0x17: /* fixxxl */
5437 case 0x20 ... 0x27: /* fxxxl */
5438 case 0x30 ... 0x37: /* fixxx */
5445 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5446 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5447 gen_helper_flds_FT0(cpu_tmp2_i32
);
5450 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5451 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5452 gen_helper_fildl_FT0(cpu_tmp2_i32
);
5455 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5456 (s
->mem_index
>> 2) - 1);
5457 gen_helper_fldl_FT0(cpu_tmp1_i64
);
5461 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5462 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5463 gen_helper_fildl_FT0(cpu_tmp2_i32
);
5467 gen_helper_fp_arith_ST0_FT0(op1
);
5469 /* fcomp needs pop */
5474 case 0x08: /* flds */
5475 case 0x0a: /* fsts */
5476 case 0x0b: /* fstps */
5477 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5478 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5479 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5484 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5485 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5486 gen_helper_flds_ST0(cpu_tmp2_i32
);
5489 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5490 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5491 gen_helper_fildl_ST0(cpu_tmp2_i32
);
5494 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5495 (s
->mem_index
>> 2) - 1);
5496 gen_helper_fldl_ST0(cpu_tmp1_i64
);
5500 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5501 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5502 gen_helper_fildl_ST0(cpu_tmp2_i32
);
5507 /* XXX: the corresponding CPUID bit must be tested ! */
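                /* fisttp (store integer with truncation) was introduced
                   with SSE3, so strictly the SSE3 CPUID feature bit should
                   be checked before accepting it. */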
5510 gen_helper_fisttl_ST0(cpu_tmp2_i32
);
5511 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5512 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5515 gen_helper_fisttll_ST0(cpu_tmp1_i64
);
5516 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5517 (s
->mem_index
>> 2) - 1);
5521 gen_helper_fistt_ST0(cpu_tmp2_i32
);
5522 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5523 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5531 gen_helper_fsts_ST0(cpu_tmp2_i32
);
5532 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5533 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5536 gen_helper_fistl_ST0(cpu_tmp2_i32
);
5537 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5538 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5541 gen_helper_fstl_ST0(cpu_tmp1_i64
);
5542 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5543 (s
->mem_index
>> 2) - 1);
5547 gen_helper_fist_ST0(cpu_tmp2_i32
);
5548 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5549 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5557 case 0x0c: /* fldenv mem */
5558 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5559 gen_op_set_cc_op(s
->cc_op
);
5560 gen_jmp_im(pc_start
- s
->cs_base
);
5562 cpu_A0
, tcg_const_i32(s
->dflag
));
5564 case 0x0d: /* fldcw mem */
5565 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
5566 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5567 gen_helper_fldcw(cpu_tmp2_i32
);
5569 case 0x0e: /* fnstenv mem */
5570 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5571 gen_op_set_cc_op(s
->cc_op
);
5572 gen_jmp_im(pc_start
- s
->cs_base
);
5573 gen_helper_fstenv(cpu_A0
, tcg_const_i32(s
->dflag
));
5575 case 0x0f: /* fnstcw mem */
5576 gen_helper_fnstcw(cpu_tmp2_i32
);
5577 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5578 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5580 case 0x1d: /* fldt mem */
5581 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5582 gen_op_set_cc_op(s
->cc_op
);
5583 gen_jmp_im(pc_start
- s
->cs_base
);
5584 gen_helper_fldt_ST0(cpu_A0
);
5586 case 0x1f: /* fstpt mem */
5587 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5588 gen_op_set_cc_op(s
->cc_op
);
5589 gen_jmp_im(pc_start
- s
->cs_base
);
5590 gen_helper_fstt_ST0(cpu_A0
);
5593 case 0x2c: /* frstor mem */
5594 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5595 gen_op_set_cc_op(s
->cc_op
);
5596 gen_jmp_im(pc_start
- s
->cs_base
);
5597 gen_helper_frstor(cpu_A0
, tcg_const_i32(s
->dflag
));
5599 case 0x2e: /* fnsave mem */
5600 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5601 gen_op_set_cc_op(s
->cc_op
);
5602 gen_jmp_im(pc_start
- s
->cs_base
);
5603 gen_helper_fsave(cpu_A0
, tcg_const_i32(s
->dflag
));
5605 case 0x2f: /* fnstsw mem */
5606 gen_helper_fnstsw(cpu_tmp2_i32
);
5607 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5608 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5610 case 0x3c: /* fbld */
5611 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5612 gen_op_set_cc_op(s
->cc_op
);
5613 gen_jmp_im(pc_start
- s
->cs_base
);
5614 gen_helper_fbld_ST0(cpu_A0
);
5616 case 0x3e: /* fbstp */
5617 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5618 gen_op_set_cc_op(s
->cc_op
);
5619 gen_jmp_im(pc_start
- s
->cs_base
);
5620 gen_helper_fbst_ST0(cpu_A0
);
5623 case 0x3d: /* fildll */
5624 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5625 (s
->mem_index
>> 2) - 1);
5626 gen_helper_fildll_ST0(cpu_tmp1_i64
);
5628 case 0x3f: /* fistpll */
5629 gen_helper_fistll_ST0(cpu_tmp1_i64
);
5630 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5631 (s
->mem_index
>> 2) - 1);
5638 /* register float ops */
5642 case 0x08: /* fld sti */
5644 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg
+ 1) & 7));
5646 case 0x09: /* fxchg sti */
5647 case 0x29: /* fxchg4 sti, undocumented op */
5648 case 0x39: /* fxchg7 sti, undocumented op */
5649 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg
));
5651 case 0x0a: /* grp d9/2 */
5654 /* check exceptions (FreeBSD FPU probe) */
5655 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5656 gen_op_set_cc_op(s
->cc_op
);
5657 gen_jmp_im(pc_start
- s
->cs_base
);
5664 case 0x0c: /* grp d9/4 */
5667 gen_helper_fchs_ST0();
5670 gen_helper_fabs_ST0();
5673 gen_helper_fldz_FT0();
5674 gen_helper_fcom_ST0_FT0();
5677 gen_helper_fxam_ST0();
5683 case 0x0d: /* grp d9/5 */
5688 gen_helper_fld1_ST0();
5692 gen_helper_fldl2t_ST0();
5696 gen_helper_fldl2e_ST0();
5700 gen_helper_fldpi_ST0();
5704 gen_helper_fldlg2_ST0();
5708 gen_helper_fldln2_ST0();
5712 gen_helper_fldz_ST0();
5719 case 0x0e: /* grp d9/6 */
5730 case 3: /* fpatan */
5731 gen_helper_fpatan();
5733 case 4: /* fxtract */
5734 gen_helper_fxtract();
5736 case 5: /* fprem1 */
5737 gen_helper_fprem1();
5739 case 6: /* fdecstp */
5740 gen_helper_fdecstp();
5743 case 7: /* fincstp */
5744 gen_helper_fincstp();
5748 case 0x0f: /* grp d9/7 */
5753 case 1: /* fyl2xp1 */
5754 gen_helper_fyl2xp1();
5759 case 3: /* fsincos */
5760 gen_helper_fsincos();
5762 case 5: /* fscale */
5763 gen_helper_fscale();
5765 case 4: /* frndint */
5766 gen_helper_frndint();
5777 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5778 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5779 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5785 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
5789 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5790 gen_helper_fp_arith_ST0_FT0(op1
);
5794 case 0x02: /* fcom */
5795 case 0x22: /* fcom2, undocumented op */
5796 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5797 gen_helper_fcom_ST0_FT0();
5799 case 0x03: /* fcomp */
5800 case 0x23: /* fcomp3, undocumented op */
5801 case 0x32: /* fcomp5, undocumented op */
5802 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5803 gen_helper_fcom_ST0_FT0();
5806 case 0x15: /* da/5 */
5808 case 1: /* fucompp */
5809 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5810 gen_helper_fucom_ST0_FT0();
5820 case 0: /* feni (287 only, just do nop here) */
5822 case 1: /* fdisi (287 only, just do nop here) */
5827 case 3: /* fninit */
5828 gen_helper_fninit();
5830 case 4: /* fsetpm (287 only, just do nop here) */
5836 case 0x1d: /* fucomi */
5837 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5838 gen_op_set_cc_op(s
->cc_op
);
5839 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5840 gen_helper_fucomi_ST0_FT0();
5841 s
->cc_op
= CC_OP_EFLAGS
;
5843 case 0x1e: /* fcomi */
5844 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5845 gen_op_set_cc_op(s
->cc_op
);
5846 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5847 gen_helper_fcomi_ST0_FT0();
5848 s
->cc_op
= CC_OP_EFLAGS
;
5850 case 0x28: /* ffree sti */
5851 gen_helper_ffree_STN(tcg_const_i32(opreg
));
5853 case 0x2a: /* fst sti */
5854 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg
));
5856 case 0x2b: /* fstp sti */
5857 case 0x0b: /* fstp1 sti, undocumented op */
5858 case 0x3a: /* fstp8 sti, undocumented op */
5859 case 0x3b: /* fstp9 sti, undocumented op */
5860 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg
));
5863 case 0x2c: /* fucom st(i) */
5864 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5865 gen_helper_fucom_ST0_FT0();
5867 case 0x2d: /* fucomp st(i) */
5868 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5869 gen_helper_fucom_ST0_FT0();
5872 case 0x33: /* de/3 */
5874 case 1: /* fcompp */
5875 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5876 gen_helper_fcom_ST0_FT0();
5884 case 0x38: /* ffreep sti, undocumented op */
5885 gen_helper_ffree_STN(tcg_const_i32(opreg
));
5888 case 0x3c: /* df/4 */
5891 gen_helper_fnstsw(cpu_tmp2_i32
);
5892 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5893 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5899 case 0x3d: /* fucomip */
5900 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5901 gen_op_set_cc_op(s
->cc_op
);
5902 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5903 gen_helper_fucomi_ST0_FT0();
5905 s
->cc_op
= CC_OP_EFLAGS
;
5907 case 0x3e: /* fcomip */
5908 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5909 gen_op_set_cc_op(s
->cc_op
);
5910 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5911 gen_helper_fcomi_ST0_FT0();
5913 s
->cc_op
= CC_OP_EFLAGS
;
5915 case 0x10 ... 0x13: /* fcmovxx */
5919 static const uint8_t fcmov_cc
[8] = {
5925 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
5926 l1
= gen_new_label();
5927 gen_jcc1(s
, s
->cc_op
, op1
, l1
);
5928 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg
));
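                /* The fcmov_cc table maps the four FCMOV conditions
                   (B, E, BE, U) onto jcc condition codes; opcode bit 3
                   selects the negated form.  The branch above skips the
                   register move, so ST(0) is only replaced when the
                   condition actually holds. */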
5937 /************************/
5940 case 0xa4: /* movsS */
5945 ot
= dflag
+ OT_WORD
;
5947 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5948 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
5954 case 0xaa: /* stosS */
5959 ot
= dflag
+ OT_WORD
;
5961 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5962 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
5967 case 0xac: /* lodsS */
5972 ot
= dflag
+ OT_WORD
;
5973 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5974 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
5979 case 0xae: /* scasS */
5984 ot
= dflag
+ OT_WORD
;
5985 if (prefixes
& PREFIX_REPNZ
) {
5986 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
5987 } else if (prefixes
& PREFIX_REPZ
) {
5988 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
5991 s
->cc_op
= CC_OP_SUBB
+ ot
;
5995 case 0xa6: /* cmpsS */
6000 ot
= dflag
+ OT_WORD
;
6001 if (prefixes
& PREFIX_REPNZ
) {
6002 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6003 } else if (prefixes
& PREFIX_REPZ
) {
6004 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6007 s
->cc_op
= CC_OP_SUBB
+ ot
;
6010 case 0x6c: /* insS */
6015 ot
= dflag
? OT_LONG
: OT_WORD
;
6016 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6017 gen_op_andl_T0_ffff();
6018 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6019 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6020 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6021 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6025 gen_jmp(s
, s
->pc
- s
->cs_base
);
6029 case 0x6e: /* outsS */
6034 ot
= dflag
? OT_LONG
: OT_WORD
;
6035 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6036 gen_op_andl_T0_ffff();
6037 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6038 svm_is_rep(prefixes
) | 4);
6039 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6040 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6044 gen_jmp(s
, s
->pc
- s
->cs_base
);
6049 /************************/
6057 ot
= dflag
? OT_LONG
: OT_WORD
;
6058 val
= ldub_code(s
->pc
++);
6059 gen_op_movl_T0_im(val
);
6060 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6061 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6064 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6065 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6066 gen_op_mov_reg_T1(ot
, R_EAX
);
6069 gen_jmp(s
, s
->pc
- s
->cs_base
);
6077 ot
= dflag
? OT_LONG
: OT_WORD
;
6078 val
= ldub_code(s
->pc
++);
6079 gen_op_movl_T0_im(val
);
6080 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6081 svm_is_rep(prefixes
));
6082 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6086 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6087 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
6088 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6089 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6092 gen_jmp(s
, s
->pc
- s
->cs_base
);
6100 ot
= dflag
? OT_LONG
: OT_WORD
;
6101 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6102 gen_op_andl_T0_ffff();
6103 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6104 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6107 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6108 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6109 gen_op_mov_reg_T1(ot
, R_EAX
);
6112 gen_jmp(s
, s
->pc
- s
->cs_base
);
6120 ot
= dflag
? OT_LONG
: OT_WORD
;
6121 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6122 gen_op_andl_T0_ffff();
6123 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6124 svm_is_rep(prefixes
));
6125 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6129 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6130 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
6131 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6132 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6135 gen_jmp(s
, s
->pc
- s
->cs_base
);
6139 /************************/
6141 case 0xc2: /* ret im */
6142 val
= ldsw_code(s
->pc
);
6145 if (CODE64(s
) && s
->dflag
)
6147 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6149 gen_op_andl_T0_ffff();
6153 case 0xc3: /* ret */
6157 gen_op_andl_T0_ffff();
6161 case 0xca: /* lret im */
6162 val
= ldsw_code(s
->pc
);
6165 if (s
->pe
&& !s
->vm86
) {
6166 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6167 gen_op_set_cc_op(s
->cc_op
);
6168 gen_jmp_im(pc_start
- s
->cs_base
);
6169 gen_helper_lret_protected(tcg_const_i32(s
->dflag
),
6170 tcg_const_i32(val
));
6174 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6176 gen_op_andl_T0_ffff();
6177 /* NOTE: keeping EIP updated is not a problem in case of
6181 gen_op_addl_A0_im(2 << s
->dflag
);
6182 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6183 gen_op_movl_seg_T0_vm(R_CS
);
6184 /* add stack offset */
6185 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6189 case 0xcb: /* lret */
6192 case 0xcf: /* iret */
6193 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6196 gen_helper_iret_real(tcg_const_i32(s
->dflag
));
6197 s
->cc_op
= CC_OP_EFLAGS
;
6198 } else if (s
->vm86
) {
6200 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6202 gen_helper_iret_real(tcg_const_i32(s
->dflag
));
6203 s
->cc_op
= CC_OP_EFLAGS
;
6206 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6207 gen_op_set_cc_op(s
->cc_op
);
6208 gen_jmp_im(pc_start
- s
->cs_base
);
6209 gen_helper_iret_protected(tcg_const_i32(s
->dflag
),
6210 tcg_const_i32(s
->pc
- s
->cs_base
));
6211 s
->cc_op
= CC_OP_EFLAGS
;
6215 case 0xe8: /* call im */
6218 tval
= (int32_t)insn_get(s
, OT_LONG
);
6220 tval
= (int16_t)insn_get(s
, OT_WORD
);
6221 next_eip
= s
->pc
- s
->cs_base
;
6225 gen_movtl_T0_im(next_eip
);
6230 case 0x9a: /* lcall im */
6232 unsigned int selector
, offset
;
6236 ot
= dflag
? OT_LONG
: OT_WORD
;
6237 offset
= insn_get(s
, ot
);
6238 selector
= insn_get(s
, OT_WORD
);
6240 gen_op_movl_T0_im(selector
);
6241 gen_op_movl_T1_imu(offset
);
6244 case 0xe9: /* jmp im */
6246 tval
= (int32_t)insn_get(s
, OT_LONG
);
6248 tval
= (int16_t)insn_get(s
, OT_WORD
);
6249 tval
+= s
->pc
- s
->cs_base
;
6256 case 0xea: /* ljmp im */
6258 unsigned int selector
, offset
;
6262 ot
= dflag
? OT_LONG
: OT_WORD
;
6263 offset
= insn_get(s
, ot
);
6264 selector
= insn_get(s
, OT_WORD
);
6266 gen_op_movl_T0_im(selector
);
6267 gen_op_movl_T1_imu(offset
);
6270 case 0xeb: /* jmp Jb */
6271 tval
= (int8_t)insn_get(s
, OT_BYTE
);
6272 tval
+= s
->pc
- s
->cs_base
;
6277 case 0x70 ... 0x7f: /* jcc Jb */
6278 tval
= (int8_t)insn_get(s
, OT_BYTE
);
6280 case 0x180 ... 0x18f: /* jcc Jv */
6282 tval
= (int32_t)insn_get(s
, OT_LONG
);
6284 tval
= (int16_t)insn_get(s
, OT_WORD
);
6287 next_eip
= s
->pc
- s
->cs_base
;
6291 gen_jcc(s
, b
, tval
, next_eip
);
6294 case 0x190 ... 0x19f: /* setcc Gv */
6295 modrm
= ldub_code(s
->pc
++);
6297 gen_ldst_modrm(s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6299 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6304 ot
= dflag
+ OT_WORD
;
6305 modrm
= ldub_code(s
->pc
++);
6306 reg
= ((modrm
>> 3) & 7) | rex_r
;
6307 mod
= (modrm
>> 6) & 3;
6308 t0
= tcg_temp_local_new();
6310 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6311 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
6313 rm
= (modrm
& 7) | REX_B(s
);
6314 gen_op_mov_v_reg(ot
, t0
, rm
);
6316 #ifdef TARGET_X86_64
6317 if (ot
== OT_LONG
) {
6318 /* XXX: specific Intel behaviour ? */
6319 l1
= gen_new_label();
6320 gen_jcc1(s
, s
->cc_op
, b
^ 1, l1
);
6321 tcg_gen_st32_tl(t0
, cpu_env
, offsetof(CPUState
, regs
[reg
]) + REG_L_OFFSET
);
6323 tcg_gen_movi_tl(cpu_tmp0
, 0);
6324 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
, offsetof(CPUState
, regs
[reg
]) + REG_LH_OFFSET
);
6328 l1
= gen_new_label();
6329 gen_jcc1(s
, s
->cc_op
, b
^ 1, l1
);
6330 gen_op_mov_reg_v(ot
, reg
, t0
);
6337 /************************/
6339 case 0x9c: /* pushf */
6340 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6341 if (s
->vm86
&& s
->iopl
!= 3) {
6342 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6344 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6345 gen_op_set_cc_op(s
->cc_op
);
6346 gen_helper_read_eflags(cpu_T
[0]);
6350 case 0x9d: /* popf */
6351 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6352 if (s
->vm86
&& s
->iopl
!= 3) {
6353 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6358 gen_helper_write_eflags(cpu_T
[0],
6359 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
| IOPL_MASK
)));
6361 gen_helper_write_eflags(cpu_T
[0],
6362 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
| IOPL_MASK
) & 0xffff));
6365 if (s
->cpl
<= s
->iopl
) {
6367 gen_helper_write_eflags(cpu_T
[0],
6368 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
)));
6370 gen_helper_write_eflags(cpu_T
[0],
6371 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
) & 0xffff));
6375 gen_helper_write_eflags(cpu_T
[0],
6376 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
)));
6378 gen_helper_write_eflags(cpu_T
[0],
6379 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
) & 0xffff));
6384 s
->cc_op
= CC_OP_EFLAGS
;
6385 /* abort translation because TF flag may change */
6386 gen_jmp_im(s
->pc
- s
->cs_base
);
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_T[0]);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
        break;
    case 0xf5: /* cmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf8: /* clc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf9: /* stc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
        break;

        /************************/
        /* bit operations */
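        /* For bt/bts/btr/btc with a register bit offset (the Gv, Ev forms
           below), the offset is not reduced modulo the operand size: the
           sign-extended bit index is divided by the operand width and
           scaled back to bytes, then added to the address, so the access
           may fall outside the addressed operand. */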
6444 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6445 ot
= dflag
+ OT_WORD
;
6446 modrm
= ldub_code(s
->pc
++);
6447 op
= (modrm
>> 3) & 7;
6448 mod
= (modrm
>> 6) & 3;
6449 rm
= (modrm
& 7) | REX_B(s
);
6452 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6453 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6455 gen_op_mov_TN_reg(ot
, 0, rm
);
6458 val
= ldub_code(s
->pc
++);
6459 gen_op_movl_T1_im(val
);
6464 case 0x1a3: /* bt Gv, Ev */
6467 case 0x1ab: /* bts */
6470 case 0x1b3: /* btr */
6473 case 0x1bb: /* btc */
6476 ot
= dflag
+ OT_WORD
;
6477 modrm
= ldub_code(s
->pc
++);
6478 reg
= ((modrm
>> 3) & 7) | rex_r
;
6479 mod
= (modrm
>> 6) & 3;
6480 rm
= (modrm
& 7) | REX_B(s
);
6481 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
6483 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6484 /* specific case: we need to add a displacement */
6485 gen_exts(ot
, cpu_T
[1]);
6486 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
6487 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
6488 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
6489 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6491 gen_op_mov_TN_reg(ot
, 0, rm
);
6494 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
6497 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
6498 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6501 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6502 tcg_gen_movi_tl(cpu_tmp0
, 1);
6503 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6504 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6507 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6508 tcg_gen_movi_tl(cpu_tmp0
, 1);
6509 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6510 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
6511 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6515 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6516 tcg_gen_movi_tl(cpu_tmp0
, 1);
6517 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6518 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6521 s
->cc_op
= CC_OP_SARB
+ ot
;
6524 gen_op_st_T0_A0(ot
+ s
->mem_index
);
6526 gen_op_mov_reg_T0(ot
, rm
);
6527 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
6528 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6531 case 0x1bc: /* bsf */
6532 case 0x1bd: /* bsr */
6537 ot
= dflag
+ OT_WORD
;
6538 modrm
= ldub_code(s
->pc
++);
6539 reg
= ((modrm
>> 3) & 7) | rex_r
;
6540 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
6541 gen_extu(ot
, cpu_T
[0]);
6542 label1
= gen_new_label();
6543 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6544 t0
= tcg_temp_local_new();
6545 tcg_gen_mov_tl(t0
, cpu_T
[0]);
6546 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, label1
);
6548 gen_helper_bsr(cpu_T
[0], t0
);
6550 gen_helper_bsf(cpu_T
[0], t0
);
6552 gen_op_mov_reg_T0(ot
, reg
);
6553 tcg_gen_movi_tl(cpu_cc_dst
, 1);
6554 gen_set_label(label1
);
6555 tcg_gen_discard_tl(cpu_cc_src
);
6556 s
->cc_op
= CC_OP_LOGICB
+ ot
;
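            /* Only ZF is defined after bsf/bsr: cpu_cc_dst stays 0 (ZF set)
               when the source was zero, in which case the destination is
               left unmodified, and is forced to 1 (ZF clear) otherwise. */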
6560 /************************/
6562 case 0x27: /* daa */
6565 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6566 gen_op_set_cc_op(s
->cc_op
);
6568 s
->cc_op
= CC_OP_EFLAGS
;
6570 case 0x2f: /* das */
6573 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6574 gen_op_set_cc_op(s
->cc_op
);
6576 s
->cc_op
= CC_OP_EFLAGS
;
6578 case 0x37: /* aaa */
6581 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6582 gen_op_set_cc_op(s
->cc_op
);
6584 s
->cc_op
= CC_OP_EFLAGS
;
6586 case 0x3f: /* aas */
6589 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6590 gen_op_set_cc_op(s
->cc_op
);
6592 s
->cc_op
= CC_OP_EFLAGS
;
6594 case 0xd4: /* aam */
6597 val
= ldub_code(s
->pc
++);
6599 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6601 gen_helper_aam(tcg_const_i32(val
));
6602 s
->cc_op
= CC_OP_LOGICB
;
6605 case 0xd5: /* aad */
6608 val
= ldub_code(s
->pc
++);
6609 gen_helper_aad(tcg_const_i32(val
));
6610 s
->cc_op
= CC_OP_LOGICB
;
6612 /************************/
6614 case 0x90: /* nop */
6615 /* XXX: xchg + rex handling */
6616 /* XXX: correct lock test for all insn */
6617 if (prefixes
& PREFIX_LOCK
)
6619 if (prefixes
& PREFIX_REPZ
) {
6620 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
6623 case 0x9b: /* fwait */
6624 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6625 (HF_MP_MASK
| HF_TS_MASK
)) {
6626 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6628 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6629 gen_op_set_cc_op(s
->cc_op
);
6630 gen_jmp_im(pc_start
- s
->cs_base
);
6634 case 0xcc: /* int3 */
6635 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6637 case 0xcd: /* int N */
6638 val
= ldub_code(s
->pc
++);
6639 if (s
->vm86
&& s
->iopl
!= 3) {
6640 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6642 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6645 case 0xce: /* into */
6648 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6649 gen_op_set_cc_op(s
->cc_op
);
6650 gen_jmp_im(pc_start
- s
->cs_base
);
6651 gen_helper_into(tcg_const_i32(s
->pc
- pc_start
));
6654 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6655 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6657 gen_debug(s
, pc_start
- s
->cs_base
);
6660 tb_flush(cpu_single_env
);
6661 cpu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6665 case 0xfa: /* cli */
6667 if (s
->cpl
<= s
->iopl
) {
6670 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6676 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6680 case 0xfb: /* sti */
6682 if (s
->cpl
<= s
->iopl
) {
6685 /* interruptions are enabled only the first insn after sti */
6686 /* If several instructions disable interrupts, only the
6688 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
6689 gen_helper_set_inhibit_irq();
6690 /* give a chance to handle pending irqs */
6691 gen_jmp_im(s
->pc
- s
->cs_base
);
6694 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6700 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6704 case 0x62: /* bound */
6707 ot
= dflag
? OT_LONG
: OT_WORD
;
6708 modrm
= ldub_code(s
->pc
++);
6709 reg
= (modrm
>> 3) & 7;
6710 mod
= (modrm
>> 6) & 3;
6713 gen_op_mov_TN_reg(ot
, 0, reg
);
6714 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6715 gen_jmp_im(pc_start
- s
->cs_base
);
6716 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6718 gen_helper_boundw(cpu_A0
, cpu_tmp2_i32
);
6720 gen_helper_boundl(cpu_A0
, cpu_tmp2_i32
);
6722 case 0x1c8 ... 0x1cf: /* bswap reg */
6723 reg
= (b
& 7) | REX_B(s
);
6724 #ifdef TARGET_X86_64
6726 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
6727 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
6728 gen_op_mov_reg_T0(OT_QUAD
, reg
);
6732 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
6733 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
6734 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
6735 gen_op_mov_reg_T0(OT_LONG
, reg
);
6738 case 0xd6: /* salc */
6741 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6742 gen_op_set_cc_op(s
->cc_op
);
6743 gen_compute_eflags_c(cpu_T
[0]);
6744 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
6745 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
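        /* salc (undocumented): AL = CF ? 0xff : 0x00.  The computed carry
           (0 or 1) is negated to 0 or -1 and only the low byte is written
           back to AL. */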
6747 case 0xe0: /* loopnz */
6748 case 0xe1: /* loopz */
6749 case 0xe2: /* loop */
6750 case 0xe3: /* jecxz */
6754 tval
= (int8_t)insn_get(s
, OT_BYTE
);
6755 next_eip
= s
->pc
- s
->cs_base
;
6760 l1
= gen_new_label();
6761 l2
= gen_new_label();
6762 l3
= gen_new_label();
6765 case 0: /* loopnz */
6767 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6768 gen_op_set_cc_op(s
->cc_op
);
6769 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6770 gen_op_jz_ecx(s
->aflag
, l3
);
6771 gen_compute_eflags(cpu_tmp0
);
6772 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_Z
);
6774 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, l1
);
6776 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, l1
);
6780 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6781 gen_op_jnz_ecx(s
->aflag
, l1
);
6785 gen_op_jz_ecx(s
->aflag
, l1
);
6790 gen_jmp_im(next_eip
);
6799 case 0x130: /* wrmsr */
6800 case 0x132: /* rdmsr */
6802 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6804 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6805 gen_op_set_cc_op(s
->cc_op
);
6806 gen_jmp_im(pc_start
- s
->cs_base
);
6814 case 0x131: /* rdtsc */
6815 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6816 gen_op_set_cc_op(s
->cc_op
);
6817 gen_jmp_im(pc_start
- s
->cs_base
);
6823 gen_jmp(s
, s
->pc
- s
->cs_base
);
6826 case 0x133: /* rdpmc */
6827 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6828 gen_op_set_cc_op(s
->cc_op
);
6829 gen_jmp_im(pc_start
- s
->cs_base
);
6832 case 0x134: /* sysenter */
6833 /* For Intel SYSENTER is valid on 64-bit */
6834 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6837 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6839 if (s
->cc_op
!= CC_OP_DYNAMIC
) {
6840 gen_op_set_cc_op(s
->cc_op
);
6841 s
->cc_op
= CC_OP_DYNAMIC
;
6843 gen_jmp_im(pc_start
- s
->cs_base
);
6844 gen_helper_sysenter();
6848 case 0x135: /* sysexit */
6849 /* For Intel SYSEXIT is valid on 64-bit */
6850 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6853 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6855 if (s
->cc_op
!= CC_OP_DYNAMIC
) {
6856 gen_op_set_cc_op(s
->cc_op
);
6857 s
->cc_op
= CC_OP_DYNAMIC
;
6859 gen_jmp_im(pc_start
- s
->cs_base
);
6860 gen_helper_sysexit(tcg_const_i32(dflag
));
6864 #ifdef TARGET_X86_64
6865 case 0x105: /* syscall */
6866 /* XXX: is it usable in real mode ? */
6867 if (s
->cc_op
!= CC_OP_DYNAMIC
) {
6868 gen_op_set_cc_op(s
->cc_op
);
6869 s
->cc_op
= CC_OP_DYNAMIC
;
6871 gen_jmp_im(pc_start
- s
->cs_base
);
6872 gen_helper_syscall(tcg_const_i32(s
->pc
- pc_start
));
6875 case 0x107: /* sysret */
6877 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6879 if (s
->cc_op
!= CC_OP_DYNAMIC
) {
6880 gen_op_set_cc_op(s
->cc_op
);
6881 s
->cc_op
= CC_OP_DYNAMIC
;
6883 gen_jmp_im(pc_start
- s
->cs_base
);
6884 gen_helper_sysret(tcg_const_i32(s
->dflag
));
6885 /* condition codes are modified only in long mode */
6887 s
->cc_op
= CC_OP_EFLAGS
;
6892 case 0x1a2: /* cpuid */
6893 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6894 gen_op_set_cc_op(s
->cc_op
);
6895 gen_jmp_im(pc_start
- s
->cs_base
);
6898 case 0xf4: /* hlt */
6900 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6902 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6903 gen_op_set_cc_op(s
->cc_op
);
6904 gen_jmp_im(pc_start
- s
->cs_base
);
6905 gen_helper_hlt(tcg_const_i32(s
->pc
- pc_start
));
6910 modrm
= ldub_code(s
->pc
++);
6911 mod
= (modrm
>> 6) & 3;
6912 op
= (modrm
>> 3) & 7;
6915 if (!s
->pe
|| s
->vm86
)
6917 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
6918 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
6922 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
6925 if (!s
->pe
|| s
->vm86
)
6928 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6930 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
6931 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
6932 gen_jmp_im(pc_start
- s
->cs_base
);
6933 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6934 gen_helper_lldt(cpu_tmp2_i32
);
6938 if (!s
->pe
|| s
->vm86
)
6940 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
6941 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
6945 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
6948 if (!s
->pe
|| s
->vm86
)
6951 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6953 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
6954 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
6955 gen_jmp_im(pc_start
- s
->cs_base
);
6956 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6957 gen_helper_ltr(cpu_tmp2_i32
);
6962 if (!s
->pe
|| s
->vm86
)
6964 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
6965 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6966 gen_op_set_cc_op(s
->cc_op
);
6968 gen_helper_verr(cpu_T
[0]);
6970 gen_helper_verw(cpu_T
[0]);
6971 s
->cc_op
= CC_OP_EFLAGS
;
6978 modrm
= ldub_code(s
->pc
++);
6979 mod
= (modrm
>> 6) & 3;
6980 op
= (modrm
>> 3) & 7;
6986 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
6987 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6988 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
6989 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6990 gen_add_A0_im(s
, 2);
6991 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
6993 gen_op_andl_T0_im(0xffffff);
6994 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
6999 case 0: /* monitor */
7000 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7003 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7004 gen_op_set_cc_op(s
->cc_op
);
7005 gen_jmp_im(pc_start
- s
->cs_base
);
7006 #ifdef TARGET_X86_64
7007 if (s
->aflag
== 2) {
7008 gen_op_movq_A0_reg(R_EAX
);
7012 gen_op_movl_A0_reg(R_EAX
);
7014 gen_op_andl_A0_ffff();
7016 gen_add_A0_ds_seg(s
);
7017 gen_helper_monitor(cpu_A0
);
7020 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7023 if (s
->cc_op
!= CC_OP_DYNAMIC
) {
7024 gen_op_set_cc_op(s
->cc_op
);
7025 s
->cc_op
= CC_OP_DYNAMIC
;
7027 gen_jmp_im(pc_start
- s
->cs_base
);
7028 gen_helper_mwait(tcg_const_i32(s
->pc
- pc_start
));
7035 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7036 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7037 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7038 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7039 gen_add_A0_im(s
, 2);
7040 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7042 gen_op_andl_T0_im(0xffffff);
7043 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7049 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7050 gen_op_set_cc_op(s
->cc_op
);
7051 gen_jmp_im(pc_start
- s
->cs_base
);
7054 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7057 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7060 gen_helper_vmrun(tcg_const_i32(s
->aflag
),
7061 tcg_const_i32(s
->pc
- pc_start
));
7066 case 1: /* VMMCALL */
7067 if (!(s
->flags
& HF_SVME_MASK
))
7069 gen_helper_vmmcall();
7071 case 2: /* VMLOAD */
7072 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7075 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7078 gen_helper_vmload(tcg_const_i32(s
->aflag
));
7081 case 3: /* VMSAVE */
7082 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7085 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7088 gen_helper_vmsave(tcg_const_i32(s
->aflag
));
7092 if ((!(s
->flags
& HF_SVME_MASK
) &&
7093 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7097 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7104 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7107 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7113 case 6: /* SKINIT */
7114 if ((!(s
->flags
& HF_SVME_MASK
) &&
7115 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7118 gen_helper_skinit();
7120 case 7: /* INVLPGA */
7121 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7124 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7127 gen_helper_invlpga(tcg_const_i32(s
->aflag
));
7133 } else if (s
->cpl
!= 0) {
7134 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7136 gen_svm_check_intercept(s
, pc_start
,
7137 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7138 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7139 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7140 gen_add_A0_im(s
, 2);
7141 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7143 gen_op_andl_T0_im(0xffffff);
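            /* with a 16-bit operand size only 24 bits of the descriptor
               table base are loaded; the top byte is forced to zero */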
7145 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7146 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7148 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7149 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7154 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7155 #if defined TARGET_X86_64 && defined WORDS_BIGENDIAN
7156 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7158 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7160 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7164 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7166 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7167 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7168 gen_helper_lmsw(cpu_T
[0]);
7169 gen_jmp_im(s
->pc
- s
->cs_base
);
7173 case 7: /* invlpg */
7175 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7178 #ifdef TARGET_X86_64
7179 if (CODE64(s
) && rm
== 0) {
7181 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,segs
[R_GS
].base
));
7182 tcg_gen_ld_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,kernelgsbase
));
7183 tcg_gen_st_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,segs
[R_GS
].base
));
7184 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,kernelgsbase
));
7191 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7192 gen_op_set_cc_op(s
->cc_op
);
7193 gen_jmp_im(pc_start
- s
->cs_base
);
7194 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7195 gen_helper_invlpg(cpu_A0
);
7196 gen_jmp_im(s
->pc
- s
->cs_base
);
7205 case 0x108: /* invd */
7206 case 0x109: /* wbinvd */
7208 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7210 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7214 case 0x63: /* arpl or movslS (x86_64) */
7215 #ifdef TARGET_X86_64
7218 /* d_ot is the size of destination */
7219 d_ot
= dflag
+ OT_WORD
;
7221 modrm
= ldub_code(s
->pc
++);
7222 reg
= ((modrm
>> 3) & 7) | rex_r
;
7223 mod
= (modrm
>> 6) & 3;
7224 rm
= (modrm
& 7) | REX_B(s
);
7227 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7229 if (d_ot
== OT_QUAD
)
7230 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7231 gen_op_mov_reg_T0(d_ot
, reg
);
7233 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7234 if (d_ot
== OT_QUAD
) {
7235 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7237 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7239 gen_op_mov_reg_T0(d_ot
, reg
);
7247 if (!s
->pe
|| s
->vm86
)
7249 t0
= tcg_temp_local_new();
7250 t1
= tcg_temp_local_new();
7251 t2
= tcg_temp_local_new();
7253 modrm
= ldub_code(s
->pc
++);
7254 reg
= (modrm
>> 3) & 7;
7255 mod
= (modrm
>> 6) & 3;
7258 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7259 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7261 gen_op_mov_v_reg(ot
, t0
, rm
);
7263 gen_op_mov_v_reg(ot
, t1
, reg
);
7264 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7265 tcg_gen_andi_tl(t1
, t1
, 3);
7266 tcg_gen_movi_tl(t2
, 0);
7267 label1
= gen_new_label();
7268 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7269 tcg_gen_andi_tl(t0
, t0
, ~3);
7270 tcg_gen_or_tl(t0
, t0
, t1
);
7271 tcg_gen_movi_tl(t2
, CC_Z
);
7272 gen_set_label(label1
);
7274 gen_op_st_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7276 gen_op_mov_reg_v(ot
, rm
, t0
);
7278 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7279 gen_op_set_cc_op(s
->cc_op
);
7280 gen_compute_eflags(cpu_cc_src
);
7281 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7282 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7283 s
->cc_op
= CC_OP_EFLAGS
;
7289 case 0x102: /* lar */
7290 case 0x103: /* lsl */
7294 if (!s
->pe
|| s
->vm86
)
7296 ot
= dflag
? OT_LONG
: OT_WORD
;
7297 modrm
= ldub_code(s
->pc
++);
7298 reg
= ((modrm
>> 3) & 7) | rex_r
;
7299 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7300 t0
= tcg_temp_local_new();
7301 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7302 gen_op_set_cc_op(s
->cc_op
);
7304 gen_helper_lar(t0
, cpu_T
[0]);
7306 gen_helper_lsl(t0
, cpu_T
[0]);
7307 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7308 label1
= gen_new_label();
7309 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7310 gen_op_mov_reg_v(ot
, reg
, t0
);
7311 gen_set_label(label1
);
7312 s
->cc_op
= CC_OP_EFLAGS
;
7317 modrm
= ldub_code(s
->pc
++);
7318 mod
= (modrm
>> 6) & 3;
7319 op
= (modrm
>> 3) & 7;
7321 case 0: /* prefetchnta */
7322 case 1: /* prefetchnt0 */
7323 case 2: /* prefetchnt0 */
7324 case 3: /* prefetchnt0 */
7327 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7328 /* nothing more to do */
7330 default: /* nop (multi byte) */
7331 gen_nop_modrm(s
, modrm
);
7335 case 0x119 ... 0x11f: /* nop (multi byte) */
7336 modrm
= ldub_code(s
->pc
++);
7337 gen_nop_modrm(s
, modrm
);
7339 case 0x120: /* mov reg, crN */
7340 case 0x122: /* mov crN, reg */
7342 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7344 modrm
= ldub_code(s
->pc
++);
7345 if ((modrm
& 0xc0) != 0xc0)
7347 rm
= (modrm
& 7) | REX_B(s
);
7348 reg
= ((modrm
>> 3) & 7) | rex_r
;
7359 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7360 gen_op_set_cc_op(s
->cc_op
);
7361 gen_jmp_im(pc_start
- s
->cs_base
);
7363 gen_op_mov_TN_reg(ot
, 0, rm
);
7364 gen_helper_write_crN(tcg_const_i32(reg
), cpu_T
[0]);
7365 gen_jmp_im(s
->pc
- s
->cs_base
);
7368 gen_helper_read_crN(cpu_T
[0], tcg_const_i32(reg
));
7369 gen_op_mov_reg_T0(ot
, rm
);
7377 case 0x121: /* mov reg, drN */
7378 case 0x123: /* mov drN, reg */
7380 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7382 modrm
= ldub_code(s
->pc
++);
7383 if ((modrm
& 0xc0) != 0xc0)
7385 rm
= (modrm
& 7) | REX_B(s
);
7386 reg
= ((modrm
>> 3) & 7) | rex_r
;
7391 /* XXX: do it dynamically with CR4.DE bit */
7392 if (reg
== 4 || reg
== 5 || reg
>= 8)
7395 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
7396 gen_op_mov_TN_reg(ot
, 0, rm
);
7397 gen_helper_movl_drN_T0(tcg_const_i32(reg
), cpu_T
[0]);
7398 gen_jmp_im(s
->pc
- s
->cs_base
);
7401 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
7402 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
7403 gen_op_mov_reg_T0(ot
, rm
);
7407 case 0x106: /* clts */
7409 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7411 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7413 /* abort block because static cpu state changed */
7414 gen_jmp_im(s
->pc
- s
->cs_base
);
7418 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7419 case 0x1c3: /* MOVNTI reg, mem */
7420 if (!(s
->cpuid_features
& CPUID_SSE2
))
7422 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
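        /* movnti is 32-bit unless REX.W makes it 64-bit (a 0x66 prefix is
           not honoured); the non-temporal hint is dropped and a normal
           store is generated. */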
7423 modrm
= ldub_code(s
->pc
++);
7424 mod
= (modrm
>> 6) & 3;
7427 reg
= ((modrm
>> 3) & 7) | rex_r
;
7428 /* generate a generic store */
7429 gen_ldst_modrm(s
, modrm
, ot
, reg
, 1);
7432 modrm
= ldub_code(s
->pc
++);
7433 mod
= (modrm
>> 6) & 3;
7434 op
= (modrm
>> 3) & 7;
7436 case 0: /* fxsave */
7437 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7438 (s
->flags
& HF_EM_MASK
))
7440 if (s
->flags
& HF_TS_MASK
) {
7441 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7444 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7445 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7446 gen_op_set_cc_op(s
->cc_op
);
7447 gen_jmp_im(pc_start
- s
->cs_base
);
7448 gen_helper_fxsave(cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7450 case 1: /* fxrstor */
7451 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7452 (s
->flags
& HF_EM_MASK
))
7454 if (s
->flags
& HF_TS_MASK
) {
7455 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7458 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7459 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7460 gen_op_set_cc_op(s
->cc_op
);
7461 gen_jmp_im(pc_start
- s
->cs_base
);
7462 gen_helper_fxrstor(cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7464 case 2: /* ldmxcsr */
7465 case 3: /* stmxcsr */
7466 if (s
->flags
& HF_TS_MASK
) {
7467 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7470 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
7473 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7475 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7476 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7478 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7479 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
7482 case 5: /* lfence */
7483 case 6: /* mfence */
7484 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE
))
7487 case 7: /* sfence / clflush */
7488 if ((modrm
& 0xc7) == 0xc0) {
7490 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7491 if (!(s
->cpuid_features
& CPUID_SSE
))
7495 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
7497 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7504 case 0x10d: /* 3DNow! prefetch(w) */
7505 modrm
= ldub_code(s
->pc
++);
7506 mod
= (modrm
>> 6) & 3;
7509 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7510 /* ignore for now */
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm();
        gen_eob(s);
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7);

        if (s->prefix & PREFIX_DATA)
            ot = OT_WORD;
        else if (s->dflag != 2)
            ot = OT_LONG;
        else
            ot = OT_QUAD;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
        gen_op_mov_reg_T0(ot, reg);

        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x177:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(s, b, pc_start, rex_r);
        break;
    default:
        goto illegal_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}
void optimize_flags_init(void)
{
#if TCG_TARGET_REG_BITS == 32
    assert(sizeof(CCTable) == (1 << 3));
#else
    assert(sizeof(CCTable) == (1 << 4));
#endif
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUState, cc_op), "cc_op");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
                                    "cc_src");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
                                    "cc_dst");
    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
                                    "cc_tmp");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj, cflags;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    cflags = tb->cflags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif
    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_tmp6 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for(;;) {
        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
7720 /* if single step mode, we generate only one instruction and
7721 generate an exception */
7722 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7723 the flag and abort the translation to give the irqs a
7724 change to be happen */
7725 if (dc
->tf
|| dc
->singlestep_enabled
||
7726 (flags
& HF_INHIBIT_IRQ_MASK
)) {
7727 gen_jmp_im(pc_ptr
- dc
->cs_base
);
7731 /* if too long translation, stop generation too */
7732 if (gen_opc_ptr
>= gen_opc_end
||
7733 (pc_ptr
- pc_start
) >= (TARGET_PAGE_SIZE
- 32) ||
7734 num_insns
>= max_insns
) {
7735 gen_jmp_im(pc_ptr
- dc
->cs_base
);
7740 gen_jmp_im(pc_ptr
- dc
->cs_base
);
7745 if (tb
->cflags
& CF_LAST_IO
)
7747 gen_icount_end(tb
, num_insns
);
7748 *gen_opc_ptr
= INDEX_op_end
;
7749 /* we don't forget to fill the last values */
7751 j
= gen_opc_ptr
- gen_opc_buf
;
7754 gen_opc_instr_start
[lj
++] = 0;
7758 log_cpu_state_mask(CPU_LOG_TB_CPU
, env
, X86_DUMP_CCOP
);
7759 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
7761 qemu_log("----------------\n");
7762 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
7763 #ifdef TARGET_X86_64
7768 disas_flags
= !dc
->code32
;
7769 log_target_disas(pc_start
, pc_ptr
- pc_start
, disas_flags
);
7775 tb
->size
= pc_ptr
- pc_start
;
7776 tb
->icount
= num_insns
;
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                unsigned long searched_pc, int pc_pos, void *puc)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for(i = 0;i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
        qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                (uint32_t)tb->cs_base);
    }
#endif
    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}