 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(...)  __VA_ARGS__
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#define BUGGY_64(x) NULL
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(...)
#endif

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "gen-icount.h"

static int x86_64_hregs;
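/* Note: cpu_T[2], cpu_T3 and cpu_A0 above are the translator's working
   values (instruction operands and the current effective address), while
   cpu_regs[] maps the architectural general purpose registers; the
   cpu_tmp* values are scratch temporaries reused by the helpers below. */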
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
static inline void gen_op_movl_T0_0(void)
    tcg_gen_movi_tl(cpu_T[0], 0);

static inline void gen_op_movl_T0_im(int32_t val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_op_movl_T0_imu(uint32_t val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_op_movl_T1_im(int32_t val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_movl_T1_imu(uint32_t val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_movl_A0_im(uint32_t val)
    tcg_gen_movi_tl(cpu_A0, val);

static inline void gen_op_movq_A0_im(int64_t val)
    tcg_gen_movi_tl(cpu_A0, val);

static inline void gen_movtl_T0_im(target_ulong val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_movtl_T1_im(target_ulong val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_andl_T0_ffff(void)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);

static inline void gen_op_andl_T0_im(uint32_t val)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);

static inline void gen_op_movl_T0_T1(void)
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);

static inline void gen_op_andl_A0_ffff(void)
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
        tmp = tcg_temp_new();
        tcg_gen_ext8u_tl(tmp, t0);
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff);
            tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
            tcg_gen_shli_tl(tmp, tmp, 8);
            tcg_gen_andi_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], ~0xff00);
            tcg_gen_or_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], tmp);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_tl(tmp, t0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        tcg_gen_mov_tl(cpu_regs[reg], t0);
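/* In the byte-sized case above, a register index below 4 (or any index
   when REX-style register naming is active, tracked by x86_64_hregs)
   designates the low byte of the register, while indexes 4-7 without REX
   designate the legacy high-byte registers AH/CH/DH/BH, i.e. bits 15..8
   of cpu_regs[reg - 4] -- hence the shift by 8 in the second branch. */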
static inline void gen_op_mov_reg_T0(int ot, int reg)
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);

static inline void gen_op_mov_reg_T1(int ot, int reg)
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);

static inline void gen_op_mov_reg_A0(int size, int reg)
        tmp = tcg_temp_new();
        tcg_gen_ext16u_tl(tmp, cpu_A0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
            tcg_gen_ext8u_tl(t0, t0);
        tcg_gen_mov_tl(t0, cpu_regs[reg]);

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
static inline void gen_op_movl_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addl_A0_im(int32_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);

static inline void gen_op_addq_A0_im(int64_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);

static void gen_add_A0_im(DisasContext *s, int val)
        gen_op_addq_A0_im(val);
        gen_op_addl_A0_im(val);
static inline void gen_op_addl_T0_T1(void)
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

static inline void gen_op_jmp_T0(void)
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));

static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
static inline void gen_op_add_reg_T0(int size, int reg)
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
static inline void gen_op_set_cc_op(int32_t val)
    tcg_gen_movi_i32(cpu_cc_op, val);

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
static inline void gen_op_movl_A0_seg(int reg)
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);

static inline void gen_op_addl_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);

static inline void gen_op_movq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));

static inline void gen_op_addq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
static inline void gen_op_lds_T0_A0(int idx)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        /* Should never happen on 32-bit targets. */
        tcg_gen_qemu_ld64(t0, a0, mem_index);

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_ldu_T0_A0(int idx)
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_ld_T1_A0(int idx)
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
        tcg_gen_qemu_st8(t0, a0, mem_index);
        tcg_gen_qemu_st16(t0, a0, mem_index);
        tcg_gen_qemu_st32(t0, a0, mem_index);
        /* Should never happen on 32-bit targets. */
        tcg_gen_qemu_st64(t0, a0, mem_index);

static inline void gen_op_st_T0_A0(int idx)
    gen_op_st_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_st_T1_A0(int idx)
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
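/* For the load/store helpers above, 'idx' is formed as ot + s->mem_index:
   the low two bits select the operand size and the remaining bits carry
   the memory index (a multiple of 4), which (idx >> 2) - 1 turns back into
   the MMU index passed to the tcg_gen_qemu_ld/st ops. */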
static inline void gen_jmp_im(target_ulong pc)
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
static inline void gen_string_movl_A0_ESI(DisasContext *s)
    override = s->override;
        gen_op_movq_A0_seg(override);
        gen_op_addq_A0_reg_sN(0, R_ESI);
        gen_op_movq_A0_reg(R_ESI);
        if (s->addseg && override < 0)
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            gen_op_movl_A0_reg(R_ESI);
        /* 16 address, always override */
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);

static inline void gen_string_movl_A0_EDI(DisasContext *s)
        gen_op_movq_A0_reg(R_EDI);
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            gen_op_movl_A0_reg(R_EDI);
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
static inline void gen_op_movl_T0_Dshift(int ot)
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
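/* gen_op_movl_T0_Dshift loads env->df (+1 or -1 according to the direction
   flag) and shifts it left by the operand size, so T0 holds the signed
   stride used to advance ESI/EDI in the string helpers below. */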
static void gen_extu(int ot, TCGv reg)
        tcg_gen_ext8u_tl(reg, reg);
        tcg_gen_ext16u_tl(reg, reg);
        tcg_gen_ext32u_tl(reg, reg);

static void gen_exts(int ot, TCGv reg)
        tcg_gen_ext8s_tl(reg, reg);
        tcg_gen_ext16s_tl(reg, reg);
        tcg_gen_ext32s_tl(reg, reg);
static inline void gen_op_jnz_ecx(int size, int label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);

static inline void gen_op_jz_ecx(int size, int label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
    if(s->flags & HF_SVMI_MASK) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
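/* gen_check_io emits the protected mode I/O permission check (needed when
   CPL > IOPL or in vm86 mode) and, when SVM intercepts are armed
   (HF_SVMI_MASK), the IOIO intercept check, before the actual
   IN/OUT/INS/OUTS access is generated. */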
static inline void gen_movs(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
static inline void gen_update_cc_op(DisasContext *s)
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
static void gen_op_update1_cc(void)
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static void gen_op_update2_cc(void)
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static inline void gen_op_cmpl_T0_T1_cc(void)
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);

static inline void gen_op_testl_T0_T1_cc(void)
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);

static void gen_op_update_neg_cc(void)
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
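/* Condition codes are evaluated lazily: arithmetic ops only store their
   operands/result in cc_src/cc_dst and record the operation kind in cc_op;
   the two helpers above call back into cc_compute_c/cc_compute_all to
   materialize the carry flag or the full EFLAGS value only when a consumer
   (Jcc, SETcc, PUSHF, ...) actually needs it. */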
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags_c(cpu_T[0]);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
    jcc_op = (b >> 1) & 7;
        /* we optimize the cmp/jcc case */
        if (jcc_op == JCC_O || jcc_op == JCC_P)
        /* some jumps are easy to compute */
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
    int inv, jcc_op, size, cond;
951 jcc_op
= (b
>> 1) & 7;
954 /* we optimize the cmp/jcc case */
960 size
= cc_op
- CC_OP_SUBB
;
966 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0xff);
970 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0xffff);
975 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0xffffffff);
983 tcg_gen_brcondi_tl(inv
? TCG_COND_NE
: TCG_COND_EQ
, t0
, 0, l1
);
989 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0x80);
990 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, cpu_tmp0
,
994 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0x8000);
995 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, cpu_tmp0
,
1000 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_dst
, 0x80000000);
1001 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
, cpu_tmp0
,
1006 tcg_gen_brcondi_tl(inv
? TCG_COND_GE
: TCG_COND_LT
, cpu_cc_dst
,
1013 cond
= inv
? TCG_COND_GEU
: TCG_COND_LTU
;
1016 cond
= inv
? TCG_COND_GTU
: TCG_COND_LEU
;
1018 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1022 tcg_gen_andi_tl(cpu_tmp4
, cpu_tmp4
, 0xff);
1023 tcg_gen_andi_tl(t0
, cpu_cc_src
, 0xff);
1027 tcg_gen_andi_tl(cpu_tmp4
, cpu_tmp4
, 0xffff);
1028 tcg_gen_andi_tl(t0
, cpu_cc_src
, 0xffff);
1030 #ifdef TARGET_X86_64
1033 tcg_gen_andi_tl(cpu_tmp4
, cpu_tmp4
, 0xffffffff);
1034 tcg_gen_andi_tl(t0
, cpu_cc_src
, 0xffffffff);
1041 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1045 cond
= inv
? TCG_COND_GE
: TCG_COND_LT
;
1048 cond
= inv
? TCG_COND_GT
: TCG_COND_LE
;
1050 tcg_gen_add_tl(cpu_tmp4
, cpu_cc_dst
, cpu_cc_src
);
1054 tcg_gen_ext8s_tl(cpu_tmp4
, cpu_tmp4
);
1055 tcg_gen_ext8s_tl(t0
, cpu_cc_src
);
1059 tcg_gen_ext16s_tl(cpu_tmp4
, cpu_tmp4
);
1060 tcg_gen_ext16s_tl(t0
, cpu_cc_src
);
1062 #ifdef TARGET_X86_64
1065 tcg_gen_ext32s_tl(cpu_tmp4
, cpu_tmp4
);
1066 tcg_gen_ext32s_tl(t0
, cpu_cc_src
);
1073 tcg_gen_brcond_tl(cond
, cpu_tmp4
, t0
, l1
);
1081 /* some jumps are easy to compute */
1123 size
= (cc_op
- CC_OP_ADDB
) & 3;
1126 size
= (cc_op
- CC_OP_ADDB
) & 3;
1134 gen_setcc_slow_T0(s
, jcc_op
);
1135 tcg_gen_brcondi_tl(inv
? TCG_COND_EQ
: TCG_COND_NE
,
1141 /* XXX: does not work with gdbstub "ice" single step - not a
1143 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1147 l1
= gen_new_label();
1148 l2
= gen_new_label();
1149 gen_op_jnz_ecx(s
->aflag
, l1
);
1151 gen_jmp_tb(s
, next_eip
, 1);
1156 static inline void gen_stos(DisasContext
*s
, int ot
)
1158 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1159 gen_string_movl_A0_EDI(s
);
1160 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1161 gen_op_movl_T0_Dshift(ot
);
1162 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1165 static inline void gen_lods(DisasContext
*s
, int ot
)
1167 gen_string_movl_A0_ESI(s
);
1168 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1169 gen_op_mov_reg_T0(ot
, R_EAX
);
1170 gen_op_movl_T0_Dshift(ot
);
1171 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1174 static inline void gen_scas(DisasContext
*s
, int ot
)
1176 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1177 gen_string_movl_A0_EDI(s
);
1178 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1179 gen_op_cmpl_T0_T1_cc();
1180 gen_op_movl_T0_Dshift(ot
);
1181 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1184 static inline void gen_cmps(DisasContext
*s
, int ot
)
1186 gen_string_movl_A0_ESI(s
);
1187 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1188 gen_string_movl_A0_EDI(s
);
1189 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1190 gen_op_cmpl_T0_T1_cc();
1191 gen_op_movl_T0_Dshift(ot
);
1192 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1193 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1196 static inline void gen_ins(DisasContext
*s
, int ot
)
1200 gen_string_movl_A0_EDI(s
);
1201 /* Note: we must do this dummy write first to be restartable in
1202 case of page fault. */
1204 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1205 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1206 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1207 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1208 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1209 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1210 gen_op_movl_T0_Dshift(ot
);
1211 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1216 static inline void gen_outs(DisasContext
*s
, int ot
)
1220 gen_string_movl_A0_ESI(s
);
1221 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1223 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1224 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1225 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1226 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1227 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1229 gen_op_movl_T0_Dshift(ot
);
1230 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                    \
static inline void gen_repz_ ## op(DisasContext *s, int ot,             \
                                   target_ulong cur_eip, target_ulong next_eip) \
    gen_update_cc_op(s);                                                \
    l2 = gen_jz_ecx_string(s, next_eip);                                \
    gen_ ## op(s, ot);                                                  \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                             \
    /* a loop would cause two single step exceptions if ECX = 1         \
       before rep string_insn */                                        \
        gen_op_jz_ecx(s->aflag, l2);                                    \
    gen_jmp(s, cur_eip);                                                \

#define GEN_REPZ2(op)                                                   \
static inline void gen_repz_ ## op(DisasContext *s, int ot,             \
                                   target_ulong cur_eip,                \
                                   target_ulong next_eip,               \
    gen_update_cc_op(s);                                                \
    l2 = gen_jz_ecx_string(s, next_eip);                                \
    gen_ ## op(s, ot);                                                  \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                             \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                  \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);          \
        gen_op_jz_ecx(s->aflag, l2);                                    \
    gen_jmp(s, cur_eip);                                                \
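/* The GEN_REPZ/GEN_REPZ2 expansions above emit one iteration of a
   REP-prefixed string instruction per translated block: ECX is tested via
   gen_jz_ecx_string, the string op body is generated once, ECX is
   decremented, and control either jumps back to the current instruction
   (gen_jmp(s, cur_eip)) for the next iteration or falls through to l2;
   GEN_REPZ2 additionally tests ZF for REPZ/REPNZ CMPS/SCAS. */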
1279 static void gen_helper_fp_arith_ST0_FT0(int op
)
1282 case 0: gen_helper_fadd_ST0_FT0(); break;
1283 case 1: gen_helper_fmul_ST0_FT0(); break;
1284 case 2: gen_helper_fcom_ST0_FT0(); break;
1285 case 3: gen_helper_fcom_ST0_FT0(); break;
1286 case 4: gen_helper_fsub_ST0_FT0(); break;
1287 case 5: gen_helper_fsubr_ST0_FT0(); break;
1288 case 6: gen_helper_fdiv_ST0_FT0(); break;
1289 case 7: gen_helper_fdivr_ST0_FT0(); break;
1293 /* NOTE the exception in "r" op ordering */
1294 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1296 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1298 case 0: gen_helper_fadd_STN_ST0(tmp
); break;
1299 case 1: gen_helper_fmul_STN_ST0(tmp
); break;
1300 case 4: gen_helper_fsubr_STN_ST0(tmp
); break;
1301 case 5: gen_helper_fsub_STN_ST0(tmp
); break;
1302 case 6: gen_helper_fdivr_STN_ST0(tmp
); break;
1303 case 7: gen_helper_fdiv_STN_ST0(tmp
); break;
1307 /* if d == OR_TMP0, it means memory operand (address in A0) */
1308 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1311 gen_op_mov_TN_reg(ot
, 0, d
);
1313 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1317 if (s1
->cc_op
!= CC_OP_DYNAMIC
)
1318 gen_op_set_cc_op(s1
->cc_op
);
1319 gen_compute_eflags_c(cpu_tmp4
);
1320 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1321 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1323 gen_op_mov_reg_T0(ot
, d
);
1325 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1326 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1327 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1328 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1329 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1330 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_ADDB
+ ot
);
1331 s1
->cc_op
= CC_OP_DYNAMIC
;
1334 if (s1
->cc_op
!= CC_OP_DYNAMIC
)
1335 gen_op_set_cc_op(s1
->cc_op
);
1336 gen_compute_eflags_c(cpu_tmp4
);
1337 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1338 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1340 gen_op_mov_reg_T0(ot
, d
);
1342 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1343 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1344 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1345 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp4
);
1346 tcg_gen_shli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 2);
1347 tcg_gen_addi_i32(cpu_cc_op
, cpu_tmp2_i32
, CC_OP_SUBB
+ ot
);
1348 s1
->cc_op
= CC_OP_DYNAMIC
;
1351 gen_op_addl_T0_T1();
1353 gen_op_mov_reg_T0(ot
, d
);
1355 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1356 gen_op_update2_cc();
1357 s1
->cc_op
= CC_OP_ADDB
+ ot
;
1360 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1362 gen_op_mov_reg_T0(ot
, d
);
1364 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1365 gen_op_update2_cc();
1366 s1
->cc_op
= CC_OP_SUBB
+ ot
;
1370 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1372 gen_op_mov_reg_T0(ot
, d
);
1374 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1375 gen_op_update1_cc();
1376 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1379 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1381 gen_op_mov_reg_T0(ot
, d
);
1383 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1384 gen_op_update1_cc();
1385 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1388 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1390 gen_op_mov_reg_T0(ot
, d
);
1392 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1393 gen_op_update1_cc();
1394 s1
->cc_op
= CC_OP_LOGICB
+ ot
;
1397 gen_op_cmpl_T0_T1_cc();
1398 s1
->cc_op
= CC_OP_SUBB
+ ot
;
1403 /* if d == OR_TMP0, it means memory operand (address in A0) */
1404 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1407 gen_op_mov_TN_reg(ot
, 0, d
);
1409 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1410 if (s1
->cc_op
!= CC_OP_DYNAMIC
)
1411 gen_op_set_cc_op(s1
->cc_op
);
1413 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1414 s1
->cc_op
= CC_OP_INCB
+ ot
;
1416 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1417 s1
->cc_op
= CC_OP_DECB
+ ot
;
1420 gen_op_mov_reg_T0(ot
, d
);
1422 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1423 gen_compute_eflags_c(cpu_cc_src
);
1424 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1427 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1428 int is_right
, int is_arith
)
1441 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1443 gen_op_mov_TN_reg(ot
, 0, op1
);
1445 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1447 tcg_gen_addi_tl(cpu_tmp5
, cpu_T
[1], -1);
1451 gen_exts(ot
, cpu_T
[0]);
1452 tcg_gen_sar_tl(cpu_T3
, cpu_T
[0], cpu_tmp5
);
1453 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1455 gen_extu(ot
, cpu_T
[0]);
1456 tcg_gen_shr_tl(cpu_T3
, cpu_T
[0], cpu_tmp5
);
1457 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1460 tcg_gen_shl_tl(cpu_T3
, cpu_T
[0], cpu_tmp5
);
1461 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1466 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1468 gen_op_mov_reg_T0(ot
, op1
);
1470 /* update eflags if non zero shift */
1471 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1472 gen_op_set_cc_op(s
->cc_op
);
1474 /* XXX: inefficient */
1475 t0
= tcg_temp_local_new();
1476 t1
= tcg_temp_local_new();
1478 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1479 tcg_gen_mov_tl(t1
, cpu_T3
);
1481 shift_label
= gen_new_label();
1482 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_T
[1], 0, shift_label
);
1484 tcg_gen_mov_tl(cpu_cc_src
, t1
);
1485 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
1487 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1489 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1491 gen_set_label(shift_label
);
1492 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1498 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1499 int is_right
, int is_arith
)
1510 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1512 gen_op_mov_TN_reg(ot
, 0, op1
);
1518 gen_exts(ot
, cpu_T
[0]);
1519 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1520 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1522 gen_extu(ot
, cpu_T
[0]);
1523 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1524 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1527 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1528 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1534 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1536 gen_op_mov_reg_T0(ot
, op1
);
1538 /* update eflags if non zero shift */
1540 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1541 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1543 s
->cc_op
= CC_OP_SARB
+ ot
;
1545 s
->cc_op
= CC_OP_SHLB
+ ot
;
1549 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1552 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1554 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1557 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
,
1561 int label1
, label2
, data_bits
;
1562 TCGv t0
, t1
, t2
, a0
;
1564 /* XXX: inefficient, but we must use local temps */
1565 t0
= tcg_temp_local_new();
1566 t1
= tcg_temp_local_new();
1567 t2
= tcg_temp_local_new();
1568 a0
= tcg_temp_local_new();
1576 if (op1
== OR_TMP0
) {
1577 tcg_gen_mov_tl(a0
, cpu_A0
);
1578 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1580 gen_op_mov_v_reg(ot
, t0
, op1
);
1583 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1585 tcg_gen_andi_tl(t1
, t1
, mask
);
1587 /* Must test zero case to avoid using undefined behaviour in TCG
1589 label1
= gen_new_label();
1590 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label1
);
1593 tcg_gen_andi_tl(cpu_tmp0
, t1
, (1 << (3 + ot
)) - 1);
1595 tcg_gen_mov_tl(cpu_tmp0
, t1
);
1598 tcg_gen_mov_tl(t2
, t0
);
1600 data_bits
= 8 << ot
;
1601 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1602 fix TCG definition) */
1604 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1605 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1606 tcg_gen_shl_tl(t0
, t0
, cpu_tmp0
);
1608 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1609 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1610 tcg_gen_shr_tl(t0
, t0
, cpu_tmp0
);
1612 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1614 gen_set_label(label1
);
1616 if (op1
== OR_TMP0
) {
1617 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1619 gen_op_mov_reg_v(ot
, op1
, t0
);
1623 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1624 gen_op_set_cc_op(s
->cc_op
);
1626 label2
= gen_new_label();
1627 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label2
);
1629 gen_compute_eflags(cpu_cc_src
);
1630 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1631 tcg_gen_xor_tl(cpu_tmp0
, t2
, t0
);
1632 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1633 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1634 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1636 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1638 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1639 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1641 tcg_gen_discard_tl(cpu_cc_dst
);
1642 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_EFLAGS
);
1644 gen_set_label(label2
);
1645 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1653 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1660 /* XXX: inefficient, but we must use local temps */
1661 t0
= tcg_temp_local_new();
1662 t1
= tcg_temp_local_new();
1663 a0
= tcg_temp_local_new();
1671 if (op1
== OR_TMP0
) {
1672 tcg_gen_mov_tl(a0
, cpu_A0
);
1673 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1675 gen_op_mov_v_reg(ot
, t0
, op1
);
1679 tcg_gen_mov_tl(t1
, t0
);
1682 data_bits
= 8 << ot
;
1684 int shift
= op2
& ((1 << (3 + ot
)) - 1);
1686 tcg_gen_shri_tl(cpu_tmp4
, t0
, shift
);
1687 tcg_gen_shli_tl(t0
, t0
, data_bits
- shift
);
1690 tcg_gen_shli_tl(cpu_tmp4
, t0
, shift
);
1691 tcg_gen_shri_tl(t0
, t0
, data_bits
- shift
);
1693 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1697 if (op1
== OR_TMP0
) {
1698 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1700 gen_op_mov_reg_v(ot
, op1
, t0
);
1705 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1706 gen_op_set_cc_op(s
->cc_op
);
1708 gen_compute_eflags(cpu_cc_src
);
1709 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1710 tcg_gen_xor_tl(cpu_tmp0
, t1
, t0
);
1711 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1712 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1713 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1715 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1717 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1718 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1720 tcg_gen_discard_tl(cpu_cc_dst
);
1721 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_EFLAGS
);
1722 s
->cc_op
= CC_OP_EFLAGS
;
1730 /* XXX: add faster immediate = 1 case */
1731 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1736 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1737 gen_op_set_cc_op(s
->cc_op
);
1741 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1743 gen_op_mov_TN_reg(ot
, 0, op1
);
1747 case 0: gen_helper_rcrb(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1748 case 1: gen_helper_rcrw(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1749 case 2: gen_helper_rcrl(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1750 #ifdef TARGET_X86_64
1751 case 3: gen_helper_rcrq(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1756 case 0: gen_helper_rclb(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1757 case 1: gen_helper_rclw(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1758 case 2: gen_helper_rcll(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1759 #ifdef TARGET_X86_64
1760 case 3: gen_helper_rclq(cpu_T
[0], cpu_T
[0], cpu_T
[1]); break;
1766 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1768 gen_op_mov_reg_T0(ot
, op1
);
1771 label1
= gen_new_label();
1772 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cc_tmp
, -1, label1
);
1774 tcg_gen_mov_tl(cpu_cc_src
, cpu_cc_tmp
);
1775 tcg_gen_discard_tl(cpu_cc_dst
);
1776 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_EFLAGS
);
1778 gen_set_label(label1
);
1779 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1782 /* XXX: add faster immediate case */
1783 static void gen_shiftd_rm_T1_T3(DisasContext
*s
, int ot
, int op1
,
1786 int label1
, label2
, data_bits
;
1788 TCGv t0
, t1
, t2
, a0
;
1790 t0
= tcg_temp_local_new();
1791 t1
= tcg_temp_local_new();
1792 t2
= tcg_temp_local_new();
1793 a0
= tcg_temp_local_new();
1801 if (op1
== OR_TMP0
) {
1802 tcg_gen_mov_tl(a0
, cpu_A0
);
1803 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1805 gen_op_mov_v_reg(ot
, t0
, op1
);
1808 tcg_gen_andi_tl(cpu_T3
, cpu_T3
, mask
);
1810 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1811 tcg_gen_mov_tl(t2
, cpu_T3
);
1813 /* Must test zero case to avoid using undefined behaviour in TCG
1815 label1
= gen_new_label();
1816 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
1818 tcg_gen_addi_tl(cpu_tmp5
, t2
, -1);
1819 if (ot
== OT_WORD
) {
1820 /* Note: we implement the Intel behaviour for shift count > 16 */
1822 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1823 tcg_gen_shli_tl(cpu_tmp0
, t1
, 16);
1824 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1825 tcg_gen_ext32u_tl(t0
, t0
);
1827 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1829 /* only needed if count > 16, but a test would complicate */
1830 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1831 tcg_gen_shl_tl(cpu_tmp0
, t0
, cpu_tmp5
);
1833 tcg_gen_shr_tl(t0
, t0
, t2
);
1835 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1837 /* XXX: not optimal */
1838 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1839 tcg_gen_shli_tl(t1
, t1
, 16);
1840 tcg_gen_or_tl(t1
, t1
, t0
);
1841 tcg_gen_ext32u_tl(t1
, t1
);
1843 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1844 tcg_gen_subfi_tl(cpu_tmp0
, 32, cpu_tmp5
);
1845 tcg_gen_shr_tl(cpu_tmp5
, t1
, cpu_tmp0
);
1846 tcg_gen_or_tl(cpu_tmp4
, cpu_tmp4
, cpu_tmp5
);
1848 tcg_gen_shl_tl(t0
, t0
, t2
);
1849 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1850 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1851 tcg_gen_or_tl(t0
, t0
, t1
);
1854 data_bits
= 8 << ot
;
1857 tcg_gen_ext32u_tl(t0
, t0
);
1859 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1861 tcg_gen_shr_tl(t0
, t0
, t2
);
1862 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1863 tcg_gen_shl_tl(t1
, t1
, cpu_tmp5
);
1864 tcg_gen_or_tl(t0
, t0
, t1
);
1868 tcg_gen_ext32u_tl(t1
, t1
);
1870 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1872 tcg_gen_shl_tl(t0
, t0
, t2
);
1873 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1874 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1875 tcg_gen_or_tl(t0
, t0
, t1
);
1878 tcg_gen_mov_tl(t1
, cpu_tmp4
);
1880 gen_set_label(label1
);
1882 if (op1
== OR_TMP0
) {
1883 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1885 gen_op_mov_reg_v(ot
, op1
, t0
);
1889 if (s
->cc_op
!= CC_OP_DYNAMIC
)
1890 gen_op_set_cc_op(s
->cc_op
);
1892 label2
= gen_new_label();
1893 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label2
);
1895 tcg_gen_mov_tl(cpu_cc_src
, t1
);
1896 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
1898 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1900 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1902 gen_set_label(label2
);
1903 s
->cc_op
= CC_OP_DYNAMIC
; /* cannot predict flags after */
1911 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
1914 gen_op_mov_TN_reg(ot
, 1, s
);
1917 gen_rot_rm_T1(s1
, ot
, d
, 0);
1920 gen_rot_rm_T1(s1
, ot
, d
, 1);
1924 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1927 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1930 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1933 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1936 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1941 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
1945 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1948 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1952 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1955 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1958 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1961 /* currently not optimized */
1962 gen_op_movl_T1_im(c
);
1963 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1968 static void gen_lea_modrm(DisasContext
*s
, int modrm
, int *reg_ptr
, int *offset_ptr
)
1976 int mod
, rm
, code
, override
, must_add_seg
;
1978 override
= s
->override
;
1979 must_add_seg
= s
->addseg
;
1982 mod
= (modrm
>> 6) & 3;
1994 code
= ldub_code(s
->pc
++);
1995 scale
= (code
>> 6) & 3;
1996 index
= ((code
>> 3) & 7) | REX_X(s
);
2003 if ((base
& 7) == 5) {
2005 disp
= (int32_t)ldl_code(s
->pc
);
2007 if (CODE64(s
) && !havesib
) {
2008 disp
+= s
->pc
+ s
->rip_offset
;
2015 disp
= (int8_t)ldub_code(s
->pc
++);
2019 disp
= (int32_t)ldl_code(s
->pc
);
2025 /* for correct popl handling with esp */
2026 if (base
== 4 && s
->popl_esp_hack
)
2027 disp
+= s
->popl_esp_hack
;
2028 #ifdef TARGET_X86_64
2029 if (s
->aflag
== 2) {
2030 gen_op_movq_A0_reg(base
);
2032 gen_op_addq_A0_im(disp
);
2037 gen_op_movl_A0_reg(base
);
2039 gen_op_addl_A0_im(disp
);
2042 #ifdef TARGET_X86_64
2043 if (s
->aflag
== 2) {
2044 gen_op_movq_A0_im(disp
);
2048 gen_op_movl_A0_im(disp
);
2051 /* index == 4 means no index */
2052 if (havesib
&& (index
!= 4)) {
2053 #ifdef TARGET_X86_64
2054 if (s
->aflag
== 2) {
2055 gen_op_addq_A0_reg_sN(scale
, index
);
2059 gen_op_addl_A0_reg_sN(scale
, index
);
2064 if (base
== R_EBP
|| base
== R_ESP
)
2069 #ifdef TARGET_X86_64
2070 if (s
->aflag
== 2) {
2071 gen_op_addq_A0_seg(override
);
2075 gen_op_addl_A0_seg(override
);
2082 disp
= lduw_code(s
->pc
);
2084 gen_op_movl_A0_im(disp
);
2085 rm
= 0; /* avoid SS override */
2092 disp
= (int8_t)ldub_code(s
->pc
++);
2096 disp
= lduw_code(s
->pc
);
2102 gen_op_movl_A0_reg(R_EBX
);
2103 gen_op_addl_A0_reg_sN(0, R_ESI
);
2106 gen_op_movl_A0_reg(R_EBX
);
2107 gen_op_addl_A0_reg_sN(0, R_EDI
);
2110 gen_op_movl_A0_reg(R_EBP
);
2111 gen_op_addl_A0_reg_sN(0, R_ESI
);
2114 gen_op_movl_A0_reg(R_EBP
);
2115 gen_op_addl_A0_reg_sN(0, R_EDI
);
2118 gen_op_movl_A0_reg(R_ESI
);
2121 gen_op_movl_A0_reg(R_EDI
);
2124 gen_op_movl_A0_reg(R_EBP
);
2128 gen_op_movl_A0_reg(R_EBX
);
2132 gen_op_addl_A0_im(disp
);
2133 gen_op_andl_A0_ffff();
2137 if (rm
== 2 || rm
== 3 || rm
== 6)
2142 gen_op_addl_A0_seg(override
);
2152 static void gen_nop_modrm(DisasContext
*s
, int modrm
)
2154 int mod
, rm
, base
, code
;
2156 mod
= (modrm
>> 6) & 3;
2166 code
= ldub_code(s
->pc
++);
2202 /* used for LEA and MOV AX, mem */
2203 static void gen_add_A0_ds_seg(DisasContext
*s
)
2205 int override
, must_add_seg
;
2206 must_add_seg
= s
->addseg
;
2208 if (s
->override
>= 0) {
2209 override
= s
->override
;
2213 #ifdef TARGET_X86_64
2215 gen_op_addq_A0_seg(override
);
2219 gen_op_addl_A0_seg(override
);
2224 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2226 static void gen_ldst_modrm(DisasContext
*s
, int modrm
, int ot
, int reg
, int is_store
)
2228 int mod
, rm
, opreg
, disp
;
2230 mod
= (modrm
>> 6) & 3;
2231 rm
= (modrm
& 7) | REX_B(s
);
2235 gen_op_mov_TN_reg(ot
, 0, reg
);
2236 gen_op_mov_reg_T0(ot
, rm
);
2238 gen_op_mov_TN_reg(ot
, 0, rm
);
2240 gen_op_mov_reg_T0(ot
, reg
);
2243 gen_lea_modrm(s
, modrm
, &opreg
, &disp
);
2246 gen_op_mov_TN_reg(ot
, 0, reg
);
2247 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2249 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2251 gen_op_mov_reg_T0(ot
, reg
);
2256 static inline uint32_t insn_get(DisasContext
*s
, int ot
)
2262 ret
= ldub_code(s
->pc
);
2266 ret
= lduw_code(s
->pc
);
2271 ret
= ldl_code(s
->pc
);
2278 static inline int insn_const_size(unsigned int ot
)
2286 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2288 TranslationBlock
*tb
;
2291 pc
= s
->cs_base
+ eip
;
2293 /* NOTE: we handle the case where the TB spans two pages here */
2294 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2295 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2296 /* jump to same page: we can use a direct jump */
2297 tcg_gen_goto_tb(tb_num
);
2299 tcg_gen_exit_tb((long)tb
+ tb_num
);
2301 /* jump to another page: currently not optimized */
2307 static inline void gen_jcc(DisasContext
*s
, int b
,
2308 target_ulong val
, target_ulong next_eip
)
2313 gen_update_cc_op(s
);
2315 l1
= gen_new_label();
2316 gen_jcc1(s
, cc_op
, b
, l1
);
2318 gen_goto_tb(s
, 0, next_eip
);
2321 gen_goto_tb(s
, 1, val
);
2322 s
->is_jmp
= DISAS_TB_JUMP
;
2325 l1
= gen_new_label();
2326 l2
= gen_new_label();
2327 gen_jcc1(s
, cc_op
, b
, l1
);
2329 gen_jmp_im(next_eip
);
2339 static void gen_setcc(DisasContext
*s
, int b
)
2341 int inv
, jcc_op
, l1
;
2344 if (is_fast_jcc_case(s
, b
)) {
2345 /* nominal case: we use a jump */
2346 /* XXX: make it faster by adding new instructions in TCG */
2347 t0
= tcg_temp_local_new();
2348 tcg_gen_movi_tl(t0
, 0);
2349 l1
= gen_new_label();
2350 gen_jcc1(s
, s
->cc_op
, b
^ 1, l1
);
2351 tcg_gen_movi_tl(t0
, 1);
2353 tcg_gen_mov_tl(cpu_T
[0], t0
);
        /* slow case: it is more efficient not to generate a jump,
           although it is questionable whether this optimization is
2360 jcc_op
= (b
>> 1) & 7;
2361 gen_setcc_slow_T0(s
, jcc_op
);
2363 tcg_gen_xori_tl(cpu_T
[0], cpu_T
[0], 1);
2368 static inline void gen_op_movl_T0_seg(int seg_reg
)
2370 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2371 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2374 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2376 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2377 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2378 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2379 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2380 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2381 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2384 /* move T0 to seg_reg and compute if the CPU state may change. Never
2385 call this function with seg_reg == R_CS */
2386 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2388 if (s
->pe
&& !s
->vm86
) {
2389 /* XXX: optimize by finding processor state dynamically */
2390 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2391 gen_op_set_cc_op(s
->cc_op
);
2392 gen_jmp_im(cur_eip
);
2393 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2394 gen_helper_load_seg(tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2395 /* abort translation because the addseg value may change or
2396 because ss32 may change. For R_SS, translation must always
2397 stop as a special handling must be done to disable hardware
2398 interrupts for the next instruction */
2399 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2400 s
->is_jmp
= DISAS_TB_JUMP
;
2402 gen_op_movl_seg_T0_vm(seg_reg
);
2403 if (seg_reg
== R_SS
)
2404 s
->is_jmp
= DISAS_TB_JUMP
;
2408 static inline int svm_is_rep(int prefixes
)
2410 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2414 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2415 uint32_t type
, uint64_t param
)
2417 /* no SVM activated; fast case */
2418 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2420 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2421 gen_op_set_cc_op(s
->cc_op
);
2422 gen_jmp_im(pc_start
- s
->cs_base
);
2423 gen_helper_svm_check_intercept_param(tcg_const_i32(type
),
2424 tcg_const_i64(param
));
2428 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2430 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2433 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2435 #ifdef TARGET_X86_64
2437 gen_op_add_reg_im(2, R_ESP
, addend
);
2441 gen_op_add_reg_im(1, R_ESP
, addend
);
2443 gen_op_add_reg_im(0, R_ESP
, addend
);
2447 /* generate a push. It depends on ss32, addseg and dflag */
2448 static void gen_push_T0(DisasContext
*s
)
2450 #ifdef TARGET_X86_64
2452 gen_op_movq_A0_reg(R_ESP
);
2454 gen_op_addq_A0_im(-8);
2455 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2457 gen_op_addq_A0_im(-2);
2458 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2460 gen_op_mov_reg_A0(2, R_ESP
);
2464 gen_op_movl_A0_reg(R_ESP
);
2466 gen_op_addl_A0_im(-2);
2468 gen_op_addl_A0_im(-4);
2471 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2472 gen_op_addl_A0_seg(R_SS
);
2475 gen_op_andl_A0_ffff();
2476 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2477 gen_op_addl_A0_seg(R_SS
);
2479 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2480 if (s
->ss32
&& !s
->addseg
)
2481 gen_op_mov_reg_A0(1, R_ESP
);
2483 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2487 /* generate a push. It depends on ss32, addseg and dflag */
2488 /* slower version for T1, only used for call Ev */
2489 static void gen_push_T1(DisasContext
*s
)
2491 #ifdef TARGET_X86_64
2493 gen_op_movq_A0_reg(R_ESP
);
2495 gen_op_addq_A0_im(-8);
2496 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2498 gen_op_addq_A0_im(-2);
2499 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2501 gen_op_mov_reg_A0(2, R_ESP
);
2505 gen_op_movl_A0_reg(R_ESP
);
2507 gen_op_addl_A0_im(-2);
2509 gen_op_addl_A0_im(-4);
2512 gen_op_addl_A0_seg(R_SS
);
2515 gen_op_andl_A0_ffff();
2516 gen_op_addl_A0_seg(R_SS
);
2518 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2520 if (s
->ss32
&& !s
->addseg
)
2521 gen_op_mov_reg_A0(1, R_ESP
);
2523 gen_stack_update(s
, (-2) << s
->dflag
);
2527 /* two step pop is necessary for precise exceptions */
2528 static void gen_pop_T0(DisasContext
*s
)
2530 #ifdef TARGET_X86_64
2532 gen_op_movq_A0_reg(R_ESP
);
2533 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2537 gen_op_movl_A0_reg(R_ESP
);
2540 gen_op_addl_A0_seg(R_SS
);
2542 gen_op_andl_A0_ffff();
2543 gen_op_addl_A0_seg(R_SS
);
2545 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2549 static void gen_pop_update(DisasContext
*s
)
2551 #ifdef TARGET_X86_64
2552 if (CODE64(s
) && s
->dflag
) {
2553 gen_stack_update(s
, 8);
2557 gen_stack_update(s
, 2 << s
->dflag
);
2561 static void gen_stack_A0(DisasContext
*s
)
2563 gen_op_movl_A0_reg(R_ESP
);
2565 gen_op_andl_A0_ffff();
2566 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2568 gen_op_addl_A0_seg(R_SS
);
2571 /* NOTE: wrap around in 16 bit not fully handled */
2572 static void gen_pusha(DisasContext
*s
)
2575 gen_op_movl_A0_reg(R_ESP
);
2576 gen_op_addl_A0_im(-16 << s
->dflag
);
2578 gen_op_andl_A0_ffff();
2579 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2581 gen_op_addl_A0_seg(R_SS
);
2582 for(i
= 0;i
< 8; i
++) {
2583 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2584 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2585 gen_op_addl_A0_im(2 << s
->dflag
);
2587 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2590 /* NOTE: wrap around in 16 bit not fully handled */
2591 static void gen_popa(DisasContext
*s
)
2594 gen_op_movl_A0_reg(R_ESP
);
2596 gen_op_andl_A0_ffff();
2597 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2598 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2600 gen_op_addl_A0_seg(R_SS
);
2601 for(i
= 0;i
< 8; i
++) {
2602 /* ESP is not reloaded */
2604 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2605 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2607 gen_op_addl_A0_im(2 << s
->dflag
);
2609 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2612 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2617 #ifdef TARGET_X86_64
2619 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2622 gen_op_movl_A0_reg(R_ESP
);
2623 gen_op_addq_A0_im(-opsize
);
2624 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2627 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2628 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2630 /* XXX: must save state */
2631 gen_helper_enter64_level(tcg_const_i32(level
),
2632 tcg_const_i32((ot
== OT_QUAD
)),
2635 gen_op_mov_reg_T1(ot
, R_EBP
);
2636 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2637 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2641 ot
= s
->dflag
+ OT_WORD
;
2642 opsize
= 2 << s
->dflag
;
2644 gen_op_movl_A0_reg(R_ESP
);
2645 gen_op_addl_A0_im(-opsize
);
2647 gen_op_andl_A0_ffff();
2648 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2650 gen_op_addl_A0_seg(R_SS
);
2652 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2653 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2655 /* XXX: must save state */
2656 gen_helper_enter_level(tcg_const_i32(level
),
2657 tcg_const_i32(s
->dflag
),
2660 gen_op_mov_reg_T1(ot
, R_EBP
);
2661 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2662 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2666 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2668 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2669 gen_op_set_cc_op(s
->cc_op
);
2670 gen_jmp_im(cur_eip
);
2671 gen_helper_raise_exception(tcg_const_i32(trapno
));
2672 s
->is_jmp
= DISAS_TB_JUMP
;
2675 /* an interrupt is different from an exception because of the
2677 static void gen_interrupt(DisasContext
*s
, int intno
,
2678 target_ulong cur_eip
, target_ulong next_eip
)
2680 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2681 gen_op_set_cc_op(s
->cc_op
);
2682 gen_jmp_im(cur_eip
);
2683 gen_helper_raise_interrupt(tcg_const_i32(intno
),
2684 tcg_const_i32(next_eip
- cur_eip
));
2685 s
->is_jmp
= DISAS_TB_JUMP
;
2688 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2690 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2691 gen_op_set_cc_op(s
->cc_op
);
2692 gen_jmp_im(cur_eip
);
2694 s
->is_jmp
= DISAS_TB_JUMP
;
2697 /* generate a generic end of block. Trace exception is also generated
2699 static void gen_eob(DisasContext
*s
)
2701 if (s
->cc_op
!= CC_OP_DYNAMIC
)
2702 gen_op_set_cc_op(s
->cc_op
);
2703 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2704 gen_helper_reset_inhibit_irq();
2706 if (s
->tb
->flags
& HF_RF_MASK
) {
2707 gen_helper_reset_rf();
2709 if (s
->singlestep_enabled
) {
2712 gen_helper_single_step();
2716 s
->is_jmp
= DISAS_TB_JUMP
;
2719 /* generate a jump to eip. No segment change must happen before as a
2720 direct call to the next block may occur */
2721 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2724 gen_update_cc_op(s
);
2725 gen_goto_tb(s
, tb_num
, eip
);
2726 s
->is_jmp
= DISAS_TB_JUMP
;
2733 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2735 gen_jmp_tb(s
, eip
, 0);
2738 static inline void gen_ldq_env_A0(int idx
, int offset
)
2740 int mem_index
= (idx
>> 2) - 1;
2741 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2742 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2745 static inline void gen_stq_env_A0(int idx
, int offset
)
2747 int mem_index
= (idx
>> 2) - 1;
2748 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2749 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2752 static inline void gen_ldo_env_A0(int idx
, int offset
)
2754 int mem_index
= (idx
>> 2) - 1;
2755 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2756 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2757 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2758 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2759 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2762 static inline void gen_sto_env_A0(int idx
, int offset
)
2764 int mem_index
= (idx
>> 2) - 1;
2765 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2766 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2767 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2768 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2769 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2772 static inline void gen_op_movo(int d_offset
, int s_offset
)
2774 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2775 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2776 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2777 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2780 static inline void gen_op_movq(int d_offset
, int s_offset
)
2782 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2783 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2786 static inline void gen_op_movl(int d_offset
, int s_offset
)
2788 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2789 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2792 static inline void gen_op_movq_env_0(int d_offset
)
2794 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2795 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2798 #define SSE_SPECIAL ((void *)1)
2799 #define SSE_DUMMY ((void *)2)
2801 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2802 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2803 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
static void *sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { gen_helper_shufps, gen_helper_shufpd },

    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
    [0x70] = { gen_helper_pshufw_mmx,
               gen_helper_pshufd_xmm,
               gen_helper_pshufhw_xmm,
               gen_helper_pshuflw_xmm },
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = MMX_OP2(maskmov),
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
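/* sse_op_table1 is indexed by the second opcode byte and by
   b1 = 0 (no prefix), 1 (0x66), 2 (0xF3), 3 (0xF2): e.g. 0x0f 0x58
   resolves to addps/addpd/addss/addsd depending on the prefix.
   SSE_SPECIAL entries are decoded by hand in gen_sse(); SSE_DUMMY
   entries (femms/emms) only need the MMX state prepared. */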
static void *sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
static void *sse_op_table3[4 * 3] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd,
    X86_64_ONLY(gen_helper_cvtsq2ss),
    X86_64_ONLY(gen_helper_cvtsq2sd),

    gen_helper_cvttss2si,
    gen_helper_cvttsd2si,
    X86_64_ONLY(gen_helper_cvttss2sq),
    X86_64_ONLY(gen_helper_cvttsd2sq),

    gen_helper_cvtss2si,
    gen_helper_cvtsd2si,
    X86_64_ONLY(gen_helper_cvtss2sq),
    X86_64_ONLY(gen_helper_cvtsd2sq),
};
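/* sse_op_table3 covers the scalar int<->float converts, indexed by
   (dflag == 2) * 2 + ((b >> 8) - 2), i.e. 32- vs 64-bit gp operand
   and F3 vs F2 prefix, with the second and third groups (offset +4
   and +8) used by the truncating and rounding register forms; see
   the 0x2a/0x2c/0x2d cases in gen_sse(). The cvt*q variants exist
   only on x86_64 builds. */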
/* 0x0f 0xc2 compares: the table body was lost in this copy; the eight
   rows below are the standard cmpps/cmppd/cmpss/cmpsd predicates
   selected by the immediate byte, reconstructed on that assumption. */
static void *sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
static void *sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
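/* 3DNow! encodes the actual operation in a trailing immediate byte
   (0x0f 0x0f ... op), so this table is indexed directly by that
   suffix byte; see the 0x0f case at the end of gen_sse(). */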
struct sse_op_helper_s {
    void *op[2]; uint32_t ext_mask;
};
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
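/* Tables 6 and 7 back the three-byte 0x0f 0x38 and 0x0f 0x3a opcode
   maps. Each entry carries the CPUID ext feature bit that must be set
   (SSSE3/SSE4.1/SSE4.2); most SSE4 ops exist only in the 0x66-prefixed
   xmm form, hence the NULL mmx slot in SSE41_OP/SSE42_OP. */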
static struct sse_op_helper_s sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
};
static struct sse_op_helper_s sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
};
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;
    void *sse_op2;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_op2 = sse_op_table1[b][b1];
    if (!sse_op2)
        goto illegal_op;
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
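    /* Example: 0x66 0x0f 0x58 arrives here as b = 0x58, b1 = 1 and
       resolves to gen_helper_addpd; the same opcode with no prefix
       (b1 = 0) is addps. */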
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return;
    }
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
            goto illegal_op;
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        /* femms */
        gen_helper_emms();
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms();
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx();
    }

    modrm = ldub_code(s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_op2 == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
        case 0x0e7: /* movntq */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            break;
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x3f0: /* lddqu */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x22b: /* movntss */
        case 0x32b: /* movntsd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (b1 & 1) {
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
                    xmm_regs[reg]));
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                    xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
            } else
#endif
            {
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
            } else
#endif
            {
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x6f: /* movq mm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[rm].mmx));
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x010: /* movups */
        case 0x110: /* movupd */
        case 0x028: /* movaps */
        case 0x128: /* movapd */
        case 0x16f: /* movdqa xmm, ea */
        case 0x26f: /* movdqu xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
                            offsetof(CPUX86State,xmm_regs[rm]));
            }
            break;
        case 0x210: /* movss xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
            }
            break;
        case 0x310: /* movsd xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x012: /* movlps */
        case 0x112: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                /* movhlps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x212: /* movsldup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            break;
        case 0x312: /* movddup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            break;
        case 0x016: /* movhps */
        case 0x116: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                /* movlhps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x216: /* movshdup */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            break;
        case 0x178: /* extrq_i */
        case 0x378: /* insertq_i */
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = ldub_code(s->pc++) & 0x3F;
                bit_index = ldub_code(s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                    offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
                        tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
                        tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            if (b1 >= 2) {
                goto illegal_op;
            }
            val = ldub_code(s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
            if (!sse_op2)
                goto illegal_op;
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx();
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
            if (ot == OT_LONG) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
            } else {
                ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx();
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
                                    (b & 1) * 4];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
                ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = ldub_code(s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx();
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx();
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x138:
            if (s->prefix & PREFIX_REPNZ)
                goto crc32;
        case 0x038:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_op2 = sse_op_table6[b].op[b1];
            if (!sse_op2)
                goto illegal_op;
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                        offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a:            /* movntdqa */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_op2 == SSE_SPECIAL)
                goto illegal_op;

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);

            if (b == 0x17)
                s->cc_op = CC_OP_EFLAGS;
            break;
        case 0x338: /* crc32 */
        crc32:
            b = modrm;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            if (b != 0xf0 && b != 0xf1)
                goto illegal_op;
            if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
                goto illegal_op;

            if (b == 0xf0)
                ot = OT_BYTE;
            else if (b == 0xf1 && s->dflag != 2)
                if (s->prefix & PREFIX_DATA)
                    ot = OT_WORD;
                else
                    ot = OT_LONG;
            else
                ot = OT_QUAD;

            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                             cpu_T[0], tcg_const_i32(8 << ot));

            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = ldub_code(s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_op2 = sse_op_table7[b].op[b1];
            if (!sse_op2)
                goto illegal_op;
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_op2 == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = ldub_code(s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                            (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                    offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = ldub_code(s->pc++);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                s->cc_op = CC_OP_EFLAGS;

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        /* 32 bit access */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        /* 64 bit access */
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = ldub_code(s->pc++);
            sse_op2 = sse_op_table5[val];
            if (!sse_op2)
                goto illegal_op;
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = ldub_code(s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = ldub_code(s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_op2 = sse_op_table4[val][b1];
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            s->cc_op = CC_OP_EFLAGS;
        }
    }
}
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(pc_start);
    s->pc = pc_start;
    prefixes = 0;
    aflag = s->code32;
    dflag = s->code32;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
= ldub_code(s
->pc
);
4111 /* check prefixes */
4112 #ifdef TARGET_X86_64
4116 prefixes
|= PREFIX_REPZ
;
4119 prefixes
|= PREFIX_REPNZ
;
4122 prefixes
|= PREFIX_LOCK
;
4143 prefixes
|= PREFIX_DATA
;
4146 prefixes
|= PREFIX_ADR
;
4150 rex_w
= (b
>> 3) & 1;
4151 rex_r
= (b
& 0x4) << 1;
4152 s
->rex_x
= (b
& 0x2) << 2;
4153 REX_B(s
) = (b
& 0x1) << 3;
4154 x86_64_hregs
= 1; /* select uniform byte register addressing */
4158 /* 0x66 is ignored if rex.w is set */
4161 if (prefixes
& PREFIX_DATA
)
4164 if (!(prefixes
& PREFIX_ADR
))
4171 prefixes
|= PREFIX_REPZ
;
4174 prefixes
|= PREFIX_REPNZ
;
4177 prefixes
|= PREFIX_LOCK
;
4198 prefixes
|= PREFIX_DATA
;
4201 prefixes
|= PREFIX_ADR
;
4204 if (prefixes
& PREFIX_DATA
)
4206 if (prefixes
& PREFIX_ADR
)
4210 s
->prefix
= prefixes
;
    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = ldub_code(s->pc++) | 0x100;
        goto reswitch;
        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = ldub_code(s->pc++);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    gen_op_movl_T0_0();
                    s->cc_op = CC_OP_LOGICB + ot;
                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = ldub_code(s->pc++);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_op_ld_T1_A0(ot + s->mem_index);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(s, ot);
                gen_op_movl_T1_im(val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            int val;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;

            modrm = ldub_code(s->pc++);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83)
                    s->rip_offset = 1;
                else
                    s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;
        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0)
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
            gen_op_testl_T0_T1_cc();
            s->cc_op = CC_OP_LOGICB + ot;
            break;
        case 2: /* not */
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
        case 3: /* neg */
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
            if (mod != 3) {
                gen_op_st_T0_A0(ot + s->mem_index);
            } else {
                gen_op_mov_reg_T0(ot, rm);
            }
            gen_op_update_neg_cc();
            s->cc_op = CC_OP_SUBB + ot;
            break;
        case 4: /* mul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_extu_i32_i64(t0, cpu_T[0]);
                    tcg_gen_extu_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_mulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case OT_BYTE:
                gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                s->cc_op = CC_OP_MULB;
                break;
            case OT_WORD:
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                s->cc_op = CC_OP_MULW;
                break;
            default:
            case OT_LONG:
#ifdef TARGET_X86_64
                gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_LONG, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
                gen_op_mov_reg_T0(OT_LONG, R_EDX);
#else
                {
                    TCGv_i64 t0, t1;
                    t0 = tcg_temp_new_i64();
                    t1 = tcg_temp_new_i64();
                    gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
                    tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                    tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                    tcg_gen_mul_i64(t0, t0, t1);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EAX);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                    tcg_gen_shri_i64(t0, t0, 32);
                    tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                    gen_op_mov_reg_T0(OT_LONG, R_EDX);
                    tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                }
#endif
                s->cc_op = CC_OP_MULL;
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_helper_imulq_EAX_T0(cpu_T[0]);
                s->cc_op = CC_OP_MULQ;
                break;
#endif
            }
            break;
        case 6: /* div */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case OT_BYTE:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_T[0]);
                break;
            case OT_WORD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_T[0]);
                break;
            default:
            case OT_LONG:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_T[0]);
                break;
#ifdef TARGET_X86_64
            case OT_QUAD:
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_T[0]);
                break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = OT_QUAD;
            } else if (op == 3 || op == 5) {
                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = dflag ? OT_QUAD : OT_WORD;
            }
        }
        if (mod != 3) {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }

        switch(op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            next_eip = s->pc - s->cs_base;
            gen_movtl_T1_im(next_eip);
            gen_push_T1(s);
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 3: /* lcall Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_lcall:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
            break;
        case 4: /* jmp Ev */
            if (s->dflag == 0)
                gen_op_andl_T0_ffff();
            gen_op_jmp_T0();
            gen_eob(s);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        do_ljmp:
            if (s->pe && !s->vm86) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
            break;
        case 6: /* push Ev */
            gen_push_T0(s);
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;

        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_TN_reg(ot, 1, reg);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        val = insn_get(s, ot);

        gen_op_mov_TN_reg(ot, 0, OR_EAX);
        gen_op_movl_T1_im(val);
        gen_op_testl_T0_T1_cc();
        s->cc_op = CC_OP_LOGICB + ot;
        break;
    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, R_EAX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
        } else {
            gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        }
        break;
    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_T0(OT_QUAD, R_EDX);
        } else
#endif
        if (dflag == 1) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69)
            s->rip_offset = insn_const_size(ot);
        else if (b == 0x6b)
            s->rip_offset = 1;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(s, OT_BYTE);
            gen_op_movl_T1_im(val);
        } else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }

#ifdef TARGET_X86_64
        if (ot == OT_QUAD) {
            gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else
#endif
        if (ot == OT_LONG) {
#ifdef TARGET_X86_64
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
#else
            {
                TCGv_i64 t0, t1;
                t0 = tcg_temp_new_i64();
                t1 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(t0, cpu_T[0]);
                tcg_gen_ext_i32_i64(t1, cpu_T[1]);
                tcg_gen_mul_i64(t0, t0, t1);
                tcg_gen_trunc_i64_i32(cpu_T[0], t0);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
                tcg_gen_shri_i64(t0, t0, 32);
                tcg_gen_trunc_i64_i32(cpu_T[1], t0);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
            }
#endif
        } else {
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
        }
        gen_op_mov_reg_T0(ot, reg);
        s->cc_op = CC_OP_MULB + ot;
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
        s->cc_op = CC_OP_ADDB + ot;
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            int label1, label2;
            TCGv t0, t1, t2, a0;

            if ((b & 1) == 0)
                ot = OT_BYTE;
            else
                ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
            if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
            tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
            if (mod == 3) {
                label2 = gen_new_label();
                gen_op_mov_reg_v(ot, R_EAX, t0);
                tcg_gen_br(label2);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
                gen_set_label(label2);
            } else {
                tcg_gen_mov_tl(t1, t0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                /* always store */
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_dst, t2);
            s->cc_op = CC_OP_SUBB + ot;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_A0);
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_A0);
        }
        s->cc_op = CC_OP_EFLAGS;
        break;
        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
        gen_push_T0(s);
        break;
    case 0x58 ... 0x5f: /* pop */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s);
        gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        if (b == 0x68)
            val = insn_get(s, ot);
        else
            val = (int8_t)insn_get(s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_push_T0(s);
        break;
    case 0x8f: /* pop Ev */
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = lduw_code(s->pc);
            s->pc += 2;
            level = ldub_code(s->pc++);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        /* XXX: exception not precise (ESP is updated before potential exception) */
        if (CODE64(s)) {
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_QUAD, R_ESP);
        } else if (s->ss32) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
            gen_op_mov_reg_T0(OT_LONG, R_ESP);
        } else {
            gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_WORD, R_ESP);
        }
        gen_pop_T0(s);
        if (CODE64(s)) {
            ot = dflag ? OT_QUAD : OT_WORD;
        } else {
            ot = dflag + OT_WORD;
        }
        gen_op_mov_reg_T0(ot, R_EBP);
        gen_pop_update(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
        gen_push_T0(s);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_T0(s);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        gen_pop_T0(s);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        gen_pop_update(s);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace. */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq();
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
        gen_pop_update(s);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /**************************/
        /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        /* generate a generic store */
        gen_ldst_modrm(s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        }
        val = insn_get(s, ot);
        gen_op_movl_T0_im(val);
        if (mod != 3)
            gen_op_st_T0_A0(ot + s->mem_index);
        else
            gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = OT_WORD + dflag;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_T0(ot, reg);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
        if (reg == R_SS) {
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq();
            s->tf = 0;
        }
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        if (mod == 3)
            ot = OT_WORD + dflag;
        else
            ot = OT_WORD;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
        break;

    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of source */
            ot = (b & 1) + OT_BYTE;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(ot, 0, rm);
                switch(ot | (b & 8)) {
                case OT_BYTE:
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_BYTE | 8:
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    break;
                case OT_WORD:
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                case OT_WORD | 8:
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (b & 8) {
                    gen_op_lds_T0_A0(ot + s->mem_index);
                } else {
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        }
        break;
5197 case 0x8d: /* lea */
5198 ot
= dflag
+ OT_WORD
;
5199 modrm
= ldub_code(s
->pc
++);
5200 mod
= (modrm
>> 6) & 3;
5203 reg
= ((modrm
>> 3) & 7) | rex_r
;
5204 /* we must ensure that no segment is added */
5208 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5210 gen_op_mov_reg_A0(ot
- OT_WORD
, reg
);
5213 case 0xa0: /* mov EAX, Ov */
5215 case 0xa2: /* mov Ov, EAX */
5218 target_ulong offset_addr
;
5223 ot
= dflag
+ OT_WORD
;
5224 #ifdef TARGET_X86_64
5225 if (s
->aflag
== 2) {
5226 offset_addr
= ldq_code(s
->pc
);
5228 gen_op_movq_A0_im(offset_addr
);
5233 offset_addr
= insn_get(s
, OT_LONG
);
5235 offset_addr
= insn_get(s
, OT_WORD
);
5237 gen_op_movl_A0_im(offset_addr
);
5239 gen_add_A0_ds_seg(s
);
5241 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5242 gen_op_mov_reg_T0(ot
, R_EAX
);
5244 gen_op_mov_TN_reg(ot
, 0, R_EAX
);
5245 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5249 case 0xd7: /* xlat */
5250 #ifdef TARGET_X86_64
5251 if (s
->aflag
== 2) {
5252 gen_op_movq_A0_reg(R_EBX
);
5253 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5254 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5255 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5259 gen_op_movl_A0_reg(R_EBX
);
5260 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5261 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5262 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5264 gen_op_andl_A0_ffff();
5266 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
5268 gen_add_A0_ds_seg(s
);
5269 gen_op_ldu_T0_A0(OT_BYTE
+ s
->mem_index
);
5270 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
5272 case 0xb0 ... 0xb7: /* mov R, Ib */
5273 val
= insn_get(s
, OT_BYTE
);
5274 gen_op_movl_T0_im(val
);
5275 gen_op_mov_reg_T0(OT_BYTE
, (b
& 7) | REX_B(s
));
5277 case 0xb8 ... 0xbf: /* mov R, Iv */
5278 #ifdef TARGET_X86_64
5282 tmp
= ldq_code(s
->pc
);
5284 reg
= (b
& 7) | REX_B(s
);
5285 gen_movtl_T0_im(tmp
);
5286 gen_op_mov_reg_T0(OT_QUAD
, reg
);
5290 ot
= dflag
? OT_LONG
: OT_WORD
;
5291 val
= insn_get(s
, ot
);
5292 reg
= (b
& 7) | REX_B(s
);
5293 gen_op_movl_T0_im(val
);
5294 gen_op_mov_reg_T0(ot
, reg
);
5298 case 0x91 ... 0x97: /* xchg R, EAX */
5300 ot
= dflag
+ OT_WORD
;
5301 reg
= (b
& 7) | REX_B(s
);
5305 case 0x87: /* xchg Ev, Gv */
5309 ot
= dflag
+ OT_WORD
;
5310 modrm
= ldub_code(s
->pc
++);
5311 reg
= ((modrm
>> 3) & 7) | rex_r
;
5312 mod
= (modrm
>> 6) & 3;
5314 rm
= (modrm
& 7) | REX_B(s
);
5316 gen_op_mov_TN_reg(ot
, 0, reg
);
5317 gen_op_mov_TN_reg(ot
, 1, rm
);
5318 gen_op_mov_reg_T0(ot
, rm
);
5319 gen_op_mov_reg_T1(ot
, reg
);
5321 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5322 gen_op_mov_TN_reg(ot
, 0, reg
);
5323 /* for xchg, lock is implicit */
5324 if (!(prefixes
& PREFIX_LOCK
))
5326 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5327 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5328 if (!(prefixes
& PREFIX_LOCK
))
5329 gen_helper_unlock();
5330 gen_op_mov_reg_T1(ot
, reg
);
5333 case 0xc4: /* les Gv */
5338 case 0xc5: /* lds Gv */
5343 case 0x1b2: /* lss Gv */
5346 case 0x1b4: /* lfs Gv */
5349 case 0x1b5: /* lgs Gv */
5352 ot
= dflag
? OT_LONG
: OT_WORD
;
5353 modrm
= ldub_code(s
->pc
++);
5354 reg
= ((modrm
>> 3) & 7) | rex_r
;
5355 mod
= (modrm
>> 6) & 3;
5358 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5359 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5360 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5361 /* load the segment first to handle exceptions properly */
5362 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5363 gen_movl_seg_T0(s
, op
, pc_start
- s
->cs_base
);
5364 /* then put the data */
5365 gen_op_mov_reg_T1(ot
, reg
);
5367 gen_jmp_im(s
->pc
- s
->cs_base
);
5372 /************************/
5383 ot
= dflag
+ OT_WORD
;
5385 modrm
= ldub_code(s
->pc
++);
5386 mod
= (modrm
>> 6) & 3;
5387 op
= (modrm
>> 3) & 7;
5393 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5396 opreg
= (modrm
& 7) | REX_B(s
);
5401 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5404 shift
= ldub_code(s
->pc
++);
5406 gen_shifti(s
, op
, ot
, opreg
, shift
);
5421 case 0x1a4: /* shld imm */
5425 case 0x1a5: /* shld cl */
5429 case 0x1ac: /* shrd imm */
5433 case 0x1ad: /* shrd cl */
5437 ot
= dflag
+ OT_WORD
;
5438 modrm
= ldub_code(s
->pc
++);
5439 mod
= (modrm
>> 6) & 3;
5440 rm
= (modrm
& 7) | REX_B(s
);
5441 reg
= ((modrm
>> 3) & 7) | rex_r
;
5443 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5448 gen_op_mov_TN_reg(ot
, 1, reg
);
5451 val
= ldub_code(s
->pc
++);
5452 tcg_gen_movi_tl(cpu_T3
, val
);
5454 tcg_gen_mov_tl(cpu_T3
, cpu_regs
[R_ECX
]);
5456 gen_shiftd_rm_T1_T3(s
, ot
, opreg
, op
);
5459 /************************/
5462 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5463 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5464 /* XXX: what to do if illegal op ? */
5465 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5468 modrm
= ldub_code(s
->pc
++);
5469 mod
= (modrm
>> 6) & 3;
5471 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5474 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
5476 case 0x00 ... 0x07: /* fxxxs */
5477 case 0x10 ... 0x17: /* fixxxl */
5478 case 0x20 ... 0x27: /* fxxxl */
5479 case 0x30 ... 0x37: /* fixxx */
5486 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5487 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5488 gen_helper_flds_FT0(cpu_tmp2_i32
);
5491 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5492 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5493 gen_helper_fildl_FT0(cpu_tmp2_i32
);
5496 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5497 (s
->mem_index
>> 2) - 1);
5498 gen_helper_fldl_FT0(cpu_tmp1_i64
);
5502 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5503 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5504 gen_helper_fildl_FT0(cpu_tmp2_i32
);
5508 gen_helper_fp_arith_ST0_FT0(op1
);
5510 /* fcomp needs pop */
5515 case 0x08: /* flds */
5516 case 0x0a: /* fsts */
5517 case 0x0b: /* fstps */
5518 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5519 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5520 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5525 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5526 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5527 gen_helper_flds_ST0(cpu_tmp2_i32
);
5530 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5531 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5532 gen_helper_fildl_ST0(cpu_tmp2_i32
);
5535 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5536 (s
->mem_index
>> 2) - 1);
5537 gen_helper_fldl_ST0(cpu_tmp1_i64
);
5541 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5542 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5543 gen_helper_fildl_ST0(cpu_tmp2_i32
);
5548 /* XXX: the corresponding CPUID bit must be tested ! */
5551 gen_helper_fisttl_ST0(cpu_tmp2_i32
);
5552 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5553 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5556 gen_helper_fisttll_ST0(cpu_tmp1_i64
);
5557 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5558 (s
->mem_index
>> 2) - 1);
5562 gen_helper_fistt_ST0(cpu_tmp2_i32
);
5563 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5564 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5572 gen_helper_fsts_ST0(cpu_tmp2_i32
);
5573 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5574 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5577 gen_helper_fistl_ST0(cpu_tmp2_i32
);
5578 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5579 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5582 gen_helper_fstl_ST0(cpu_tmp1_i64
);
5583 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5584 (s
->mem_index
>> 2) - 1);
5588 gen_helper_fist_ST0(cpu_tmp2_i32
);
5589 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5590 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5598 case 0x0c: /* fldenv mem */
5599 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5600 gen_op_set_cc_op(s
->cc_op
);
5601 gen_jmp_im(pc_start
- s
->cs_base
);
5603 cpu_A0
, tcg_const_i32(s
->dflag
));
5605 case 0x0d: /* fldcw mem */
5606 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
5607 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5608 gen_helper_fldcw(cpu_tmp2_i32
);
5610 case 0x0e: /* fnstenv mem */
5611 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5612 gen_op_set_cc_op(s
->cc_op
);
5613 gen_jmp_im(pc_start
- s
->cs_base
);
5614 gen_helper_fstenv(cpu_A0
, tcg_const_i32(s
->dflag
));
5616 case 0x0f: /* fnstcw mem */
5617 gen_helper_fnstcw(cpu_tmp2_i32
);
5618 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5619 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5621 case 0x1d: /* fldt mem */
5622 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5623 gen_op_set_cc_op(s
->cc_op
);
5624 gen_jmp_im(pc_start
- s
->cs_base
);
5625 gen_helper_fldt_ST0(cpu_A0
);
5627 case 0x1f: /* fstpt mem */
5628 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5629 gen_op_set_cc_op(s
->cc_op
);
5630 gen_jmp_im(pc_start
- s
->cs_base
);
5631 gen_helper_fstt_ST0(cpu_A0
);
5634 case 0x2c: /* frstor mem */
5635 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5636 gen_op_set_cc_op(s
->cc_op
);
5637 gen_jmp_im(pc_start
- s
->cs_base
);
5638 gen_helper_frstor(cpu_A0
, tcg_const_i32(s
->dflag
));
5640 case 0x2e: /* fnsave mem */
5641 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5642 gen_op_set_cc_op(s
->cc_op
);
5643 gen_jmp_im(pc_start
- s
->cs_base
);
5644 gen_helper_fsave(cpu_A0
, tcg_const_i32(s
->dflag
));
5646 case 0x2f: /* fnstsw mem */
5647 gen_helper_fnstsw(cpu_tmp2_i32
);
5648 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5649 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5651 case 0x3c: /* fbld */
5652 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5653 gen_op_set_cc_op(s
->cc_op
);
5654 gen_jmp_im(pc_start
- s
->cs_base
);
5655 gen_helper_fbld_ST0(cpu_A0
);
5657 case 0x3e: /* fbstp */
5658 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5659 gen_op_set_cc_op(s
->cc_op
);
5660 gen_jmp_im(pc_start
- s
->cs_base
);
5661 gen_helper_fbst_ST0(cpu_A0
);
5664 case 0x3d: /* fildll */
5665 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5666 (s
->mem_index
>> 2) - 1);
5667 gen_helper_fildll_ST0(cpu_tmp1_i64
);
5669 case 0x3f: /* fistpll */
5670 gen_helper_fistll_ST0(cpu_tmp1_i64
);
5671 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5672 (s
->mem_index
>> 2) - 1);
5679 /* register float ops */
5683 case 0x08: /* fld sti */
5685 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg
+ 1) & 7));
5687 case 0x09: /* fxchg sti */
5688 case 0x29: /* fxchg4 sti, undocumented op */
5689 case 0x39: /* fxchg7 sti, undocumented op */
5690 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg
));
5692 case 0x0a: /* grp d9/2 */
5695 /* check exceptions (FreeBSD FPU probe) */
5696 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5697 gen_op_set_cc_op(s
->cc_op
);
5698 gen_jmp_im(pc_start
- s
->cs_base
);
5705 case 0x0c: /* grp d9/4 */
5708 gen_helper_fchs_ST0();
5711 gen_helper_fabs_ST0();
5714 gen_helper_fldz_FT0();
5715 gen_helper_fcom_ST0_FT0();
5718 gen_helper_fxam_ST0();
5724 case 0x0d: /* grp d9/5 */
5729 gen_helper_fld1_ST0();
5733 gen_helper_fldl2t_ST0();
5737 gen_helper_fldl2e_ST0();
5741 gen_helper_fldpi_ST0();
5745 gen_helper_fldlg2_ST0();
5749 gen_helper_fldln2_ST0();
5753 gen_helper_fldz_ST0();
5760 case 0x0e: /* grp d9/6 */
5771 case 3: /* fpatan */
5772 gen_helper_fpatan();
5774 case 4: /* fxtract */
5775 gen_helper_fxtract();
5777 case 5: /* fprem1 */
5778 gen_helper_fprem1();
5780 case 6: /* fdecstp */
5781 gen_helper_fdecstp();
5784 case 7: /* fincstp */
5785 gen_helper_fincstp();
5789 case 0x0f: /* grp d9/7 */
5794 case 1: /* fyl2xp1 */
5795 gen_helper_fyl2xp1();
5800 case 3: /* fsincos */
5801 gen_helper_fsincos();
5803 case 5: /* fscale */
5804 gen_helper_fscale();
5806 case 4: /* frndint */
5807 gen_helper_frndint();
5818 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5819 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5820 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5826 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
5830 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5831 gen_helper_fp_arith_ST0_FT0(op1
);
5835 case 0x02: /* fcom */
5836 case 0x22: /* fcom2, undocumented op */
5837 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5838 gen_helper_fcom_ST0_FT0();
5840 case 0x03: /* fcomp */
5841 case 0x23: /* fcomp3, undocumented op */
5842 case 0x32: /* fcomp5, undocumented op */
5843 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5844 gen_helper_fcom_ST0_FT0();
5847 case 0x15: /* da/5 */
5849 case 1: /* fucompp */
5850 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5851 gen_helper_fucom_ST0_FT0();
5861 case 0: /* feni (287 only, just do nop here) */
5863 case 1: /* fdisi (287 only, just do nop here) */
5868 case 3: /* fninit */
5869 gen_helper_fninit();
5871 case 4: /* fsetpm (287 only, just do nop here) */
5877 case 0x1d: /* fucomi */
5878 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5879 gen_op_set_cc_op(s
->cc_op
);
5880 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5881 gen_helper_fucomi_ST0_FT0();
5882 s
->cc_op
= CC_OP_EFLAGS
;
5884 case 0x1e: /* fcomi */
5885 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5886 gen_op_set_cc_op(s
->cc_op
);
5887 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5888 gen_helper_fcomi_ST0_FT0();
5889 s
->cc_op
= CC_OP_EFLAGS
;
5891 case 0x28: /* ffree sti */
5892 gen_helper_ffree_STN(tcg_const_i32(opreg
));
5894 case 0x2a: /* fst sti */
5895 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg
));
5897 case 0x2b: /* fstp sti */
5898 case 0x0b: /* fstp1 sti, undocumented op */
5899 case 0x3a: /* fstp8 sti, undocumented op */
5900 case 0x3b: /* fstp9 sti, undocumented op */
5901 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg
));
5904 case 0x2c: /* fucom st(i) */
5905 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5906 gen_helper_fucom_ST0_FT0();
5908 case 0x2d: /* fucomp st(i) */
5909 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5910 gen_helper_fucom_ST0_FT0();
5913 case 0x33: /* de/3 */
5915 case 1: /* fcompp */
5916 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5917 gen_helper_fcom_ST0_FT0();
5925 case 0x38: /* ffreep sti, undocumented op */
5926 gen_helper_ffree_STN(tcg_const_i32(opreg
));
5929 case 0x3c: /* df/4 */
5932 gen_helper_fnstsw(cpu_tmp2_i32
);
5933 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5934 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5940 case 0x3d: /* fucomip */
5941 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5942 gen_op_set_cc_op(s
->cc_op
);
5943 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5944 gen_helper_fucomi_ST0_FT0();
5946 s
->cc_op
= CC_OP_EFLAGS
;
5948 case 0x3e: /* fcomip */
5949 if (s
->cc_op
!= CC_OP_DYNAMIC
)
5950 gen_op_set_cc_op(s
->cc_op
);
5951 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg
));
5952 gen_helper_fcomi_ST0_FT0();
5954 s
->cc_op
= CC_OP_EFLAGS
;
5956 case 0x10 ... 0x13: /* fcmovxx */
5960 static const uint8_t fcmov_cc
[8] = {
5966 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
5967 l1
= gen_new_label();
5968 gen_jcc1(s
, s
->cc_op
, op1
, l1
);
5969 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg
));
5978 /************************/
5981 case 0xa4: /* movsS */
5986 ot
= dflag
+ OT_WORD
;
5988 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5989 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
5995 case 0xaa: /* stosS */
6000 ot
= dflag
+ OT_WORD
;
6002 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6003 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6008 case 0xac: /* lodsS */
6013 ot
= dflag
+ OT_WORD
;
6014 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6015 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6020 case 0xae: /* scasS */
6025 ot
= dflag
+ OT_WORD
;
6026 if (prefixes
& PREFIX_REPNZ
) {
6027 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6028 } else if (prefixes
& PREFIX_REPZ
) {
6029 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6032 s
->cc_op
= CC_OP_SUBB
+ ot
;
6036 case 0xa6: /* cmpsS */
6041 ot
= dflag
+ OT_WORD
;
6042 if (prefixes
& PREFIX_REPNZ
) {
6043 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6044 } else if (prefixes
& PREFIX_REPZ
) {
6045 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6048 s
->cc_op
= CC_OP_SUBB
+ ot
;
6051 case 0x6c: /* insS */
6056 ot
= dflag
? OT_LONG
: OT_WORD
;
6057 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6058 gen_op_andl_T0_ffff();
6059 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6060 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6061 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6062 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6066 gen_jmp(s
, s
->pc
- s
->cs_base
);
6070 case 0x6e: /* outsS */
6075 ot
= dflag
? OT_LONG
: OT_WORD
;
6076 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6077 gen_op_andl_T0_ffff();
6078 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6079 svm_is_rep(prefixes
) | 4);
6080 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6081 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6085 gen_jmp(s
, s
->pc
- s
->cs_base
);
6090 /************************/
6098 ot
= dflag
? OT_LONG
: OT_WORD
;
6099 val
= ldub_code(s
->pc
++);
6100 gen_op_movl_T0_im(val
);
6101 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6102 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6105 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6106 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6107 gen_op_mov_reg_T1(ot
, R_EAX
);
6110 gen_jmp(s
, s
->pc
- s
->cs_base
);
6118 ot
= dflag
? OT_LONG
: OT_WORD
;
6119 val
= ldub_code(s
->pc
++);
6120 gen_op_movl_T0_im(val
);
6121 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6122 svm_is_rep(prefixes
));
6123 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6127 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6128 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
6129 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6130 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6133 gen_jmp(s
, s
->pc
- s
->cs_base
);
6141 ot
= dflag
? OT_LONG
: OT_WORD
;
6142 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6143 gen_op_andl_T0_ffff();
6144 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6145 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6148 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6149 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6150 gen_op_mov_reg_T1(ot
, R_EAX
);
6153 gen_jmp(s
, s
->pc
- s
->cs_base
);
6161 ot
= dflag
? OT_LONG
: OT_WORD
;
6162 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6163 gen_op_andl_T0_ffff();
6164 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6165 svm_is_rep(prefixes
));
6166 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6170 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6171 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
6172 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6173 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6176 gen_jmp(s
, s
->pc
- s
->cs_base
);
6180 /************************/
6182 case 0xc2: /* ret im */
6183 val
= ldsw_code(s
->pc
);
6186 if (CODE64(s
) && s
->dflag
)
6188 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6190 gen_op_andl_T0_ffff();
6194 case 0xc3: /* ret */
6198 gen_op_andl_T0_ffff();
6202 case 0xca: /* lret im */
6203 val
= ldsw_code(s
->pc
);
6206 if (s
->pe
&& !s
->vm86
) {
6207 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6208 gen_op_set_cc_op(s
->cc_op
);
6209 gen_jmp_im(pc_start
- s
->cs_base
);
6210 gen_helper_lret_protected(tcg_const_i32(s
->dflag
),
6211 tcg_const_i32(val
));
6215 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6217 gen_op_andl_T0_ffff();
6218 /* NOTE: keeping EIP updated is not a problem in case of
6222 gen_op_addl_A0_im(2 << s
->dflag
);
6223 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6224 gen_op_movl_seg_T0_vm(R_CS
);
6225 /* add stack offset */
6226 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6230 case 0xcb: /* lret */
6233 case 0xcf: /* iret */
6234 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6237 gen_helper_iret_real(tcg_const_i32(s
->dflag
));
6238 s
->cc_op
= CC_OP_EFLAGS
;
6239 } else if (s
->vm86
) {
6241 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6243 gen_helper_iret_real(tcg_const_i32(s
->dflag
));
6244 s
->cc_op
= CC_OP_EFLAGS
;
6247 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6248 gen_op_set_cc_op(s
->cc_op
);
6249 gen_jmp_im(pc_start
- s
->cs_base
);
6250 gen_helper_iret_protected(tcg_const_i32(s
->dflag
),
6251 tcg_const_i32(s
->pc
- s
->cs_base
));
6252 s
->cc_op
= CC_OP_EFLAGS
;
6256 case 0xe8: /* call im */
6259 tval
= (int32_t)insn_get(s
, OT_LONG
);
6261 tval
= (int16_t)insn_get(s
, OT_WORD
);
6262 next_eip
= s
->pc
- s
->cs_base
;
6268 gen_movtl_T0_im(next_eip
);
6273 case 0x9a: /* lcall im */
6275 unsigned int selector
, offset
;
6279 ot
= dflag
? OT_LONG
: OT_WORD
;
6280 offset
= insn_get(s
, ot
);
6281 selector
= insn_get(s
, OT_WORD
);
6283 gen_op_movl_T0_im(selector
);
6284 gen_op_movl_T1_imu(offset
);
6287 case 0xe9: /* jmp im */
6289 tval
= (int32_t)insn_get(s
, OT_LONG
);
6291 tval
= (int16_t)insn_get(s
, OT_WORD
);
6292 tval
+= s
->pc
- s
->cs_base
;
6299 case 0xea: /* ljmp im */
6301 unsigned int selector
, offset
;
6305 ot
= dflag
? OT_LONG
: OT_WORD
;
6306 offset
= insn_get(s
, ot
);
6307 selector
= insn_get(s
, OT_WORD
);
6309 gen_op_movl_T0_im(selector
);
6310 gen_op_movl_T1_imu(offset
);
6313 case 0xeb: /* jmp Jb */
6314 tval
= (int8_t)insn_get(s
, OT_BYTE
);
6315 tval
+= s
->pc
- s
->cs_base
;
6320 case 0x70 ... 0x7f: /* jcc Jb */
6321 tval
= (int8_t)insn_get(s
, OT_BYTE
);
6323 case 0x180 ... 0x18f: /* jcc Jv */
6325 tval
= (int32_t)insn_get(s
, OT_LONG
);
6327 tval
= (int16_t)insn_get(s
, OT_WORD
);
6330 next_eip
= s
->pc
- s
->cs_base
;
6334 gen_jcc(s
, b
, tval
, next_eip
);
6337 case 0x190 ... 0x19f: /* setcc Gv */
6338 modrm
= ldub_code(s
->pc
++);
6340 gen_ldst_modrm(s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6342 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6347 ot
= dflag
+ OT_WORD
;
6348 modrm
= ldub_code(s
->pc
++);
6349 reg
= ((modrm
>> 3) & 7) | rex_r
;
6350 mod
= (modrm
>> 6) & 3;
6351 t0
= tcg_temp_local_new();
6353 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6354 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
6356 rm
= (modrm
& 7) | REX_B(s
);
6357 gen_op_mov_v_reg(ot
, t0
, rm
);
6359 #ifdef TARGET_X86_64
6360 if (ot
== OT_LONG
) {
6361 /* XXX: specific Intel behaviour ? */
6362 l1
= gen_new_label();
6363 gen_jcc1(s
, s
->cc_op
, b
^ 1, l1
);
6364 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
6366 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_regs
[reg
]);
6370 l1
= gen_new_label();
6371 gen_jcc1(s
, s
->cc_op
, b
^ 1, l1
);
6372 gen_op_mov_reg_v(ot
, reg
, t0
);
6379 /************************/
6381 case 0x9c: /* pushf */
6382 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6383 if (s
->vm86
&& s
->iopl
!= 3) {
6384 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6386 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6387 gen_op_set_cc_op(s
->cc_op
);
6388 gen_helper_read_eflags(cpu_T
[0]);
6392 case 0x9d: /* popf */
6393 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6394 if (s
->vm86
&& s
->iopl
!= 3) {
6395 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6400 gen_helper_write_eflags(cpu_T
[0],
6401 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
| IOPL_MASK
)));
6403 gen_helper_write_eflags(cpu_T
[0],
6404 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
| IOPL_MASK
) & 0xffff));
6407 if (s
->cpl
<= s
->iopl
) {
6409 gen_helper_write_eflags(cpu_T
[0],
6410 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
)));
6412 gen_helper_write_eflags(cpu_T
[0],
6413 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
| IF_MASK
) & 0xffff));
6417 gen_helper_write_eflags(cpu_T
[0],
6418 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
)));
6420 gen_helper_write_eflags(cpu_T
[0],
6421 tcg_const_i32((TF_MASK
| AC_MASK
| ID_MASK
| NT_MASK
) & 0xffff));
6426 s
->cc_op
= CC_OP_EFLAGS
;
6427 /* abort translation because TF flag may change */
6428 gen_jmp_im(s
->pc
- s
->cs_base
);
6432 case 0x9e: /* sahf */
6433 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6435 gen_op_mov_TN_reg(OT_BYTE
, 0, R_AH
);
6436 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6437 gen_op_set_cc_op(s
->cc_op
);
6438 gen_compute_eflags(cpu_cc_src
);
6439 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6440 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6441 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[0]);
6442 s
->cc_op
= CC_OP_EFLAGS
;
6444 case 0x9f: /* lahf */
6445 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6447 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6448 gen_op_set_cc_op(s
->cc_op
);
6449 gen_compute_eflags(cpu_T
[0]);
6450 /* Note: gen_compute_eflags() only gives the condition codes */
6451 tcg_gen_ori_tl(cpu_T
[0], cpu_T
[0], 0x02);
6452 gen_op_mov_reg_T0(OT_BYTE
, R_AH
);
6454 case 0xf5: /* cmc */
6455 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6456 gen_op_set_cc_op(s
->cc_op
);
6457 gen_compute_eflags(cpu_cc_src
);
6458 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6459 s
->cc_op
= CC_OP_EFLAGS
;
6461 case 0xf8: /* clc */
6462 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6463 gen_op_set_cc_op(s
->cc_op
);
6464 gen_compute_eflags(cpu_cc_src
);
6465 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6466 s
->cc_op
= CC_OP_EFLAGS
;
6468 case 0xf9: /* stc */
6469 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6470 gen_op_set_cc_op(s
->cc_op
);
6471 gen_compute_eflags(cpu_cc_src
);
6472 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6473 s
->cc_op
= CC_OP_EFLAGS
;
6475 case 0xfc: /* cld */
6476 tcg_gen_movi_i32(cpu_tmp2_i32
, 1);
6477 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUState
, df
));
6479 case 0xfd: /* std */
6480 tcg_gen_movi_i32(cpu_tmp2_i32
, -1);
6481 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUState
, df
));
6484 /************************/
6485 /* bit operations */
6486 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6487 ot
= dflag
+ OT_WORD
;
6488 modrm
= ldub_code(s
->pc
++);
6489 op
= (modrm
>> 3) & 7;
6490 mod
= (modrm
>> 6) & 3;
6491 rm
= (modrm
& 7) | REX_B(s
);
6494 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6495 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6497 gen_op_mov_TN_reg(ot
, 0, rm
);
6500 val
= ldub_code(s
->pc
++);
6501 gen_op_movl_T1_im(val
);
6506 case 0x1a3: /* bt Gv, Ev */
6509 case 0x1ab: /* bts */
6512 case 0x1b3: /* btr */
6515 case 0x1bb: /* btc */
6518 ot
= dflag
+ OT_WORD
;
6519 modrm
= ldub_code(s
->pc
++);
6520 reg
= ((modrm
>> 3) & 7) | rex_r
;
6521 mod
= (modrm
>> 6) & 3;
6522 rm
= (modrm
& 7) | REX_B(s
);
6523 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
6525 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6526 /* specific case: we need to add a displacement */
6527 gen_exts(ot
, cpu_T
[1]);
6528 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
6529 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
6530 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
6531 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6533 gen_op_mov_TN_reg(ot
, 0, rm
);
6536 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
6539 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
6540 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6543 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6544 tcg_gen_movi_tl(cpu_tmp0
, 1);
6545 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6546 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6549 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6550 tcg_gen_movi_tl(cpu_tmp0
, 1);
6551 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6552 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
6553 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6557 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6558 tcg_gen_movi_tl(cpu_tmp0
, 1);
6559 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6560 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6563 s
->cc_op
= CC_OP_SARB
+ ot
;
6566 gen_op_st_T0_A0(ot
+ s
->mem_index
);
6568 gen_op_mov_reg_T0(ot
, rm
);
6569 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
6570 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6573 case 0x1bc: /* bsf */
6574 case 0x1bd: /* bsr */
6579 ot
= dflag
+ OT_WORD
;
6580 modrm
= ldub_code(s
->pc
++);
6581 reg
= ((modrm
>> 3) & 7) | rex_r
;
6582 gen_ldst_modrm(s
,modrm
, ot
, OR_TMP0
, 0);
6583 gen_extu(ot
, cpu_T
[0]);
6584 t0
= tcg_temp_local_new();
6585 tcg_gen_mov_tl(t0
, cpu_T
[0]);
6586 if ((b
& 1) && (prefixes
& PREFIX_REPZ
) &&
6587 (s
->cpuid_ext3_features
& CPUID_EXT3_ABM
)) {
6589 case OT_WORD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6590 tcg_const_i32(16)); break;
6591 case OT_LONG
: gen_helper_lzcnt(cpu_T
[0], t0
,
6592 tcg_const_i32(32)); break;
6593 case OT_QUAD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6594 tcg_const_i32(64)); break;
6596 gen_op_mov_reg_T0(ot
, reg
);
6598 label1
= gen_new_label();
6599 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6600 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, label1
);
6602 gen_helper_bsr(cpu_T
[0], t0
);
6604 gen_helper_bsf(cpu_T
[0], t0
);
6606 gen_op_mov_reg_T0(ot
, reg
);
6607 tcg_gen_movi_tl(cpu_cc_dst
, 1);
6608 gen_set_label(label1
);
6609 tcg_gen_discard_tl(cpu_cc_src
);
6610 s
->cc_op
= CC_OP_LOGICB
+ ot
;
6615 /************************/
6617 case 0x27: /* daa */
6620 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6621 gen_op_set_cc_op(s
->cc_op
);
6623 s
->cc_op
= CC_OP_EFLAGS
;
6625 case 0x2f: /* das */
6628 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6629 gen_op_set_cc_op(s
->cc_op
);
6631 s
->cc_op
= CC_OP_EFLAGS
;
6633 case 0x37: /* aaa */
6636 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6637 gen_op_set_cc_op(s
->cc_op
);
6639 s
->cc_op
= CC_OP_EFLAGS
;
6641 case 0x3f: /* aas */
6644 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6645 gen_op_set_cc_op(s
->cc_op
);
6647 s
->cc_op
= CC_OP_EFLAGS
;
6649 case 0xd4: /* aam */
6652 val
= ldub_code(s
->pc
++);
6654 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6656 gen_helper_aam(tcg_const_i32(val
));
6657 s
->cc_op
= CC_OP_LOGICB
;
6660 case 0xd5: /* aad */
6663 val
= ldub_code(s
->pc
++);
6664 gen_helper_aad(tcg_const_i32(val
));
6665 s
->cc_op
= CC_OP_LOGICB
;
6667 /************************/
6669 case 0x90: /* nop */
6670 /* XXX: correct lock test for all insn */
6671 if (prefixes
& PREFIX_LOCK
) {
6674 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6676 goto do_xchg_reg_eax
;
6678 if (prefixes
& PREFIX_REPZ
) {
6679 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
6682 case 0x9b: /* fwait */
6683 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6684 (HF_MP_MASK
| HF_TS_MASK
)) {
6685 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6687 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6688 gen_op_set_cc_op(s
->cc_op
);
6689 gen_jmp_im(pc_start
- s
->cs_base
);
6693 case 0xcc: /* int3 */
6694 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6696 case 0xcd: /* int N */
6697 val
= ldub_code(s
->pc
++);
6698 if (s
->vm86
&& s
->iopl
!= 3) {
6699 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6701 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6704 case 0xce: /* into */
6707 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6708 gen_op_set_cc_op(s
->cc_op
);
6709 gen_jmp_im(pc_start
- s
->cs_base
);
6710 gen_helper_into(tcg_const_i32(s
->pc
- pc_start
));
6713 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6714 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6716 gen_debug(s
, pc_start
- s
->cs_base
);
6719 tb_flush(cpu_single_env
);
6720 cpu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6724 case 0xfa: /* cli */
6726 if (s
->cpl
<= s
->iopl
) {
6729 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6735 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6739 case 0xfb: /* sti */
6741 if (s
->cpl
<= s
->iopl
) {
6744 /* interruptions are enabled only the first insn after sti */
6745 /* If several instructions disable interrupts, only the
6747 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
6748 gen_helper_set_inhibit_irq();
6749 /* give a chance to handle pending irqs */
6750 gen_jmp_im(s
->pc
- s
->cs_base
);
6753 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6759 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6763 case 0x62: /* bound */
6766 ot
= dflag
? OT_LONG
: OT_WORD
;
6767 modrm
= ldub_code(s
->pc
++);
6768 reg
= (modrm
>> 3) & 7;
6769 mod
= (modrm
>> 6) & 3;
6772 gen_op_mov_TN_reg(ot
, 0, reg
);
6773 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
6774 gen_jmp_im(pc_start
- s
->cs_base
);
6775 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6777 gen_helper_boundw(cpu_A0
, cpu_tmp2_i32
);
6779 gen_helper_boundl(cpu_A0
, cpu_tmp2_i32
);
6781 case 0x1c8 ... 0x1cf: /* bswap reg */
6782 reg
= (b
& 7) | REX_B(s
);
6783 #ifdef TARGET_X86_64
6785 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
6786 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
6787 gen_op_mov_reg_T0(OT_QUAD
, reg
);
6791 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
6792 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
6793 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
6794 gen_op_mov_reg_T0(OT_LONG
, reg
);
6797 case 0xd6: /* salc */
6800 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6801 gen_op_set_cc_op(s
->cc_op
);
6802 gen_compute_eflags_c(cpu_T
[0]);
6803 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
6804 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
6806 case 0xe0: /* loopnz */
6807 case 0xe1: /* loopz */
6808 case 0xe2: /* loop */
6809 case 0xe3: /* jecxz */
6813 tval
= (int8_t)insn_get(s
, OT_BYTE
);
6814 next_eip
= s
->pc
- s
->cs_base
;
6819 l1
= gen_new_label();
6820 l2
= gen_new_label();
6821 l3
= gen_new_label();
6824 case 0: /* loopnz */
6826 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6827 gen_op_set_cc_op(s
->cc_op
);
6828 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6829 gen_op_jz_ecx(s
->aflag
, l3
);
6830 gen_compute_eflags(cpu_tmp0
);
6831 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_Z
);
6833 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, l1
);
6835 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, l1
);
6839 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6840 gen_op_jnz_ecx(s
->aflag
, l1
);
6844 gen_op_jz_ecx(s
->aflag
, l1
);
6849 gen_jmp_im(next_eip
);
6858 case 0x130: /* wrmsr */
6859 case 0x132: /* rdmsr */
6861 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6863 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6864 gen_op_set_cc_op(s
->cc_op
);
6865 gen_jmp_im(pc_start
- s
->cs_base
);
6873 case 0x131: /* rdtsc */
6874 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6875 gen_op_set_cc_op(s
->cc_op
);
6876 gen_jmp_im(pc_start
- s
->cs_base
);
6882 gen_jmp(s
, s
->pc
- s
->cs_base
);
6885 case 0x133: /* rdpmc */
6886 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6887 gen_op_set_cc_op(s
->cc_op
);
6888 gen_jmp_im(pc_start
- s
->cs_base
);
6891 case 0x134: /* sysenter */
6892 /* For Intel SYSENTER is valid on 64-bit */
6893 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6896 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6898 gen_update_cc_op(s
);
6899 gen_jmp_im(pc_start
- s
->cs_base
);
6900 gen_helper_sysenter();
6904 case 0x135: /* sysexit */
6905 /* For Intel SYSEXIT is valid on 64-bit */
6906 if (CODE64(s
) && cpu_single_env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6909 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6911 gen_update_cc_op(s
);
6912 gen_jmp_im(pc_start
- s
->cs_base
);
6913 gen_helper_sysexit(tcg_const_i32(dflag
));
6917 #ifdef TARGET_X86_64
6918 case 0x105: /* syscall */
6919 /* XXX: is it usable in real mode ? */
6920 gen_update_cc_op(s
);
6921 gen_jmp_im(pc_start
- s
->cs_base
);
6922 gen_helper_syscall(tcg_const_i32(s
->pc
- pc_start
));
6925 case 0x107: /* sysret */
6927 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6929 gen_update_cc_op(s
);
6930 gen_jmp_im(pc_start
- s
->cs_base
);
6931 gen_helper_sysret(tcg_const_i32(s
->dflag
));
6932 /* condition codes are modified only in long mode */
6934 s
->cc_op
= CC_OP_EFLAGS
;
6939 case 0x1a2: /* cpuid */
6940 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6941 gen_op_set_cc_op(s
->cc_op
);
6942 gen_jmp_im(pc_start
- s
->cs_base
);
6945 case 0xf4: /* hlt */
6947 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6949 if (s
->cc_op
!= CC_OP_DYNAMIC
)
6950 gen_op_set_cc_op(s
->cc_op
);
6951 gen_jmp_im(pc_start
- s
->cs_base
);
6952 gen_helper_hlt(tcg_const_i32(s
->pc
- pc_start
));
6953 s
->is_jmp
= DISAS_TB_JUMP
;
6957 modrm
= ldub_code(s
->pc
++);
6958 mod
= (modrm
>> 6) & 3;
6959 op
= (modrm
>> 3) & 7;
6962 if (!s
->pe
|| s
->vm86
)
6964 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
6965 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
6969 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
6972 if (!s
->pe
|| s
->vm86
)
6975 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6977 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
6978 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
6979 gen_jmp_im(pc_start
- s
->cs_base
);
6980 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6981 gen_helper_lldt(cpu_tmp2_i32
);
6985 if (!s
->pe
|| s
->vm86
)
6987 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
6988 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
6992 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 1);
6995 if (!s
->pe
|| s
->vm86
)
6998 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7000 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7001 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7002 gen_jmp_im(pc_start
- s
->cs_base
);
7003 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7004 gen_helper_ltr(cpu_tmp2_i32
);
7009 if (!s
->pe
|| s
->vm86
)
7011 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7012 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7013 gen_op_set_cc_op(s
->cc_op
);
7015 gen_helper_verr(cpu_T
[0]);
7017 gen_helper_verw(cpu_T
[0]);
7018 s
->cc_op
= CC_OP_EFLAGS
;
7025 modrm
= ldub_code(s
->pc
++);
7026 mod
= (modrm
>> 6) & 3;
7027 op
= (modrm
>> 3) & 7;
7033 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7034 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7035 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7036 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7037 gen_add_A0_im(s
, 2);
7038 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7040 gen_op_andl_T0_im(0xffffff);
7041 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7046 case 0: /* monitor */
7047 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7050 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7051 gen_op_set_cc_op(s
->cc_op
);
7052 gen_jmp_im(pc_start
- s
->cs_base
);
7053 #ifdef TARGET_X86_64
7054 if (s
->aflag
== 2) {
7055 gen_op_movq_A0_reg(R_EAX
);
7059 gen_op_movl_A0_reg(R_EAX
);
7061 gen_op_andl_A0_ffff();
7063 gen_add_A0_ds_seg(s
);
7064 gen_helper_monitor(cpu_A0
);
7067 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7070 gen_update_cc_op(s
);
7071 gen_jmp_im(pc_start
- s
->cs_base
);
7072 gen_helper_mwait(tcg_const_i32(s
->pc
- pc_start
));
7079 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7080 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7081 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7082 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7083 gen_add_A0_im(s
, 2);
7084 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7086 gen_op_andl_T0_im(0xffffff);
7087 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7093 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7094 gen_op_set_cc_op(s
->cc_op
);
7095 gen_jmp_im(pc_start
- s
->cs_base
);
7098 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7101 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7104 gen_helper_vmrun(tcg_const_i32(s
->aflag
),
7105 tcg_const_i32(s
->pc
- pc_start
));
7107 s
->is_jmp
= DISAS_TB_JUMP
;
7110 case 1: /* VMMCALL */
7111 if (!(s
->flags
& HF_SVME_MASK
))
7113 gen_helper_vmmcall();
7115 case 2: /* VMLOAD */
7116 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7119 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7122 gen_helper_vmload(tcg_const_i32(s
->aflag
));
7125 case 3: /* VMSAVE */
7126 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7129 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7132 gen_helper_vmsave(tcg_const_i32(s
->aflag
));
7136 if ((!(s
->flags
& HF_SVME_MASK
) &&
7137 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7141 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7148 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7151 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7157 case 6: /* SKINIT */
7158 if ((!(s
->flags
& HF_SVME_MASK
) &&
7159 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7162 gen_helper_skinit();
7164 case 7: /* INVLPGA */
7165 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7168 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7171 gen_helper_invlpga(tcg_const_i32(s
->aflag
));
7177 } else if (s
->cpl
!= 0) {
7178 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7180 gen_svm_check_intercept(s
, pc_start
,
7181 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7182 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7183 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7184 gen_add_A0_im(s
, 2);
7185 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7187 gen_op_andl_T0_im(0xffffff);
7189 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7190 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7192 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7193 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7198 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7199 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7200 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7202 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7204 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7208 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7210 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7211 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7212 gen_helper_lmsw(cpu_T
[0]);
7213 gen_jmp_im(s
->pc
- s
->cs_base
);
7218 if (mod
!= 3) { /* invlpg */
7220 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7222 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7223 gen_op_set_cc_op(s
->cc_op
);
7224 gen_jmp_im(pc_start
- s
->cs_base
);
7225 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7226 gen_helper_invlpg(cpu_A0
);
7227 gen_jmp_im(s
->pc
- s
->cs_base
);
7232 case 0: /* swapgs */
7233 #ifdef TARGET_X86_64
7236 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7238 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7239 offsetof(CPUX86State
,segs
[R_GS
].base
));
7240 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7241 offsetof(CPUX86State
,kernelgsbase
));
7242 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7243 offsetof(CPUX86State
,segs
[R_GS
].base
));
7244 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7245 offsetof(CPUX86State
,kernelgsbase
));
7253 case 1: /* rdtscp */
7254 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7256 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7257 gen_op_set_cc_op(s
->cc_op
);
7258 gen_jmp_im(pc_start
- s
->cs_base
);
7261 gen_helper_rdtscp();
7264 gen_jmp(s
, s
->pc
- s
->cs_base
);
7276 case 0x108: /* invd */
7277 case 0x109: /* wbinvd */
7279 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7281 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7285 case 0x63: /* arpl or movslS (x86_64) */
7286 #ifdef TARGET_X86_64
7289 /* d_ot is the size of destination */
7290 d_ot
= dflag
+ OT_WORD
;
7292 modrm
= ldub_code(s
->pc
++);
7293 reg
= ((modrm
>> 3) & 7) | rex_r
;
7294 mod
= (modrm
>> 6) & 3;
7295 rm
= (modrm
& 7) | REX_B(s
);
7298 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7300 if (d_ot
== OT_QUAD
)
7301 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7302 gen_op_mov_reg_T0(d_ot
, reg
);
7304 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7305 if (d_ot
== OT_QUAD
) {
7306 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7308 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7310 gen_op_mov_reg_T0(d_ot
, reg
);
7316 TCGv t0
, t1
, t2
, a0
;
7318 if (!s
->pe
|| s
->vm86
)
7320 t0
= tcg_temp_local_new();
7321 t1
= tcg_temp_local_new();
7322 t2
= tcg_temp_local_new();
7324 modrm
= ldub_code(s
->pc
++);
7325 reg
= (modrm
>> 3) & 7;
7326 mod
= (modrm
>> 6) & 3;
7329 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7330 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7331 a0
= tcg_temp_local_new();
7332 tcg_gen_mov_tl(a0
, cpu_A0
);
7334 gen_op_mov_v_reg(ot
, t0
, rm
);
7337 gen_op_mov_v_reg(ot
, t1
, reg
);
7338 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7339 tcg_gen_andi_tl(t1
, t1
, 3);
7340 tcg_gen_movi_tl(t2
, 0);
7341 label1
= gen_new_label();
7342 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7343 tcg_gen_andi_tl(t0
, t0
, ~3);
7344 tcg_gen_or_tl(t0
, t0
, t1
);
7345 tcg_gen_movi_tl(t2
, CC_Z
);
7346 gen_set_label(label1
);
7348 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7351 gen_op_mov_reg_v(ot
, rm
, t0
);
7353 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7354 gen_op_set_cc_op(s
->cc_op
);
7355 gen_compute_eflags(cpu_cc_src
);
7356 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7357 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7358 s
->cc_op
= CC_OP_EFLAGS
;
7364 case 0x102: /* lar */
7365 case 0x103: /* lsl */
7369 if (!s
->pe
|| s
->vm86
)
7371 ot
= dflag
? OT_LONG
: OT_WORD
;
7372 modrm
= ldub_code(s
->pc
++);
7373 reg
= ((modrm
>> 3) & 7) | rex_r
;
7374 gen_ldst_modrm(s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7375 t0
= tcg_temp_local_new();
7376 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7377 gen_op_set_cc_op(s
->cc_op
);
7379 gen_helper_lar(t0
, cpu_T
[0]);
7381 gen_helper_lsl(t0
, cpu_T
[0]);
7382 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7383 label1
= gen_new_label();
7384 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7385 gen_op_mov_reg_v(ot
, reg
, t0
);
7386 gen_set_label(label1
);
7387 s
->cc_op
= CC_OP_EFLAGS
;
7392 modrm
= ldub_code(s
->pc
++);
7393 mod
= (modrm
>> 6) & 3;
7394 op
= (modrm
>> 3) & 7;
7396 case 0: /* prefetchnta */
7397 case 1: /* prefetchnt0 */
7398 case 2: /* prefetchnt0 */
7399 case 3: /* prefetchnt0 */
7402 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7403 /* nothing more to do */
7405 default: /* nop (multi byte) */
7406 gen_nop_modrm(s
, modrm
);
7410 case 0x119 ... 0x11f: /* nop (multi byte) */
7411 modrm
= ldub_code(s
->pc
++);
7412 gen_nop_modrm(s
, modrm
);
7414 case 0x120: /* mov reg, crN */
7415 case 0x122: /* mov crN, reg */
7417 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7419 modrm
= ldub_code(s
->pc
++);
7420 if ((modrm
& 0xc0) != 0xc0)
7422 rm
= (modrm
& 7) | REX_B(s
);
7423 reg
= ((modrm
>> 3) & 7) | rex_r
;
7428 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
7429 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
7438 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7439 gen_op_set_cc_op(s
->cc_op
);
7440 gen_jmp_im(pc_start
- s
->cs_base
);
7442 gen_op_mov_TN_reg(ot
, 0, rm
);
7443 gen_helper_write_crN(tcg_const_i32(reg
), cpu_T
[0]);
7444 gen_jmp_im(s
->pc
- s
->cs_base
);
7447 gen_helper_read_crN(cpu_T
[0], tcg_const_i32(reg
));
7448 gen_op_mov_reg_T0(ot
, rm
);
7456 case 0x121: /* mov reg, drN */
7457 case 0x123: /* mov drN, reg */
7459 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7461 modrm
= ldub_code(s
->pc
++);
7462 if ((modrm
& 0xc0) != 0xc0)
7464 rm
= (modrm
& 7) | REX_B(s
);
7465 reg
= ((modrm
>> 3) & 7) | rex_r
;
7470 /* XXX: do it dynamically with CR4.DE bit */
7471 if (reg
== 4 || reg
== 5 || reg
>= 8)
7474 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
7475 gen_op_mov_TN_reg(ot
, 0, rm
);
7476 gen_helper_movl_drN_T0(tcg_const_i32(reg
), cpu_T
[0]);
7477 gen_jmp_im(s
->pc
- s
->cs_base
);
7480 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
7481 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
7482 gen_op_mov_reg_T0(ot
, rm
);
7486 case 0x106: /* clts */
7488 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7490 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7492 /* abort block because static cpu state changed */
7493 gen_jmp_im(s
->pc
- s
->cs_base
);
7497 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7498 case 0x1c3: /* MOVNTI reg, mem */
7499 if (!(s
->cpuid_features
& CPUID_SSE2
))
7501 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
7502 modrm
= ldub_code(s
->pc
++);
7503 mod
= (modrm
>> 6) & 3;
7506 reg
= ((modrm
>> 3) & 7) | rex_r
;
7507 /* generate a generic store */
7508 gen_ldst_modrm(s
, modrm
, ot
, reg
, 1);
7511 modrm
= ldub_code(s
->pc
++);
7512 mod
= (modrm
>> 6) & 3;
7513 op
= (modrm
>> 3) & 7;
7515 case 0: /* fxsave */
7516 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7517 (s
->prefix
& PREFIX_LOCK
))
7519 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7520 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7523 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7524 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7525 gen_op_set_cc_op(s
->cc_op
);
7526 gen_jmp_im(pc_start
- s
->cs_base
);
7527 gen_helper_fxsave(cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7529 case 1: /* fxrstor */
7530 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7531 (s
->prefix
& PREFIX_LOCK
))
7533 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7534 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7537 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7538 if (s
->cc_op
!= CC_OP_DYNAMIC
)
7539 gen_op_set_cc_op(s
->cc_op
);
7540 gen_jmp_im(pc_start
- s
->cs_base
);
7541 gen_helper_fxrstor(cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7543 case 2: /* ldmxcsr */
7544 case 3: /* stmxcsr */
7545 if (s
->flags
& HF_TS_MASK
) {
7546 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7549 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
7552 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7554 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7555 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7557 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7558 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
7561 case 5: /* lfence */
7562 case 6: /* mfence */
7563 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE
))
7566 case 7: /* sfence / clflush */
7567 if ((modrm
& 0xc7) == 0xc0) {
7569 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7570 if (!(s
->cpuid_features
& CPUID_SSE
))
7574 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
7576 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7583 case 0x10d: /* 3DNow! prefetch(w) */
7584 modrm
= ldub_code(s
->pc
++);
7585 mod
= (modrm
>> 6) & 3;
7588 gen_lea_modrm(s
, modrm
, ®_addr
, &offset_addr
);
7589 /* ignore for now */
7591 case 0x1aa: /* rsm */
7592 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
7593 if (!(s
->flags
& HF_SMM_MASK
))
7595 gen_update_cc_op(s
);
7596 gen_jmp_im(s
->pc
- s
->cs_base
);
7600 case 0x1b8: /* SSE4.2 popcnt */
7601 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
7604 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
7607 modrm
= ldub_code(s
->pc
++);
7608 reg
= ((modrm
>> 3) & 7);
7610 if (s
->prefix
& PREFIX_DATA
)
7612 else if (s
->dflag
!= 2)
7617 gen_ldst_modrm(s
, modrm
, ot
, OR_TMP0
, 0);
7618 gen_helper_popcnt(cpu_T
[0], cpu_T
[0], tcg_const_i32(ot
));
7619 gen_op_mov_reg_T0(ot
, reg
);
7621 s
->cc_op
= CC_OP_EFLAGS
;
7623 case 0x10e ... 0x10f:
7624 /* 3DNow! instructions, ignore prefixes */
7625 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
7626 case 0x110 ... 0x117:
7627 case 0x128 ... 0x12f:
7628 case 0x138 ... 0x13a:
7629 case 0x150 ... 0x179:
7630 case 0x17c ... 0x17f:
7632 case 0x1c4 ... 0x1c6:
7633 case 0x1d0 ... 0x1fe:
7634 gen_sse(s
, b
, pc_start
, rex_r
);
7639 /* lock generation */
7640 if (s
->prefix
& PREFIX_LOCK
)
7641 gen_helper_unlock();
7644 if (s
->prefix
& PREFIX_LOCK
)
7645 gen_helper_unlock();
7646 /* XXX: ensure that no lock was generated */
7647 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
7651 void optimize_flags_init(void)
7653 #if TCG_TARGET_REG_BITS == 32
7654 assert(sizeof(CCTable
) == (1 << 3));
7656 assert(sizeof(CCTable
) == (1 << 4));
7658 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
7659 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
,
7660 offsetof(CPUState
, cc_op
), "cc_op");
7661 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUState
, cc_src
),
7663 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUState
, cc_dst
),
7665 cpu_cc_tmp
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUState
, cc_tmp
),
7668 #ifdef TARGET_X86_64
7669 cpu_regs
[R_EAX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7670 offsetof(CPUState
, regs
[R_EAX
]), "rax");
7671 cpu_regs
[R_ECX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7672 offsetof(CPUState
, regs
[R_ECX
]), "rcx");
7673 cpu_regs
[R_EDX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7674 offsetof(CPUState
, regs
[R_EDX
]), "rdx");
7675 cpu_regs
[R_EBX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7676 offsetof(CPUState
, regs
[R_EBX
]), "rbx");
7677 cpu_regs
[R_ESP
] = tcg_global_mem_new_i64(TCG_AREG0
,
7678 offsetof(CPUState
, regs
[R_ESP
]), "rsp");
7679 cpu_regs
[R_EBP
] = tcg_global_mem_new_i64(TCG_AREG0
,
7680 offsetof(CPUState
, regs
[R_EBP
]), "rbp");
7681 cpu_regs
[R_ESI
] = tcg_global_mem_new_i64(TCG_AREG0
,
7682 offsetof(CPUState
, regs
[R_ESI
]), "rsi");
7683 cpu_regs
[R_EDI
] = tcg_global_mem_new_i64(TCG_AREG0
,
7684 offsetof(CPUState
, regs
[R_EDI
]), "rdi");
7685 cpu_regs
[8] = tcg_global_mem_new_i64(TCG_AREG0
,
7686 offsetof(CPUState
, regs
[8]), "r8");
7687 cpu_regs
[9] = tcg_global_mem_new_i64(TCG_AREG0
,
7688 offsetof(CPUState
, regs
[9]), "r9");
7689 cpu_regs
[10] = tcg_global_mem_new_i64(TCG_AREG0
,
7690 offsetof(CPUState
, regs
[10]), "r10");
7691 cpu_regs
[11] = tcg_global_mem_new_i64(TCG_AREG0
,
7692 offsetof(CPUState
, regs
[11]), "r11");
7693 cpu_regs
[12] = tcg_global_mem_new_i64(TCG_AREG0
,
7694 offsetof(CPUState
, regs
[12]), "r12");
7695 cpu_regs
[13] = tcg_global_mem_new_i64(TCG_AREG0
,
7696 offsetof(CPUState
, regs
[13]), "r13");
7697 cpu_regs
[14] = tcg_global_mem_new_i64(TCG_AREG0
,
7698 offsetof(CPUState
, regs
[14]), "r14");
7699 cpu_regs
[15] = tcg_global_mem_new_i64(TCG_AREG0
,
7700 offsetof(CPUState
, regs
[15]), "r15");
7702 cpu_regs
[R_EAX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7703 offsetof(CPUState
, regs
[R_EAX
]), "eax");
7704 cpu_regs
[R_ECX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7705 offsetof(CPUState
, regs
[R_ECX
]), "ecx");
7706 cpu_regs
[R_EDX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7707 offsetof(CPUState
, regs
[R_EDX
]), "edx");
7708 cpu_regs
[R_EBX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7709 offsetof(CPUState
, regs
[R_EBX
]), "ebx");
7710 cpu_regs
[R_ESP
] = tcg_global_mem_new_i32(TCG_AREG0
,
7711 offsetof(CPUState
, regs
[R_ESP
]), "esp");
7712 cpu_regs
[R_EBP
] = tcg_global_mem_new_i32(TCG_AREG0
,
7713 offsetof(CPUState
, regs
[R_EBP
]), "ebp");
7714 cpu_regs
[R_ESI
] = tcg_global_mem_new_i32(TCG_AREG0
,
7715 offsetof(CPUState
, regs
[R_ESI
]), "esi");
7716 cpu_regs
[R_EDI
] = tcg_global_mem_new_i32(TCG_AREG0
,
7717 offsetof(CPUState
, regs
[R_EDI
]), "edi");
7720 /* register helpers */
7721 #define GEN_HELPER 2

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
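
    /* The fields below are decoded from tb->flags, i.e. the CS/SS/CPL/EFLAGS
       state captured when the TB was looked up, so the generated code is
       specialized for that execution mode. */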
    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
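    /* mem_index selects the softmmu access functions used by the load/store
       generators (kernel vs. user accesses); the values appear to be
       pre-scaled by 4 so they can be combined with the operand size when
       indexing the per-size access helpers. */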
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
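    /* With jmp_opt cleared, direct jumps end the TB instead of being
       chained to the next block, which is required when single-stepping
       or while interrupts are inhibited. */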
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
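    /* max_insns bounds the number of guest instructions per TB; a value
       smaller than CF_COUNT_MASK is requested through tb->cflags when
       instruction counting (icount) needs the block to stop early. */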

    gen_icount_start();
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
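        /* When search_pc is set, record the guest PC, cc_op and instruction
           count for every generated op so gen_pc_load() can later map a
           position in the op stream back to the guest state. */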
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* in single-step mode, we generate only one instruction and then
           raise an exception */
        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation too */
        if (gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    /* don't forget to fill in the last values */
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
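
/* Restore the guest EIP and the lazy condition-code state from the per-op
   tables recorded during translation, given the position 'pc_pos' of the
   interrupted op within the TB. */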
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for(i = 0; i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
        qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)