4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "qemu/host-utils.h"
28 #include "disas/disas.h"
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40 #define PREFIX_VEX 0x20
43 #define CODE64(s) ((s)->code64)
44 #define REX_X(s) ((s)->rex_x)
45 #define REX_B(s) ((s)->rex_b)
60 //#define MACRO_TEST 1
62 /* global register indexes */
63 static TCGv_ptr cpu_env
;
65 static TCGv cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
, cpu_cc_srcT
;
66 static TCGv_i32 cpu_cc_op
;
67 static TCGv cpu_regs
[CPU_NB_REGS
];
70 /* local register indexes (only used inside old micro ops) */
71 static TCGv cpu_tmp0
, cpu_tmp4
;
72 static TCGv_ptr cpu_ptr0
, cpu_ptr1
;
73 static TCGv_i32 cpu_tmp2_i32
, cpu_tmp3_i32
;
74 static TCGv_i64 cpu_tmp1_i64
;
77 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
79 #include "exec/gen-icount.h"
82 static int x86_64_hregs
;
85 typedef struct DisasContext
{
86 /* current insn context */
87 int override
; /* -1 if no override */
90 target_ulong pc
; /* pc = eip + cs_base */
91 int is_jmp
; /* 1 = means jump (stop translation), 2 means CPU
92 static state change (stop translation) */
93 /* current block context */
94 target_ulong cs_base
; /* base of CS segment */
95 int pe
; /* protected mode */
96 int code32
; /* 32 bit code segment */
98 int lma
; /* long mode active */
99 int code64
; /* 64 bit code segment */
102 int vex_l
; /* vex vector length */
103 int vex_v
; /* vex vvvv register, without 1's compliment. */
104 int ss32
; /* 32 bit stack segment */
105 CCOp cc_op
; /* current CC operation */
107 int addseg
; /* non zero if either DS/ES/SS have a non zero base */
108 int f_st
; /* currently unused */
109 int vm86
; /* vm86 mode */
112 int tf
; /* TF cpu flag */
113 int singlestep_enabled
; /* "hardware" single step enabled */
114 int jmp_opt
; /* use direct block chaining for direct jumps */
115 int mem_index
; /* select memory access functions */
116 uint64_t flags
; /* all execution flags */
117 struct TranslationBlock
*tb
;
118 int popl_esp_hack
; /* for correct popl with esp base handling */
119 int rip_offset
; /* only used in x86_64, but left for simplicity */
121 int cpuid_ext_features
;
122 int cpuid_ext2_features
;
123 int cpuid_ext3_features
;
124 int cpuid_7_0_ebx_features
;
127 static void gen_eob(DisasContext
*s
);
128 static void gen_jmp(DisasContext
*s
, target_ulong eip
);
129 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
);
130 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
);
132 /* i386 arith/logic operations */
152 OP_SHL1
, /* undocumented */
176 /* I386 int registers */
177 OR_EAX
, /* MUST be even numbered */
186 OR_TMP0
= 16, /* temporary operand register */
188 OR_A0
, /* temporary register used when doing address evaluation */
198 /* Bit set if the global variable is live after setting CC_OP to X. */
199 static const uint8_t cc_op_live
[CC_OP_NB
] = {
200 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
201 [CC_OP_EFLAGS
] = USES_CC_SRC
,
202 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
203 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
204 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
205 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
206 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
207 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
208 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
209 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
210 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
211 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
214 static void set_cc_op(DisasContext
*s
, CCOp op
)
218 if (s
->cc_op
== op
) {
222 /* Discard CC computation that will no longer be used. */
223 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
224 if (dead
& USES_CC_DST
) {
225 tcg_gen_discard_tl(cpu_cc_dst
);
227 if (dead
& USES_CC_SRC
) {
228 tcg_gen_discard_tl(cpu_cc_src
);
230 if (dead
& USES_CC_SRC2
) {
231 tcg_gen_discard_tl(cpu_cc_src2
);
233 if (dead
& USES_CC_SRCT
) {
234 tcg_gen_discard_tl(cpu_cc_srcT
);
238 /* The DYNAMIC setting is translator only, and should never be
239 stored. Thus we always consider it clean. */
240 s
->cc_op_dirty
= (op
!= CC_OP_DYNAMIC
);
243 static void gen_update_cc_op(DisasContext
*s
)
245 if (s
->cc_op_dirty
) {
246 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
247 s
->cc_op_dirty
= false;
251 static inline void gen_op_movl_T0_0(void)
253 tcg_gen_movi_tl(cpu_T
[0], 0);
256 static inline void gen_op_movl_T0_im(int32_t val
)
258 tcg_gen_movi_tl(cpu_T
[0], val
);
261 static inline void gen_op_movl_T0_imu(uint32_t val
)
263 tcg_gen_movi_tl(cpu_T
[0], val
);
266 static inline void gen_op_movl_T1_im(int32_t val
)
268 tcg_gen_movi_tl(cpu_T
[1], val
);
271 static inline void gen_op_movl_T1_imu(uint32_t val
)
273 tcg_gen_movi_tl(cpu_T
[1], val
);
276 static inline void gen_op_movl_A0_im(uint32_t val
)
278 tcg_gen_movi_tl(cpu_A0
, val
);
282 static inline void gen_op_movq_A0_im(int64_t val
)
284 tcg_gen_movi_tl(cpu_A0
, val
);
288 static inline void gen_movtl_T0_im(target_ulong val
)
290 tcg_gen_movi_tl(cpu_T
[0], val
);
293 static inline void gen_movtl_T1_im(target_ulong val
)
295 tcg_gen_movi_tl(cpu_T
[1], val
);
298 static inline void gen_op_andl_T0_ffff(void)
300 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
303 static inline void gen_op_andl_T0_im(uint32_t val
)
305 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], val
);
308 static inline void gen_op_movl_T0_T1(void)
310 tcg_gen_mov_tl(cpu_T
[0], cpu_T
[1]);
313 static inline void gen_op_andl_A0_ffff(void)
315 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffff);
320 #define NB_OP_SIZES 4
322 #else /* !TARGET_X86_64 */
324 #define NB_OP_SIZES 3
326 #endif /* !TARGET_X86_64 */
328 #if defined(HOST_WORDS_BIGENDIAN)
329 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
330 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
331 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
332 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
333 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
335 #define REG_B_OFFSET 0
336 #define REG_H_OFFSET 1
337 #define REG_W_OFFSET 0
338 #define REG_L_OFFSET 0
339 #define REG_LH_OFFSET 4
342 /* In instruction encodings for byte register accesses the
343 * register number usually indicates "low 8 bits of register N";
344 * however there are some special cases where N 4..7 indicates
345 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
346 * true for this special case, false otherwise.
348 static inline bool byte_reg_is_xH(int reg
)
354 if (reg
>= 8 || x86_64_hregs
) {
361 static inline void gen_op_mov_reg_v(int ot
, int reg
, TCGv t0
)
365 if (!byte_reg_is_xH(reg
)) {
366 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 8);
368 tcg_gen_deposit_tl(cpu_regs
[reg
- 4], cpu_regs
[reg
- 4], t0
, 8, 8);
372 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 16);
374 default: /* XXX this shouldn't be reached; abort? */
376 /* For x86_64, this sets the higher half of register to zero.
377 For i386, this is equivalent to a mov. */
378 tcg_gen_ext32u_tl(cpu_regs
[reg
], t0
);
382 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
388 static inline void gen_op_mov_reg_T0(int ot
, int reg
)
390 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
393 static inline void gen_op_mov_reg_T1(int ot
, int reg
)
395 gen_op_mov_reg_v(ot
, reg
, cpu_T
[1]);
398 static inline void gen_op_mov_reg_A0(int size
, int reg
)
402 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_A0
, 0, 16);
404 default: /* XXX this shouldn't be reached; abort? */
406 /* For x86_64, this sets the higher half of register to zero.
407 For i386, this is equivalent to a mov. */
408 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_A0
);
412 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_A0
);
418 static inline void gen_op_mov_v_reg(int ot
, TCGv t0
, int reg
)
420 if (ot
== OT_BYTE
&& byte_reg_is_xH(reg
)) {
421 tcg_gen_shri_tl(t0
, cpu_regs
[reg
- 4], 8);
422 tcg_gen_ext8u_tl(t0
, t0
);
424 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
428 static inline void gen_op_mov_TN_reg(int ot
, int t_index
, int reg
)
430 gen_op_mov_v_reg(ot
, cpu_T
[t_index
], reg
);
433 static inline void gen_op_movl_A0_reg(int reg
)
435 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
438 static inline void gen_op_addl_A0_im(int32_t val
)
440 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
442 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
447 static inline void gen_op_addq_A0_im(int64_t val
)
449 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
453 static void gen_add_A0_im(DisasContext
*s
, int val
)
457 gen_op_addq_A0_im(val
);
460 gen_op_addl_A0_im(val
);
463 static inline void gen_op_addl_T0_T1(void)
465 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
468 static inline void gen_op_jmp_T0(void)
470 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, eip
));
473 static inline void gen_op_add_reg_im(int size
, int reg
, int32_t val
)
477 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
478 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
481 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
482 /* For x86_64, this sets the higher half of register to zero.
483 For i386, this is equivalent to a nop. */
484 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
485 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
489 tcg_gen_addi_tl(cpu_regs
[reg
], cpu_regs
[reg
], val
);
495 static inline void gen_op_add_reg_T0(int size
, int reg
)
499 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
500 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
503 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
504 /* For x86_64, this sets the higher half of register to zero.
505 For i386, this is equivalent to a nop. */
506 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
507 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
511 tcg_gen_add_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_T
[0]);
517 static inline void gen_op_addl_A0_reg_sN(int shift
, int reg
)
519 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
521 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
522 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
523 /* For x86_64, this sets the higher half of register to zero.
524 For i386, this is equivalent to a nop. */
525 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
528 static inline void gen_op_movl_A0_seg(int reg
)
530 tcg_gen_ld32u_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
) + REG_L_OFFSET
);
533 static inline void gen_op_addl_A0_seg(DisasContext
*s
, int reg
)
535 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
538 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
539 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
541 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
542 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
545 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
550 static inline void gen_op_movq_A0_seg(int reg
)
552 tcg_gen_ld_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
555 static inline void gen_op_addq_A0_seg(int reg
)
557 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
558 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
561 static inline void gen_op_movq_A0_reg(int reg
)
563 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
566 static inline void gen_op_addq_A0_reg_sN(int shift
, int reg
)
568 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
570 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
571 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
575 static inline void gen_op_lds_T0_A0(int idx
)
577 int mem_index
= (idx
>> 2) - 1;
580 tcg_gen_qemu_ld8s(cpu_T
[0], cpu_A0
, mem_index
);
583 tcg_gen_qemu_ld16s(cpu_T
[0], cpu_A0
, mem_index
);
587 tcg_gen_qemu_ld32s(cpu_T
[0], cpu_A0
, mem_index
);
592 static inline void gen_op_ld_v(int idx
, TCGv t0
, TCGv a0
)
594 int mem_index
= (idx
>> 2) - 1;
597 tcg_gen_qemu_ld8u(t0
, a0
, mem_index
);
600 tcg_gen_qemu_ld16u(t0
, a0
, mem_index
);
603 tcg_gen_qemu_ld32u(t0
, a0
, mem_index
);
607 /* Should never happen on 32-bit targets. */
609 tcg_gen_qemu_ld64(t0
, a0
, mem_index
);
615 /* XXX: always use ldu or lds */
616 static inline void gen_op_ld_T0_A0(int idx
)
618 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
621 static inline void gen_op_ldu_T0_A0(int idx
)
623 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
626 static inline void gen_op_ld_T1_A0(int idx
)
628 gen_op_ld_v(idx
, cpu_T
[1], cpu_A0
);
631 static inline void gen_op_st_v(int idx
, TCGv t0
, TCGv a0
)
633 int mem_index
= (idx
>> 2) - 1;
636 tcg_gen_qemu_st8(t0
, a0
, mem_index
);
639 tcg_gen_qemu_st16(t0
, a0
, mem_index
);
642 tcg_gen_qemu_st32(t0
, a0
, mem_index
);
646 /* Should never happen on 32-bit targets. */
648 tcg_gen_qemu_st64(t0
, a0
, mem_index
);
654 static inline void gen_op_st_T0_A0(int idx
)
656 gen_op_st_v(idx
, cpu_T
[0], cpu_A0
);
659 static inline void gen_op_st_T1_A0(int idx
)
661 gen_op_st_v(idx
, cpu_T
[1], cpu_A0
);
664 static inline void gen_jmp_im(target_ulong pc
)
666 tcg_gen_movi_tl(cpu_tmp0
, pc
);
667 tcg_gen_st_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, eip
));
670 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
674 override
= s
->override
;
678 gen_op_movq_A0_seg(override
);
679 gen_op_addq_A0_reg_sN(0, R_ESI
);
681 gen_op_movq_A0_reg(R_ESI
);
687 if (s
->addseg
&& override
< 0)
690 gen_op_movl_A0_seg(override
);
691 gen_op_addl_A0_reg_sN(0, R_ESI
);
693 gen_op_movl_A0_reg(R_ESI
);
696 /* 16 address, always override */
699 gen_op_movl_A0_reg(R_ESI
);
700 gen_op_andl_A0_ffff();
701 gen_op_addl_A0_seg(s
, override
);
705 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
709 gen_op_movq_A0_reg(R_EDI
);
714 gen_op_movl_A0_seg(R_ES
);
715 gen_op_addl_A0_reg_sN(0, R_EDI
);
717 gen_op_movl_A0_reg(R_EDI
);
720 gen_op_movl_A0_reg(R_EDI
);
721 gen_op_andl_A0_ffff();
722 gen_op_addl_A0_seg(s
, R_ES
);
726 static inline void gen_op_movl_T0_Dshift(int ot
)
728 tcg_gen_ld32s_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, df
));
729 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], ot
);
732 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, int size
, bool sign
)
737 tcg_gen_ext8s_tl(dst
, src
);
739 tcg_gen_ext8u_tl(dst
, src
);
744 tcg_gen_ext16s_tl(dst
, src
);
746 tcg_gen_ext16u_tl(dst
, src
);
752 tcg_gen_ext32s_tl(dst
, src
);
754 tcg_gen_ext32u_tl(dst
, src
);
763 static void gen_extu(int ot
, TCGv reg
)
765 gen_ext_tl(reg
, reg
, ot
, false);
768 static void gen_exts(int ot
, TCGv reg
)
770 gen_ext_tl(reg
, reg
, ot
, true);
773 static inline void gen_op_jnz_ecx(int size
, int label1
)
775 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
776 gen_extu(size
+ 1, cpu_tmp0
);
777 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, label1
);
780 static inline void gen_op_jz_ecx(int size
, int label1
)
782 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
783 gen_extu(size
+ 1, cpu_tmp0
);
784 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
787 static void gen_helper_in_func(int ot
, TCGv v
, TCGv_i32 n
)
791 gen_helper_inb(v
, n
);
794 gen_helper_inw(v
, n
);
797 gen_helper_inl(v
, n
);
802 static void gen_helper_out_func(int ot
, TCGv_i32 v
, TCGv_i32 n
)
806 gen_helper_outb(v
, n
);
809 gen_helper_outw(v
, n
);
812 gen_helper_outl(v
, n
);
817 static void gen_check_io(DisasContext
*s
, int ot
, target_ulong cur_eip
,
821 target_ulong next_eip
;
824 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
828 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
831 gen_helper_check_iob(cpu_env
, cpu_tmp2_i32
);
834 gen_helper_check_iow(cpu_env
, cpu_tmp2_i32
);
837 gen_helper_check_iol(cpu_env
, cpu_tmp2_i32
);
841 if(s
->flags
& HF_SVMI_MASK
) {
846 svm_flags
|= (1 << (4 + ot
));
847 next_eip
= s
->pc
- s
->cs_base
;
848 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
849 gen_helper_svm_check_io(cpu_env
, cpu_tmp2_i32
,
850 tcg_const_i32(svm_flags
),
851 tcg_const_i32(next_eip
- cur_eip
));
855 static inline void gen_movs(DisasContext
*s
, int ot
)
857 gen_string_movl_A0_ESI(s
);
858 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
859 gen_string_movl_A0_EDI(s
);
860 gen_op_st_T0_A0(ot
+ s
->mem_index
);
861 gen_op_movl_T0_Dshift(ot
);
862 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
863 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
866 static void gen_op_update1_cc(void)
868 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
871 static void gen_op_update2_cc(void)
873 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
874 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
877 static void gen_op_update3_cc(TCGv reg
)
879 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
880 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
881 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
884 static inline void gen_op_testl_T0_T1_cc(void)
886 tcg_gen_and_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
889 static void gen_op_update_neg_cc(void)
891 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
892 tcg_gen_neg_tl(cpu_cc_src
, cpu_T
[0]);
893 tcg_gen_movi_tl(cpu_cc_srcT
, 0);
896 /* compute all eflags to cc_src */
897 static void gen_compute_eflags(DisasContext
*s
)
899 TCGv zero
, dst
, src1
, src2
;
902 if (s
->cc_op
== CC_OP_EFLAGS
) {
911 /* Take care to not read values that are not live. */
912 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
913 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
915 zero
= tcg_const_tl(0);
916 if (dead
& USES_CC_DST
) {
919 if (dead
& USES_CC_SRC
) {
922 if (dead
& USES_CC_SRC2
) {
928 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
929 set_cc_op(s
, CC_OP_EFLAGS
);
936 typedef struct CCPrepare
{
946 /* compute eflags.C to reg */
947 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
953 case CC_OP_SUBB
... CC_OP_SUBQ
:
954 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
955 size
= s
->cc_op
- CC_OP_SUBB
;
956 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
957 /* If no temporary was used, be careful not to alias t1 and t0. */
958 t0
= TCGV_EQUAL(t1
, cpu_cc_src
) ? cpu_tmp0
: reg
;
959 tcg_gen_mov_tl(t0
, cpu_cc_srcT
);
963 case CC_OP_ADDB
... CC_OP_ADDQ
:
964 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
965 size
= s
->cc_op
- CC_OP_ADDB
;
966 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
967 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
969 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
970 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
972 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
973 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
975 case CC_OP_INCB
... CC_OP_INCQ
:
976 case CC_OP_DECB
... CC_OP_DECQ
:
977 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
978 .mask
= -1, .no_setcond
= true };
980 case CC_OP_SHLB
... CC_OP_SHLQ
:
981 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
982 size
= s
->cc_op
- CC_OP_SHLB
;
983 shift
= (8 << size
) - 1;
984 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
985 .mask
= (target_ulong
)1 << shift
};
987 case CC_OP_MULB
... CC_OP_MULQ
:
988 return (CCPrepare
) { .cond
= TCG_COND_NE
,
989 .reg
= cpu_cc_src
, .mask
= -1 };
992 case CC_OP_SARB
... CC_OP_SARQ
:
994 return (CCPrepare
) { .cond
= TCG_COND_NE
,
995 .reg
= cpu_cc_src
, .mask
= CC_C
};
998 /* The need to compute only C from CC_OP_DYNAMIC is important
999 in efficiently implementing e.g. INC at the start of a TB. */
1000 gen_update_cc_op(s
);
1001 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
1002 cpu_cc_src2
, cpu_cc_op
);
1003 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1004 .mask
= -1, .no_setcond
= true };
1008 /* compute eflags.P to reg */
1009 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
1011 gen_compute_eflags(s
);
1012 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1016 /* compute eflags.S to reg */
1017 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
1021 gen_compute_eflags(s
);
1024 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1028 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1029 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
1030 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
1035 /* compute eflags.O to reg */
1036 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
1038 gen_compute_eflags(s
);
1039 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1043 /* compute eflags.Z to reg */
1044 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
1048 gen_compute_eflags(s
);
1051 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1055 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1056 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
1057 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1062 /* perform a conditional store into register 'reg' according to jump opcode
1063 value 'b'. In the fast case, T0 is guaranted not to be used. */
1064 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
1066 int inv
, jcc_op
, size
, cond
;
1071 jcc_op
= (b
>> 1) & 7;
1074 case CC_OP_SUBB
... CC_OP_SUBQ
:
1075 /* We optimize relational operators for the cmp/jcc case. */
1076 size
= s
->cc_op
- CC_OP_SUBB
;
1079 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1080 gen_extu(size
, cpu_tmp4
);
1081 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
1082 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= cpu_tmp4
,
1083 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1092 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1093 gen_exts(size
, cpu_tmp4
);
1094 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
1095 cc
= (CCPrepare
) { .cond
= cond
, .reg
= cpu_tmp4
,
1096 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1106 /* This actually generates good code for JC, JZ and JS. */
1109 cc
= gen_prepare_eflags_o(s
, reg
);
1112 cc
= gen_prepare_eflags_c(s
, reg
);
1115 cc
= gen_prepare_eflags_z(s
, reg
);
1118 gen_compute_eflags(s
);
1119 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1120 .mask
= CC_Z
| CC_C
};
1123 cc
= gen_prepare_eflags_s(s
, reg
);
1126 cc
= gen_prepare_eflags_p(s
, reg
);
1129 gen_compute_eflags(s
);
1130 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1133 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1134 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1135 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1140 gen_compute_eflags(s
);
1141 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1144 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1145 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1146 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1147 .mask
= CC_S
| CC_Z
};
1154 cc
.cond
= tcg_invert_cond(cc
.cond
);
1159 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
1161 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
1163 if (cc
.no_setcond
) {
1164 if (cc
.cond
== TCG_COND_EQ
) {
1165 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1167 tcg_gen_mov_tl(reg
, cc
.reg
);
1172 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1173 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1174 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1175 tcg_gen_andi_tl(reg
, reg
, 1);
1178 if (cc
.mask
!= -1) {
1179 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1183 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1185 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1189 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1191 gen_setcc1(s
, JCC_B
<< 1, reg
);
1194 /* generate a conditional jump to label 'l1' according to jump opcode
1195 value 'b'. In the fast case, T0 is guaranted not to be used. */
1196 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, int l1
)
1198 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1200 if (cc
.mask
!= -1) {
1201 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1205 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1207 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1211 /* Generate a conditional jump to label 'l1' according to jump opcode
1212 value 'b'. In the fast case, T0 is guaranted not to be used.
1213 A translation block must end soon. */
1214 static inline void gen_jcc1(DisasContext
*s
, int b
, int l1
)
1216 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1218 gen_update_cc_op(s
);
1219 if (cc
.mask
!= -1) {
1220 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1223 set_cc_op(s
, CC_OP_DYNAMIC
);
1225 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1227 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1231 /* XXX: does not work with gdbstub "ice" single step - not a
1233 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1237 l1
= gen_new_label();
1238 l2
= gen_new_label();
1239 gen_op_jnz_ecx(s
->aflag
, l1
);
1241 gen_jmp_tb(s
, next_eip
, 1);
1246 static inline void gen_stos(DisasContext
*s
, int ot
)
1248 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1249 gen_string_movl_A0_EDI(s
);
1250 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1251 gen_op_movl_T0_Dshift(ot
);
1252 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1255 static inline void gen_lods(DisasContext
*s
, int ot
)
1257 gen_string_movl_A0_ESI(s
);
1258 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1259 gen_op_mov_reg_T0(ot
, R_EAX
);
1260 gen_op_movl_T0_Dshift(ot
);
1261 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1264 static inline void gen_scas(DisasContext
*s
, int ot
)
1266 gen_string_movl_A0_EDI(s
);
1267 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1268 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1269 gen_op_movl_T0_Dshift(ot
);
1270 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1273 static inline void gen_cmps(DisasContext
*s
, int ot
)
1275 gen_string_movl_A0_EDI(s
);
1276 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1277 gen_string_movl_A0_ESI(s
);
1278 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1279 gen_op_movl_T0_Dshift(ot
);
1280 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1281 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1284 static inline void gen_ins(DisasContext
*s
, int ot
)
1288 gen_string_movl_A0_EDI(s
);
1289 /* Note: we must do this dummy write first to be restartable in
1290 case of page fault. */
1292 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1293 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1294 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1295 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1296 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1297 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1298 gen_op_movl_T0_Dshift(ot
);
1299 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1304 static inline void gen_outs(DisasContext
*s
, int ot
)
1308 gen_string_movl_A0_ESI(s
);
1309 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1311 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1312 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1313 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1314 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1315 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1317 gen_op_movl_T0_Dshift(ot
);
1318 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1323 /* same method as Valgrind : we generate jumps to current or next
1325 #define GEN_REPZ(op) \
1326 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1327 target_ulong cur_eip, target_ulong next_eip) \
1330 gen_update_cc_op(s); \
1331 l2 = gen_jz_ecx_string(s, next_eip); \
1332 gen_ ## op(s, ot); \
1333 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1334 /* a loop would cause two single step exceptions if ECX = 1 \
1335 before rep string_insn */ \
1337 gen_op_jz_ecx(s->aflag, l2); \
1338 gen_jmp(s, cur_eip); \
1341 #define GEN_REPZ2(op) \
1342 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1343 target_ulong cur_eip, \
1344 target_ulong next_eip, \
1348 gen_update_cc_op(s); \
1349 l2 = gen_jz_ecx_string(s, next_eip); \
1350 gen_ ## op(s, ot); \
1351 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1352 gen_update_cc_op(s); \
1353 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1355 gen_op_jz_ecx(s->aflag, l2); \
1356 gen_jmp(s, cur_eip); \
1367 static void gen_helper_fp_arith_ST0_FT0(int op
)
1371 gen_helper_fadd_ST0_FT0(cpu_env
);
1374 gen_helper_fmul_ST0_FT0(cpu_env
);
1377 gen_helper_fcom_ST0_FT0(cpu_env
);
1380 gen_helper_fcom_ST0_FT0(cpu_env
);
1383 gen_helper_fsub_ST0_FT0(cpu_env
);
1386 gen_helper_fsubr_ST0_FT0(cpu_env
);
1389 gen_helper_fdiv_ST0_FT0(cpu_env
);
1392 gen_helper_fdivr_ST0_FT0(cpu_env
);
1397 /* NOTE the exception in "r" op ordering */
1398 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1400 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1403 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1406 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1409 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1412 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1415 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1418 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1423 /* if d == OR_TMP0, it means memory operand (address in A0) */
1424 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1427 gen_op_mov_TN_reg(ot
, 0, d
);
1429 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1433 gen_compute_eflags_c(s1
, cpu_tmp4
);
1434 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1435 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1437 gen_op_mov_reg_T0(ot
, d
);
1439 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1440 gen_op_update3_cc(cpu_tmp4
);
1441 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1444 gen_compute_eflags_c(s1
, cpu_tmp4
);
1445 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1446 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1448 gen_op_mov_reg_T0(ot
, d
);
1450 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1451 gen_op_update3_cc(cpu_tmp4
);
1452 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1455 gen_op_addl_T0_T1();
1457 gen_op_mov_reg_T0(ot
, d
);
1459 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1460 gen_op_update2_cc();
1461 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1464 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1465 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1467 gen_op_mov_reg_T0(ot
, d
);
1469 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1470 gen_op_update2_cc();
1471 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1475 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1477 gen_op_mov_reg_T0(ot
, d
);
1479 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1480 gen_op_update1_cc();
1481 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1484 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1486 gen_op_mov_reg_T0(ot
, d
);
1488 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1489 gen_op_update1_cc();
1490 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1493 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1495 gen_op_mov_reg_T0(ot
, d
);
1497 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1498 gen_op_update1_cc();
1499 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1502 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1503 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1504 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
1505 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1510 /* if d == OR_TMP0, it means memory operand (address in A0) */
1511 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1514 gen_op_mov_TN_reg(ot
, 0, d
);
1516 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1517 gen_compute_eflags_c(s1
, cpu_cc_src
);
1519 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1520 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1522 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1523 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1526 gen_op_mov_reg_T0(ot
, d
);
1528 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1529 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1532 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1533 int is_right
, int is_arith
)
1539 if (ot
== OT_QUAD
) {
1546 if (op1
== OR_TMP0
) {
1547 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1549 gen_op_mov_TN_reg(ot
, 0, op1
);
1552 t0
= tcg_temp_local_new();
1553 t1
= tcg_temp_local_new();
1554 t2
= tcg_temp_local_new();
1556 tcg_gen_andi_tl(t2
, cpu_T
[1], mask
);
1560 gen_exts(ot
, cpu_T
[0]);
1561 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1562 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], t2
);
1564 gen_extu(ot
, cpu_T
[0]);
1565 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1566 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], t2
);
1569 tcg_gen_mov_tl(t0
, cpu_T
[0]);
1570 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], t2
);
1574 if (op1
== OR_TMP0
) {
1575 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1577 gen_op_mov_reg_T0(ot
, op1
);
1580 /* Update eflags data because we cannot predict flags afterward. */
1581 gen_update_cc_op(s
);
1582 set_cc_op(s
, CC_OP_DYNAMIC
);
1584 tcg_gen_mov_tl(t1
, cpu_T
[0]);
1586 shift_label
= gen_new_label();
1587 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, shift_label
);
1589 tcg_gen_addi_tl(t2
, t2
, -1);
1590 tcg_gen_mov_tl(cpu_cc_dst
, t1
);
1594 tcg_gen_sar_tl(cpu_cc_src
, t0
, t2
);
1596 tcg_gen_shr_tl(cpu_cc_src
, t0
, t2
);
1599 tcg_gen_shl_tl(cpu_cc_src
, t0
, t2
);
1603 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
1605 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
1608 gen_set_label(shift_label
);
1615 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1616 int is_right
, int is_arith
)
1627 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1629 gen_op_mov_TN_reg(ot
, 0, op1
);
1635 gen_exts(ot
, cpu_T
[0]);
1636 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1637 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1639 gen_extu(ot
, cpu_T
[0]);
1640 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1641 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1644 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1645 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1651 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1653 gen_op_mov_reg_T0(ot
, op1
);
1655 /* update eflags if non zero shift */
1657 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1658 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1659 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1663 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1666 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1668 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1671 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
,
1675 int label1
, label2
, data_bits
;
1676 TCGv t0
, t1
, t2
, a0
;
1678 /* XXX: inefficient, but we must use local temps */
1679 t0
= tcg_temp_local_new();
1680 t1
= tcg_temp_local_new();
1681 t2
= tcg_temp_local_new();
1682 a0
= tcg_temp_local_new();
1690 if (op1
== OR_TMP0
) {
1691 tcg_gen_mov_tl(a0
, cpu_A0
);
1692 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1694 gen_op_mov_v_reg(ot
, t0
, op1
);
1697 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1699 tcg_gen_andi_tl(t1
, t1
, mask
);
1701 /* Must test zero case to avoid using undefined behaviour in TCG
1703 label1
= gen_new_label();
1704 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label1
);
1707 tcg_gen_andi_tl(cpu_tmp0
, t1
, (1 << (3 + ot
)) - 1);
1709 tcg_gen_mov_tl(cpu_tmp0
, t1
);
1712 tcg_gen_mov_tl(t2
, t0
);
1714 data_bits
= 8 << ot
;
1715 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1716 fix TCG definition) */
1718 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1719 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1720 tcg_gen_shl_tl(t0
, t0
, cpu_tmp0
);
1722 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp0
);
1723 tcg_gen_subfi_tl(cpu_tmp0
, data_bits
, cpu_tmp0
);
1724 tcg_gen_shr_tl(t0
, t0
, cpu_tmp0
);
1726 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1728 gen_set_label(label1
);
1730 if (op1
== OR_TMP0
) {
1731 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1733 gen_op_mov_reg_v(ot
, op1
, t0
);
1736 /* update eflags. It is needed anyway most of the time, do it always. */
1737 gen_compute_eflags(s
);
1738 assert(s
->cc_op
== CC_OP_EFLAGS
);
1740 label2
= gen_new_label();
1741 tcg_gen_brcondi_tl(TCG_COND_EQ
, t1
, 0, label2
);
1743 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1744 tcg_gen_xor_tl(cpu_tmp0
, t2
, t0
);
1745 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1746 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1747 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1749 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1751 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1752 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1754 gen_set_label(label2
);
1762 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1769 /* XXX: inefficient, but we must use local temps */
1770 t0
= tcg_temp_local_new();
1771 t1
= tcg_temp_local_new();
1772 a0
= tcg_temp_local_new();
1780 if (op1
== OR_TMP0
) {
1781 tcg_gen_mov_tl(a0
, cpu_A0
);
1782 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1784 gen_op_mov_v_reg(ot
, t0
, op1
);
1788 tcg_gen_mov_tl(t1
, t0
);
1791 data_bits
= 8 << ot
;
1793 int shift
= op2
& ((1 << (3 + ot
)) - 1);
1795 tcg_gen_shri_tl(cpu_tmp4
, t0
, shift
);
1796 tcg_gen_shli_tl(t0
, t0
, data_bits
- shift
);
1799 tcg_gen_shli_tl(cpu_tmp4
, t0
, shift
);
1800 tcg_gen_shri_tl(t0
, t0
, data_bits
- shift
);
1802 tcg_gen_or_tl(t0
, t0
, cpu_tmp4
);
1806 if (op1
== OR_TMP0
) {
1807 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1809 gen_op_mov_reg_v(ot
, op1
, t0
);
1814 gen_compute_eflags(s
);
1815 assert(s
->cc_op
== CC_OP_EFLAGS
);
1817 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~(CC_O
| CC_C
));
1818 tcg_gen_xor_tl(cpu_tmp0
, t1
, t0
);
1819 tcg_gen_lshift(cpu_tmp0
, cpu_tmp0
, 11 - (data_bits
- 1));
1820 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, CC_O
);
1821 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
1823 tcg_gen_shri_tl(t0
, t0
, data_bits
- 1);
1825 tcg_gen_andi_tl(t0
, t0
, CC_C
);
1826 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
1834 /* XXX: add faster immediate = 1 case */
1835 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1838 gen_compute_eflags(s
);
1839 assert(s
->cc_op
== CC_OP_EFLAGS
);
1843 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1845 gen_op_mov_TN_reg(ot
, 0, op1
);
1850 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1853 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1856 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1858 #ifdef TARGET_X86_64
1860 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1867 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1870 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1873 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1875 #ifdef TARGET_X86_64
1877 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1884 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1886 gen_op_mov_reg_T0(ot
, op1
);
1889 /* XXX: add faster immediate case */
1890 static void gen_shiftd_rm_T1(DisasContext
*s
, int ot
, int op1
,
1891 int is_right
, TCGv count
)
1893 int label1
, label2
, data_bits
;
1895 TCGv t0
, t1
, t2
, a0
;
1897 t0
= tcg_temp_local_new();
1898 t1
= tcg_temp_local_new();
1899 t2
= tcg_temp_local_new();
1900 a0
= tcg_temp_local_new();
1908 if (op1
== OR_TMP0
) {
1909 tcg_gen_mov_tl(a0
, cpu_A0
);
1910 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
1912 gen_op_mov_v_reg(ot
, t0
, op1
);
1915 tcg_gen_andi_tl(t2
, count
, mask
);
1916 tcg_gen_mov_tl(t1
, cpu_T
[1]);
1918 /* Must test zero case to avoid using undefined behaviour in TCG
1920 label1
= gen_new_label();
1921 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label1
);
1923 tcg_gen_addi_tl(cpu_tmp5
, t2
, -1);
1924 if (ot
== OT_WORD
) {
1925 /* Note: we implement the Intel behaviour for shift count > 16 */
1927 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1928 tcg_gen_shli_tl(cpu_tmp0
, t1
, 16);
1929 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1930 tcg_gen_ext32u_tl(t0
, t0
);
1932 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1934 /* only needed if count > 16, but a test would complicate */
1935 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1936 tcg_gen_shl_tl(cpu_tmp0
, t0
, cpu_tmp5
);
1938 tcg_gen_shr_tl(t0
, t0
, t2
);
1940 tcg_gen_or_tl(t0
, t0
, cpu_tmp0
);
1942 /* XXX: not optimal */
1943 tcg_gen_andi_tl(t0
, t0
, 0xffff);
1944 tcg_gen_shli_tl(t1
, t1
, 16);
1945 tcg_gen_or_tl(t1
, t1
, t0
);
1946 tcg_gen_ext32u_tl(t1
, t1
);
1948 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1949 tcg_gen_subfi_tl(cpu_tmp0
, 32, cpu_tmp5
);
1950 tcg_gen_shr_tl(cpu_tmp5
, t1
, cpu_tmp0
);
1951 tcg_gen_or_tl(cpu_tmp4
, cpu_tmp4
, cpu_tmp5
);
1953 tcg_gen_shl_tl(t0
, t0
, t2
);
1954 tcg_gen_subfi_tl(cpu_tmp5
, 32, t2
);
1955 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1956 tcg_gen_or_tl(t0
, t0
, t1
);
1959 data_bits
= 8 << ot
;
1962 tcg_gen_ext32u_tl(t0
, t0
);
1964 tcg_gen_shr_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1966 tcg_gen_shr_tl(t0
, t0
, t2
);
1967 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1968 tcg_gen_shl_tl(t1
, t1
, cpu_tmp5
);
1969 tcg_gen_or_tl(t0
, t0
, t1
);
1973 tcg_gen_ext32u_tl(t1
, t1
);
1975 tcg_gen_shl_tl(cpu_tmp4
, t0
, cpu_tmp5
);
1977 tcg_gen_shl_tl(t0
, t0
, t2
);
1978 tcg_gen_subfi_tl(cpu_tmp5
, data_bits
, t2
);
1979 tcg_gen_shr_tl(t1
, t1
, cpu_tmp5
);
1980 tcg_gen_or_tl(t0
, t0
, t1
);
1983 tcg_gen_mov_tl(t1
, cpu_tmp4
);
1985 gen_set_label(label1
);
1987 if (op1
== OR_TMP0
) {
1988 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
1990 gen_op_mov_reg_v(ot
, op1
, t0
);
1993 /* Update eflags data because we cannot predict flags afterward. */
1994 gen_update_cc_op(s
);
1995 set_cc_op(s
, CC_OP_DYNAMIC
);
1997 label2
= gen_new_label();
1998 tcg_gen_brcondi_tl(TCG_COND_EQ
, t2
, 0, label2
);
2000 tcg_gen_mov_tl(cpu_cc_src
, t1
);
2001 tcg_gen_mov_tl(cpu_cc_dst
, t0
);
2003 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SARB
+ ot
);
2005 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SHLB
+ ot
);
2007 gen_set_label(label2
);
2015 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
2018 gen_op_mov_TN_reg(ot
, 1, s
);
2021 gen_rot_rm_T1(s1
, ot
, d
, 0);
2024 gen_rot_rm_T1(s1
, ot
, d
, 1);
2028 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2031 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2034 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2037 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2040 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2045 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
2049 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2052 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2056 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2059 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2062 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2065 /* currently not optimized */
2066 gen_op_movl_T1_im(c
);
2067 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2072 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2073 int *reg_ptr
, int *offset_ptr
)
2081 int mod
, rm
, code
, override
, must_add_seg
;
2083 override
= s
->override
;
2084 must_add_seg
= s
->addseg
;
2087 mod
= (modrm
>> 6) & 3;
2099 code
= cpu_ldub_code(env
, s
->pc
++);
2100 scale
= (code
>> 6) & 3;
2101 index
= ((code
>> 3) & 7) | REX_X(s
);
2108 if ((base
& 7) == 5) {
2110 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2112 if (CODE64(s
) && !havesib
) {
2113 disp
+= s
->pc
+ s
->rip_offset
;
2120 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2124 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2130 /* for correct popl handling with esp */
2131 if (base
== 4 && s
->popl_esp_hack
)
2132 disp
+= s
->popl_esp_hack
;
2133 #ifdef TARGET_X86_64
2134 if (s
->aflag
== 2) {
2135 gen_op_movq_A0_reg(base
);
2137 gen_op_addq_A0_im(disp
);
2142 gen_op_movl_A0_reg(base
);
2144 gen_op_addl_A0_im(disp
);
2147 #ifdef TARGET_X86_64
2148 if (s
->aflag
== 2) {
2149 gen_op_movq_A0_im(disp
);
2153 gen_op_movl_A0_im(disp
);
2156 /* index == 4 means no index */
2157 if (havesib
&& (index
!= 4)) {
2158 #ifdef TARGET_X86_64
2159 if (s
->aflag
== 2) {
2160 gen_op_addq_A0_reg_sN(scale
, index
);
2164 gen_op_addl_A0_reg_sN(scale
, index
);
2169 if (base
== R_EBP
|| base
== R_ESP
)
2174 #ifdef TARGET_X86_64
2175 if (s
->aflag
== 2) {
2176 gen_op_addq_A0_seg(override
);
2180 gen_op_addl_A0_seg(s
, override
);
2187 disp
= cpu_lduw_code(env
, s
->pc
);
2189 gen_op_movl_A0_im(disp
);
2190 rm
= 0; /* avoid SS override */
2197 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2201 disp
= cpu_lduw_code(env
, s
->pc
);
2207 gen_op_movl_A0_reg(R_EBX
);
2208 gen_op_addl_A0_reg_sN(0, R_ESI
);
2211 gen_op_movl_A0_reg(R_EBX
);
2212 gen_op_addl_A0_reg_sN(0, R_EDI
);
2215 gen_op_movl_A0_reg(R_EBP
);
2216 gen_op_addl_A0_reg_sN(0, R_ESI
);
2219 gen_op_movl_A0_reg(R_EBP
);
2220 gen_op_addl_A0_reg_sN(0, R_EDI
);
2223 gen_op_movl_A0_reg(R_ESI
);
2226 gen_op_movl_A0_reg(R_EDI
);
2229 gen_op_movl_A0_reg(R_EBP
);
2233 gen_op_movl_A0_reg(R_EBX
);
2237 gen_op_addl_A0_im(disp
);
2238 gen_op_andl_A0_ffff();
2242 if (rm
== 2 || rm
== 3 || rm
== 6)
2247 gen_op_addl_A0_seg(s
, override
);
2257 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2259 int mod
, rm
, base
, code
;
2261 mod
= (modrm
>> 6) & 3;
2271 code
= cpu_ldub_code(env
, s
->pc
++);
2307 /* used for LEA and MOV AX, mem */
2308 static void gen_add_A0_ds_seg(DisasContext
*s
)
2310 int override
, must_add_seg
;
2311 must_add_seg
= s
->addseg
;
2313 if (s
->override
>= 0) {
2314 override
= s
->override
;
2318 #ifdef TARGET_X86_64
2320 gen_op_addq_A0_seg(override
);
2324 gen_op_addl_A0_seg(s
, override
);
2329 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2331 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2332 int ot
, int reg
, int is_store
)
2334 int mod
, rm
, opreg
, disp
;
2336 mod
= (modrm
>> 6) & 3;
2337 rm
= (modrm
& 7) | REX_B(s
);
2341 gen_op_mov_TN_reg(ot
, 0, reg
);
2342 gen_op_mov_reg_T0(ot
, rm
);
2344 gen_op_mov_TN_reg(ot
, 0, rm
);
2346 gen_op_mov_reg_T0(ot
, reg
);
2349 gen_lea_modrm(env
, s
, modrm
, &opreg
, &disp
);
2352 gen_op_mov_TN_reg(ot
, 0, reg
);
2353 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2355 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2357 gen_op_mov_reg_T0(ot
, reg
);
2362 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, int ot
)
2368 ret
= cpu_ldub_code(env
, s
->pc
);
2372 ret
= cpu_lduw_code(env
, s
->pc
);
2377 ret
= cpu_ldl_code(env
, s
->pc
);
2384 static inline int insn_const_size(unsigned int ot
)
2392 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2394 TranslationBlock
*tb
;
2397 pc
= s
->cs_base
+ eip
;
2399 /* NOTE: we handle the case where the TB spans two pages here */
2400 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2401 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2402 /* jump to same page: we can use a direct jump */
2403 tcg_gen_goto_tb(tb_num
);
2405 tcg_gen_exit_tb((tcg_target_long
)tb
+ tb_num
);
2407 /* jump to another page: currently not optimized */
2413 static inline void gen_jcc(DisasContext
*s
, int b
,
2414 target_ulong val
, target_ulong next_eip
)
2419 l1
= gen_new_label();
2422 gen_goto_tb(s
, 0, next_eip
);
2425 gen_goto_tb(s
, 1, val
);
2426 s
->is_jmp
= DISAS_TB_JUMP
;
2428 l1
= gen_new_label();
2429 l2
= gen_new_label();
2432 gen_jmp_im(next_eip
);
2442 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, int ot
, int b
,
2447 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2449 cc
= gen_prepare_cc(s
, b
, cpu_T
[1]);
2450 if (cc
.mask
!= -1) {
2451 TCGv t0
= tcg_temp_new();
2452 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2456 cc
.reg2
= tcg_const_tl(cc
.imm
);
2459 tcg_gen_movcond_tl(cc
.cond
, cpu_T
[0], cc
.reg
, cc
.reg2
,
2460 cpu_T
[0], cpu_regs
[reg
]);
2461 gen_op_mov_reg_T0(ot
, reg
);
2463 if (cc
.mask
!= -1) {
2464 tcg_temp_free(cc
.reg
);
2467 tcg_temp_free(cc
.reg2
);
2471 static inline void gen_op_movl_T0_seg(int seg_reg
)
2473 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2474 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2477 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2479 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2480 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2481 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2482 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2483 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2484 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2487 /* move T0 to seg_reg and compute if the CPU state may change. Never
2488 call this function with seg_reg == R_CS */
2489 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2491 if (s
->pe
&& !s
->vm86
) {
2492 /* XXX: optimize by finding processor state dynamically */
2493 gen_update_cc_op(s
);
2494 gen_jmp_im(cur_eip
);
2495 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2496 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2497 /* abort translation because the addseg value may change or
2498 because ss32 may change. For R_SS, translation must always
2499 stop as a special handling must be done to disable hardware
2500 interrupts for the next instruction */
2501 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2502 s
->is_jmp
= DISAS_TB_JUMP
;
2504 gen_op_movl_seg_T0_vm(seg_reg
);
2505 if (seg_reg
== R_SS
)
2506 s
->is_jmp
= DISAS_TB_JUMP
;
2510 static inline int svm_is_rep(int prefixes
)
2512 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2516 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2517 uint32_t type
, uint64_t param
)
2519 /* no SVM activated; fast case */
2520 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2522 gen_update_cc_op(s
);
2523 gen_jmp_im(pc_start
- s
->cs_base
);
2524 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2525 tcg_const_i64(param
));
2529 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2531 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2534 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2536 #ifdef TARGET_X86_64
2538 gen_op_add_reg_im(2, R_ESP
, addend
);
2542 gen_op_add_reg_im(1, R_ESP
, addend
);
2544 gen_op_add_reg_im(0, R_ESP
, addend
);
2548 /* generate a push. It depends on ss32, addseg and dflag */
2549 static void gen_push_T0(DisasContext
*s
)
2551 #ifdef TARGET_X86_64
2553 gen_op_movq_A0_reg(R_ESP
);
2555 gen_op_addq_A0_im(-8);
2556 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2558 gen_op_addq_A0_im(-2);
2559 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2561 gen_op_mov_reg_A0(2, R_ESP
);
2565 gen_op_movl_A0_reg(R_ESP
);
2567 gen_op_addl_A0_im(-2);
2569 gen_op_addl_A0_im(-4);
2572 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2573 gen_op_addl_A0_seg(s
, R_SS
);
2576 gen_op_andl_A0_ffff();
2577 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2578 gen_op_addl_A0_seg(s
, R_SS
);
2580 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2581 if (s
->ss32
&& !s
->addseg
)
2582 gen_op_mov_reg_A0(1, R_ESP
);
2584 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2588 /* generate a push. It depends on ss32, addseg and dflag */
2589 /* slower version for T1, only used for call Ev */
2590 static void gen_push_T1(DisasContext
*s
)
2592 #ifdef TARGET_X86_64
2594 gen_op_movq_A0_reg(R_ESP
);
2596 gen_op_addq_A0_im(-8);
2597 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2599 gen_op_addq_A0_im(-2);
2600 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2602 gen_op_mov_reg_A0(2, R_ESP
);
2606 gen_op_movl_A0_reg(R_ESP
);
2608 gen_op_addl_A0_im(-2);
2610 gen_op_addl_A0_im(-4);
2613 gen_op_addl_A0_seg(s
, R_SS
);
2616 gen_op_andl_A0_ffff();
2617 gen_op_addl_A0_seg(s
, R_SS
);
2619 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2621 if (s
->ss32
&& !s
->addseg
)
2622 gen_op_mov_reg_A0(1, R_ESP
);
2624 gen_stack_update(s
, (-2) << s
->dflag
);
2628 /* two step pop is necessary for precise exceptions */
2629 static void gen_pop_T0(DisasContext
*s
)
2631 #ifdef TARGET_X86_64
2633 gen_op_movq_A0_reg(R_ESP
);
2634 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2638 gen_op_movl_A0_reg(R_ESP
);
2641 gen_op_addl_A0_seg(s
, R_SS
);
2643 gen_op_andl_A0_ffff();
2644 gen_op_addl_A0_seg(s
, R_SS
);
2646 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2650 static void gen_pop_update(DisasContext
*s
)
2652 #ifdef TARGET_X86_64
2653 if (CODE64(s
) && s
->dflag
) {
2654 gen_stack_update(s
, 8);
2658 gen_stack_update(s
, 2 << s
->dflag
);
2662 static void gen_stack_A0(DisasContext
*s
)
2664 gen_op_movl_A0_reg(R_ESP
);
2666 gen_op_andl_A0_ffff();
2667 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2669 gen_op_addl_A0_seg(s
, R_SS
);
2672 /* NOTE: wrap around in 16 bit not fully handled */
2673 static void gen_pusha(DisasContext
*s
)
2676 gen_op_movl_A0_reg(R_ESP
);
2677 gen_op_addl_A0_im(-16 << s
->dflag
);
2679 gen_op_andl_A0_ffff();
2680 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2682 gen_op_addl_A0_seg(s
, R_SS
);
2683 for(i
= 0;i
< 8; i
++) {
2684 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2685 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2686 gen_op_addl_A0_im(2 << s
->dflag
);
2688 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2691 /* NOTE: wrap around in 16 bit not fully handled */
2692 static void gen_popa(DisasContext
*s
)
2695 gen_op_movl_A0_reg(R_ESP
);
2697 gen_op_andl_A0_ffff();
2698 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2699 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2701 gen_op_addl_A0_seg(s
, R_SS
);
2702 for(i
= 0;i
< 8; i
++) {
2703 /* ESP is not reloaded */
2705 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2706 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2708 gen_op_addl_A0_im(2 << s
->dflag
);
2710 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2713 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2718 #ifdef TARGET_X86_64
2720 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2723 gen_op_movl_A0_reg(R_ESP
);
2724 gen_op_addq_A0_im(-opsize
);
2725 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2728 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2729 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2731 /* XXX: must save state */
2732 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2733 tcg_const_i32((ot
== OT_QUAD
)),
2736 gen_op_mov_reg_T1(ot
, R_EBP
);
2737 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2738 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2742 ot
= s
->dflag
+ OT_WORD
;
2743 opsize
= 2 << s
->dflag
;
2745 gen_op_movl_A0_reg(R_ESP
);
2746 gen_op_addl_A0_im(-opsize
);
2748 gen_op_andl_A0_ffff();
2749 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2751 gen_op_addl_A0_seg(s
, R_SS
);
2753 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2754 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2756 /* XXX: must save state */
2757 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2758 tcg_const_i32(s
->dflag
),
2761 gen_op_mov_reg_T1(ot
, R_EBP
);
2762 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2763 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2767 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2769 gen_update_cc_op(s
);
2770 gen_jmp_im(cur_eip
);
2771 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2772 s
->is_jmp
= DISAS_TB_JUMP
;
2775 /* an interrupt is different from an exception because of the
2777 static void gen_interrupt(DisasContext
*s
, int intno
,
2778 target_ulong cur_eip
, target_ulong next_eip
)
2780 gen_update_cc_op(s
);
2781 gen_jmp_im(cur_eip
);
2782 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2783 tcg_const_i32(next_eip
- cur_eip
));
2784 s
->is_jmp
= DISAS_TB_JUMP
;
2787 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2789 gen_update_cc_op(s
);
2790 gen_jmp_im(cur_eip
);
2791 gen_helper_debug(cpu_env
);
2792 s
->is_jmp
= DISAS_TB_JUMP
;
2795 /* generate a generic end of block. Trace exception is also generated
2797 static void gen_eob(DisasContext
*s
)
2799 gen_update_cc_op(s
);
2800 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2801 gen_helper_reset_inhibit_irq(cpu_env
);
2803 if (s
->tb
->flags
& HF_RF_MASK
) {
2804 gen_helper_reset_rf(cpu_env
);
2806 if (s
->singlestep_enabled
) {
2807 gen_helper_debug(cpu_env
);
2809 gen_helper_single_step(cpu_env
);
2813 s
->is_jmp
= DISAS_TB_JUMP
;
2816 /* generate a jump to eip. No segment change must happen before as a
2817 direct call to the next block may occur */
2818 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2820 gen_update_cc_op(s
);
2821 set_cc_op(s
, CC_OP_DYNAMIC
);
2823 gen_goto_tb(s
, tb_num
, eip
);
2824 s
->is_jmp
= DISAS_TB_JUMP
;
2831 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2833 gen_jmp_tb(s
, eip
, 0);
2836 static inline void gen_ldq_env_A0(int idx
, int offset
)
2838 int mem_index
= (idx
>> 2) - 1;
2839 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2840 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2843 static inline void gen_stq_env_A0(int idx
, int offset
)
2845 int mem_index
= (idx
>> 2) - 1;
2846 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2847 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2850 static inline void gen_ldo_env_A0(int idx
, int offset
)
2852 int mem_index
= (idx
>> 2) - 1;
2853 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2854 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2855 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2856 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2857 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2860 static inline void gen_sto_env_A0(int idx
, int offset
)
2862 int mem_index
= (idx
>> 2) - 1;
2863 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2864 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2865 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2866 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2867 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2870 static inline void gen_op_movo(int d_offset
, int s_offset
)
2872 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2873 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2874 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2875 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2878 static inline void gen_op_movq(int d_offset
, int s_offset
)
2880 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2881 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2884 static inline void gen_op_movl(int d_offset
, int s_offset
)
2886 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2887 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2890 static inline void gen_op_movq_env_0(int d_offset
)
2892 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2893 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2896 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2897 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2898 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2899 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2900 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2901 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2903 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2904 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2907 #define SSE_SPECIAL ((void *)1)
2908 #define SSE_DUMMY ((void *)2)
2910 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2911 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2912 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2914 static const SSEFunc_0_epp sse_op_table1
[256][4] = {
2915 /* 3DNow! extensions */
2916 [0x0e] = { SSE_DUMMY
}, /* femms */
2917 [0x0f] = { SSE_DUMMY
}, /* pf... */
2918 /* pure SSE operations */
2919 [0x10] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2920 [0x11] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2921 [0x12] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd, movsldup, movddup */
2922 [0x13] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd */
2923 [0x14] = { gen_helper_punpckldq_xmm
, gen_helper_punpcklqdq_xmm
},
2924 [0x15] = { gen_helper_punpckhdq_xmm
, gen_helper_punpckhqdq_xmm
},
2925 [0x16] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd, movshdup */
2926 [0x17] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd */
2928 [0x28] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2929 [0x29] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2930 [0x2a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2931 [0x2b] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movntps, movntpd, movntss, movntsd */
2932 [0x2c] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2933 [0x2d] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2934 [0x2e] = { gen_helper_ucomiss
, gen_helper_ucomisd
},
2935 [0x2f] = { gen_helper_comiss
, gen_helper_comisd
},
2936 [0x50] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movmskps, movmskpd */
2937 [0x51] = SSE_FOP(sqrt
),
2938 [0x52] = { gen_helper_rsqrtps
, NULL
, gen_helper_rsqrtss
, NULL
},
2939 [0x53] = { gen_helper_rcpps
, NULL
, gen_helper_rcpss
, NULL
},
2940 [0x54] = { gen_helper_pand_xmm
, gen_helper_pand_xmm
}, /* andps, andpd */
2941 [0x55] = { gen_helper_pandn_xmm
, gen_helper_pandn_xmm
}, /* andnps, andnpd */
2942 [0x56] = { gen_helper_por_xmm
, gen_helper_por_xmm
}, /* orps, orpd */
2943 [0x57] = { gen_helper_pxor_xmm
, gen_helper_pxor_xmm
}, /* xorps, xorpd */
2944 [0x58] = SSE_FOP(add
),
2945 [0x59] = SSE_FOP(mul
),
2946 [0x5a] = { gen_helper_cvtps2pd
, gen_helper_cvtpd2ps
,
2947 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
},
2948 [0x5b] = { gen_helper_cvtdq2ps
, gen_helper_cvtps2dq
, gen_helper_cvttps2dq
},
2949 [0x5c] = SSE_FOP(sub
),
2950 [0x5d] = SSE_FOP(min
),
2951 [0x5e] = SSE_FOP(div
),
2952 [0x5f] = SSE_FOP(max
),
2954 [0xc2] = SSE_FOP(cmpeq
),
2955 [0xc6] = { (SSEFunc_0_epp
)gen_helper_shufps
,
2956 (SSEFunc_0_epp
)gen_helper_shufpd
}, /* XXX: casts */
2958 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2959 [0x38] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2960 [0x3a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2962 /* MMX ops and their SSE extensions */
2963 [0x60] = MMX_OP2(punpcklbw
),
2964 [0x61] = MMX_OP2(punpcklwd
),
2965 [0x62] = MMX_OP2(punpckldq
),
2966 [0x63] = MMX_OP2(packsswb
),
2967 [0x64] = MMX_OP2(pcmpgtb
),
2968 [0x65] = MMX_OP2(pcmpgtw
),
2969 [0x66] = MMX_OP2(pcmpgtl
),
2970 [0x67] = MMX_OP2(packuswb
),
2971 [0x68] = MMX_OP2(punpckhbw
),
2972 [0x69] = MMX_OP2(punpckhwd
),
2973 [0x6a] = MMX_OP2(punpckhdq
),
2974 [0x6b] = MMX_OP2(packssdw
),
2975 [0x6c] = { NULL
, gen_helper_punpcklqdq_xmm
},
2976 [0x6d] = { NULL
, gen_helper_punpckhqdq_xmm
},
2977 [0x6e] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movd mm, ea */
2978 [0x6f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, , movqdu */
2979 [0x70] = { (SSEFunc_0_epp
)gen_helper_pshufw_mmx
,
2980 (SSEFunc_0_epp
)gen_helper_pshufd_xmm
,
2981 (SSEFunc_0_epp
)gen_helper_pshufhw_xmm
,
2982 (SSEFunc_0_epp
)gen_helper_pshuflw_xmm
}, /* XXX: casts */
2983 [0x71] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftw */
2984 [0x72] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftd */
2985 [0x73] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftq */
2986 [0x74] = MMX_OP2(pcmpeqb
),
2987 [0x75] = MMX_OP2(pcmpeqw
),
2988 [0x76] = MMX_OP2(pcmpeql
),
2989 [0x77] = { SSE_DUMMY
}, /* emms */
2990 [0x78] = { NULL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* extrq_i, insertq_i */
2991 [0x79] = { NULL
, gen_helper_extrq_r
, NULL
, gen_helper_insertq_r
},
2992 [0x7c] = { NULL
, gen_helper_haddpd
, NULL
, gen_helper_haddps
},
2993 [0x7d] = { NULL
, gen_helper_hsubpd
, NULL
, gen_helper_hsubps
},
2994 [0x7e] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movd, movd, , movq */
2995 [0x7f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, movdqu */
2996 [0xc4] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pinsrw */
2997 [0xc5] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pextrw */
2998 [0xd0] = { NULL
, gen_helper_addsubpd
, NULL
, gen_helper_addsubps
},
2999 [0xd1] = MMX_OP2(psrlw
),
3000 [0xd2] = MMX_OP2(psrld
),
3001 [0xd3] = MMX_OP2(psrlq
),
3002 [0xd4] = MMX_OP2(paddq
),
3003 [0xd5] = MMX_OP2(pmullw
),
3004 [0xd6] = { NULL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
3005 [0xd7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pmovmskb */
3006 [0xd8] = MMX_OP2(psubusb
),
3007 [0xd9] = MMX_OP2(psubusw
),
3008 [0xda] = MMX_OP2(pminub
),
3009 [0xdb] = MMX_OP2(pand
),
3010 [0xdc] = MMX_OP2(paddusb
),
3011 [0xdd] = MMX_OP2(paddusw
),
3012 [0xde] = MMX_OP2(pmaxub
),
3013 [0xdf] = MMX_OP2(pandn
),
3014 [0xe0] = MMX_OP2(pavgb
),
3015 [0xe1] = MMX_OP2(psraw
),
3016 [0xe2] = MMX_OP2(psrad
),
3017 [0xe3] = MMX_OP2(pavgw
),
3018 [0xe4] = MMX_OP2(pmulhuw
),
3019 [0xe5] = MMX_OP2(pmulhw
),
3020 [0xe6] = { NULL
, gen_helper_cvttpd2dq
, gen_helper_cvtdq2pd
, gen_helper_cvtpd2dq
},
3021 [0xe7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movntq, movntq */
3022 [0xe8] = MMX_OP2(psubsb
),
3023 [0xe9] = MMX_OP2(psubsw
),
3024 [0xea] = MMX_OP2(pminsw
),
3025 [0xeb] = MMX_OP2(por
),
3026 [0xec] = MMX_OP2(paddsb
),
3027 [0xed] = MMX_OP2(paddsw
),
3028 [0xee] = MMX_OP2(pmaxsw
),
3029 [0xef] = MMX_OP2(pxor
),
3030 [0xf0] = { NULL
, NULL
, NULL
, SSE_SPECIAL
}, /* lddqu */
3031 [0xf1] = MMX_OP2(psllw
),
3032 [0xf2] = MMX_OP2(pslld
),
3033 [0xf3] = MMX_OP2(psllq
),
3034 [0xf4] = MMX_OP2(pmuludq
),
3035 [0xf5] = MMX_OP2(pmaddwd
),
3036 [0xf6] = MMX_OP2(psadbw
),
3037 [0xf7] = { (SSEFunc_0_epp
)gen_helper_maskmov_mmx
,
3038 (SSEFunc_0_epp
)gen_helper_maskmov_xmm
}, /* XXX: casts */
3039 [0xf8] = MMX_OP2(psubb
),
3040 [0xf9] = MMX_OP2(psubw
),
3041 [0xfa] = MMX_OP2(psubl
),
3042 [0xfb] = MMX_OP2(psubq
),
3043 [0xfc] = MMX_OP2(paddb
),
3044 [0xfd] = MMX_OP2(paddw
),
3045 [0xfe] = MMX_OP2(paddl
),
3048 static const SSEFunc_0_epp sse_op_table2
[3 * 8][2] = {
3049 [0 + 2] = MMX_OP2(psrlw
),
3050 [0 + 4] = MMX_OP2(psraw
),
3051 [0 + 6] = MMX_OP2(psllw
),
3052 [8 + 2] = MMX_OP2(psrld
),
3053 [8 + 4] = MMX_OP2(psrad
),
3054 [8 + 6] = MMX_OP2(pslld
),
3055 [16 + 2] = MMX_OP2(psrlq
),
3056 [16 + 3] = { NULL
, gen_helper_psrldq_xmm
},
3057 [16 + 6] = MMX_OP2(psllq
),
3058 [16 + 7] = { NULL
, gen_helper_pslldq_xmm
},
3061 static const SSEFunc_0_epi sse_op_table3ai
[] = {
3062 gen_helper_cvtsi2ss
,
3066 #ifdef TARGET_X86_64
3067 static const SSEFunc_0_epl sse_op_table3aq
[] = {
3068 gen_helper_cvtsq2ss
,
3073 static const SSEFunc_i_ep sse_op_table3bi
[] = {
3074 gen_helper_cvttss2si
,
3075 gen_helper_cvtss2si
,
3076 gen_helper_cvttsd2si
,
3080 #ifdef TARGET_X86_64
3081 static const SSEFunc_l_ep sse_op_table3bq
[] = {
3082 gen_helper_cvttss2sq
,
3083 gen_helper_cvtss2sq
,
3084 gen_helper_cvttsd2sq
,
3089 static const SSEFunc_0_epp sse_op_table4
[8][4] = {
3100 static const SSEFunc_0_epp sse_op_table5
[256] = {
3101 [0x0c] = gen_helper_pi2fw
,
3102 [0x0d] = gen_helper_pi2fd
,
3103 [0x1c] = gen_helper_pf2iw
,
3104 [0x1d] = gen_helper_pf2id
,
3105 [0x8a] = gen_helper_pfnacc
,
3106 [0x8e] = gen_helper_pfpnacc
,
3107 [0x90] = gen_helper_pfcmpge
,
3108 [0x94] = gen_helper_pfmin
,
3109 [0x96] = gen_helper_pfrcp
,
3110 [0x97] = gen_helper_pfrsqrt
,
3111 [0x9a] = gen_helper_pfsub
,
3112 [0x9e] = gen_helper_pfadd
,
3113 [0xa0] = gen_helper_pfcmpgt
,
3114 [0xa4] = gen_helper_pfmax
,
3115 [0xa6] = gen_helper_movq
, /* pfrcpit1; no need to actually increase precision */
3116 [0xa7] = gen_helper_movq
, /* pfrsqit1 */
3117 [0xaa] = gen_helper_pfsubr
,
3118 [0xae] = gen_helper_pfacc
,
3119 [0xb0] = gen_helper_pfcmpeq
,
3120 [0xb4] = gen_helper_pfmul
,
3121 [0xb6] = gen_helper_movq
, /* pfrcpit2 */
3122 [0xb7] = gen_helper_pmulhrw_mmx
,
3123 [0xbb] = gen_helper_pswapd
,
3124 [0xbf] = gen_helper_pavgb_mmx
/* pavgusb */
3127 struct SSEOpHelper_epp
{
3128 SSEFunc_0_epp op
[2];
3132 struct SSEOpHelper_eppi
{
3133 SSEFunc_0_eppi op
[2];
/*
 * Initializer macros for the SSEOpHelper_epp / SSEOpHelper_eppi op
 * tables below (0f 38 / 0f 3a opcode spaces).  Each expands to a
 * { { mmx_helper, xmm_helper }, cpuid_feature_mask } entry:
 *  - SSSE3 insns have both an MMX and an XMM helper (via MMX_OP2);
 *  - SSE4.1/SSE4.2 insns exist only in the XMM form, so the MMX
 *    slot is NULL (decoder rejects the non-66-prefixed encoding);
 *  - SSE41_SPECIAL marks insns that gen_sse() decodes by hand
 *    rather than through a generic helper call.
 * The ext_mask is checked against s->cpuid_ext_features before the
 * insn is accepted.
 */
3137 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3138 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3139 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3140 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3142 static const struct SSEOpHelper_epp sse_op_table6
[256] = {
3143 [0x00] = SSSE3_OP(pshufb
),
3144 [0x01] = SSSE3_OP(phaddw
),
3145 [0x02] = SSSE3_OP(phaddd
),
3146 [0x03] = SSSE3_OP(phaddsw
),
3147 [0x04] = SSSE3_OP(pmaddubsw
),
3148 [0x05] = SSSE3_OP(phsubw
),
3149 [0x06] = SSSE3_OP(phsubd
),
3150 [0x07] = SSSE3_OP(phsubsw
),
3151 [0x08] = SSSE3_OP(psignb
),
3152 [0x09] = SSSE3_OP(psignw
),
3153 [0x0a] = SSSE3_OP(psignd
),
3154 [0x0b] = SSSE3_OP(pmulhrsw
),
3155 [0x10] = SSE41_OP(pblendvb
),
3156 [0x14] = SSE41_OP(blendvps
),
3157 [0x15] = SSE41_OP(blendvpd
),
3158 [0x17] = SSE41_OP(ptest
),
3159 [0x1c] = SSSE3_OP(pabsb
),
3160 [0x1d] = SSSE3_OP(pabsw
),
3161 [0x1e] = SSSE3_OP(pabsd
),
3162 [0x20] = SSE41_OP(pmovsxbw
),
3163 [0x21] = SSE41_OP(pmovsxbd
),
3164 [0x22] = SSE41_OP(pmovsxbq
),
3165 [0x23] = SSE41_OP(pmovsxwd
),
3166 [0x24] = SSE41_OP(pmovsxwq
),
3167 [0x25] = SSE41_OP(pmovsxdq
),
3168 [0x28] = SSE41_OP(pmuldq
),
3169 [0x29] = SSE41_OP(pcmpeqq
),
3170 [0x2a] = SSE41_SPECIAL
, /* movntqda */
3171 [0x2b] = SSE41_OP(packusdw
),
3172 [0x30] = SSE41_OP(pmovzxbw
),
3173 [0x31] = SSE41_OP(pmovzxbd
),
3174 [0x32] = SSE41_OP(pmovzxbq
),
3175 [0x33] = SSE41_OP(pmovzxwd
),
3176 [0x34] = SSE41_OP(pmovzxwq
),
3177 [0x35] = SSE41_OP(pmovzxdq
),
3178 [0x37] = SSE42_OP(pcmpgtq
),
3179 [0x38] = SSE41_OP(pminsb
),
3180 [0x39] = SSE41_OP(pminsd
),
3181 [0x3a] = SSE41_OP(pminuw
),
3182 [0x3b] = SSE41_OP(pminud
),
3183 [0x3c] = SSE41_OP(pmaxsb
),
3184 [0x3d] = SSE41_OP(pmaxsd
),
3185 [0x3e] = SSE41_OP(pmaxuw
),
3186 [0x3f] = SSE41_OP(pmaxud
),
3187 [0x40] = SSE41_OP(pmulld
),
3188 [0x41] = SSE41_OP(phminposuw
),
3191 static const struct SSEOpHelper_eppi sse_op_table7
[256] = {
3192 [0x08] = SSE41_OP(roundps
),
3193 [0x09] = SSE41_OP(roundpd
),
3194 [0x0a] = SSE41_OP(roundss
),
3195 [0x0b] = SSE41_OP(roundsd
),
3196 [0x0c] = SSE41_OP(blendps
),
3197 [0x0d] = SSE41_OP(blendpd
),
3198 [0x0e] = SSE41_OP(pblendw
),
3199 [0x0f] = SSSE3_OP(palignr
),
3200 [0x14] = SSE41_SPECIAL
, /* pextrb */
3201 [0x15] = SSE41_SPECIAL
, /* pextrw */
3202 [0x16] = SSE41_SPECIAL
, /* pextrd/pextrq */
3203 [0x17] = SSE41_SPECIAL
, /* extractps */
3204 [0x20] = SSE41_SPECIAL
, /* pinsrb */
3205 [0x21] = SSE41_SPECIAL
, /* insertps */
3206 [0x22] = SSE41_SPECIAL
, /* pinsrd/pinsrq */
3207 [0x40] = SSE41_OP(dpps
),
3208 [0x41] = SSE41_OP(dppd
),
3209 [0x42] = SSE41_OP(mpsadbw
),
3210 [0x60] = SSE42_OP(pcmpestrm
),
3211 [0x61] = SSE42_OP(pcmpestri
),
3212 [0x62] = SSE42_OP(pcmpistrm
),
3213 [0x63] = SSE42_OP(pcmpistri
),
3216 static void gen_sse(CPUX86State
*env
, DisasContext
*s
, int b
,
3217 target_ulong pc_start
, int rex_r
)
3219 int b1
, op1_offset
, op2_offset
, is_xmm
, val
, ot
;
3220 int modrm
, mod
, rm
, reg
, reg_addr
, offset_addr
;
3221 SSEFunc_0_epp sse_fn_epp
;
3222 SSEFunc_0_eppi sse_fn_eppi
;
3223 SSEFunc_0_ppi sse_fn_ppi
;
3224 SSEFunc_0_eppt sse_fn_eppt
;
3227 if (s
->prefix
& PREFIX_DATA
)
3229 else if (s
->prefix
& PREFIX_REPZ
)
3231 else if (s
->prefix
& PREFIX_REPNZ
)
3235 sse_fn_epp
= sse_op_table1
[b
][b1
];
3239 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3249 /* simple MMX/SSE operation */
3250 if (s
->flags
& HF_TS_MASK
) {
3251 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3254 if (s
->flags
& HF_EM_MASK
) {
3256 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
3259 if (is_xmm
&& !(s
->flags
& HF_OSFXSR_MASK
))
3260 if ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))
3263 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
3266 gen_helper_emms(cpu_env
);
3271 gen_helper_emms(cpu_env
);
3274 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3275 the static cpu state) */
3277 gen_helper_enter_mmx(cpu_env
);
3280 modrm
= cpu_ldub_code(env
, s
->pc
++);
3281 reg
= ((modrm
>> 3) & 7);
3284 mod
= (modrm
>> 6) & 3;
3285 if (sse_fn_epp
== SSE_SPECIAL
) {
3288 case 0x0e7: /* movntq */
3291 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3292 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3294 case 0x1e7: /* movntdq */
3295 case 0x02b: /* movntps */
3296 case 0x12b: /* movntps */
3299 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3300 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3302 case 0x3f0: /* lddqu */
3305 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3306 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3308 case 0x22b: /* movntss */
3309 case 0x32b: /* movntsd */
3312 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3314 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,
3317 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3318 xmm_regs
[reg
].XMM_L(0)));
3319 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3322 case 0x6e: /* movd mm, ea */
3323 #ifdef TARGET_X86_64
3324 if (s
->dflag
== 2) {
3325 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3326 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3330 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3331 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3332 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3333 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3334 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3337 case 0x16e: /* movd xmm, ea */
3338 #ifdef TARGET_X86_64
3339 if (s
->dflag
== 2) {
3340 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3341 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3342 offsetof(CPUX86State
,xmm_regs
[reg
]));
3343 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3347 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3348 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3349 offsetof(CPUX86State
,xmm_regs
[reg
]));
3350 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3351 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3354 case 0x6f: /* movq mm, ea */
3356 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3357 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3360 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3361 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3362 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3363 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3366 case 0x010: /* movups */
3367 case 0x110: /* movupd */
3368 case 0x028: /* movaps */
3369 case 0x128: /* movapd */
3370 case 0x16f: /* movdqa xmm, ea */
3371 case 0x26f: /* movdqu xmm, ea */
3373 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3374 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3376 rm
= (modrm
& 7) | REX_B(s
);
3377 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3378 offsetof(CPUX86State
,xmm_regs
[rm
]));
3381 case 0x210: /* movss xmm, ea */
3383 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3384 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3385 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3387 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3388 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3389 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3391 rm
= (modrm
& 7) | REX_B(s
);
3392 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3393 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3396 case 0x310: /* movsd xmm, ea */
3398 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3399 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3401 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3402 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3404 rm
= (modrm
& 7) | REX_B(s
);
3405 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3406 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3409 case 0x012: /* movlps */
3410 case 0x112: /* movlpd */
3412 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3413 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3416 rm
= (modrm
& 7) | REX_B(s
);
3417 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3418 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3421 case 0x212: /* movsldup */
3423 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3424 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3426 rm
= (modrm
& 7) | REX_B(s
);
3427 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3428 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3429 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3430 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3432 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3433 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3434 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3435 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3437 case 0x312: /* movddup */
3439 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3440 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3442 rm
= (modrm
& 7) | REX_B(s
);
3443 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3444 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3446 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3447 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3449 case 0x016: /* movhps */
3450 case 0x116: /* movhpd */
3452 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3453 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3456 rm
= (modrm
& 7) | REX_B(s
);
3457 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3458 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3461 case 0x216: /* movshdup */
3463 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3464 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3466 rm
= (modrm
& 7) | REX_B(s
);
3467 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3468 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3469 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3470 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3472 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3473 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3474 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3475 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3480 int bit_index
, field_length
;
3482 if (b1
== 1 && reg
!= 0)
3484 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3485 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3486 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3487 offsetof(CPUX86State
,xmm_regs
[reg
]));
3489 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3490 tcg_const_i32(bit_index
),
3491 tcg_const_i32(field_length
));
3493 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3494 tcg_const_i32(bit_index
),
3495 tcg_const_i32(field_length
));
3498 case 0x7e: /* movd ea, mm */
3499 #ifdef TARGET_X86_64
3500 if (s
->dflag
== 2) {
3501 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3502 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3503 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3507 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3508 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3509 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3512 case 0x17e: /* movd ea, xmm */
3513 #ifdef TARGET_X86_64
3514 if (s
->dflag
== 2) {
3515 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3516 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3517 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3521 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3522 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3523 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3526 case 0x27e: /* movq xmm, ea */
3528 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3529 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3531 rm
= (modrm
& 7) | REX_B(s
);
3532 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3533 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3535 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3537 case 0x7f: /* movq ea, mm */
3539 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3540 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3543 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3544 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3547 case 0x011: /* movups */
3548 case 0x111: /* movupd */
3549 case 0x029: /* movaps */
3550 case 0x129: /* movapd */
3551 case 0x17f: /* movdqa ea, xmm */
3552 case 0x27f: /* movdqu ea, xmm */
3554 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3555 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3557 rm
= (modrm
& 7) | REX_B(s
);
3558 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3559 offsetof(CPUX86State
,xmm_regs
[reg
]));
3562 case 0x211: /* movss ea, xmm */
3564 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3565 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3566 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3568 rm
= (modrm
& 7) | REX_B(s
);
3569 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3570 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3573 case 0x311: /* movsd ea, xmm */
3575 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3576 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3578 rm
= (modrm
& 7) | REX_B(s
);
3579 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3580 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3583 case 0x013: /* movlps */
3584 case 0x113: /* movlpd */
3586 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3587 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3592 case 0x017: /* movhps */
3593 case 0x117: /* movhpd */
3595 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3596 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3601 case 0x71: /* shift mm, im */
3604 case 0x171: /* shift xmm, im */
3610 val
= cpu_ldub_code(env
, s
->pc
++);
3612 gen_op_movl_T0_im(val
);
3613 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3615 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3616 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3618 gen_op_movl_T0_im(val
);
3619 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3621 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3622 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3624 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3625 (((modrm
>> 3)) & 7)][b1
];
3630 rm
= (modrm
& 7) | REX_B(s
);
3631 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3634 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3636 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3637 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3638 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3640 case 0x050: /* movmskps */
3641 rm
= (modrm
& 7) | REX_B(s
);
3642 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3643 offsetof(CPUX86State
,xmm_regs
[rm
]));
3644 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3645 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3646 gen_op_mov_reg_T0(OT_LONG
, reg
);
3648 case 0x150: /* movmskpd */
3649 rm
= (modrm
& 7) | REX_B(s
);
3650 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3651 offsetof(CPUX86State
,xmm_regs
[rm
]));
3652 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3653 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3654 gen_op_mov_reg_T0(OT_LONG
, reg
);
3656 case 0x02a: /* cvtpi2ps */
3657 case 0x12a: /* cvtpi2pd */
3658 gen_helper_enter_mmx(cpu_env
);
3660 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3661 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3662 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3665 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3667 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3668 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3669 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3672 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3676 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3680 case 0x22a: /* cvtsi2ss */
3681 case 0x32a: /* cvtsi2sd */
3682 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3683 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3684 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3685 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3686 if (ot
== OT_LONG
) {
3687 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3688 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3689 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3691 #ifdef TARGET_X86_64
3692 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3693 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T
[0]);
3699 case 0x02c: /* cvttps2pi */
3700 case 0x12c: /* cvttpd2pi */
3701 case 0x02d: /* cvtps2pi */
3702 case 0x12d: /* cvtpd2pi */
3703 gen_helper_enter_mmx(cpu_env
);
3705 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3706 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3707 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3709 rm
= (modrm
& 7) | REX_B(s
);
3710 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3712 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3713 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3714 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3717 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3720 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3723 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3726 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3730 case 0x22c: /* cvttss2si */
3731 case 0x32c: /* cvttsd2si */
3732 case 0x22d: /* cvtss2si */
3733 case 0x32d: /* cvtsd2si */
3734 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3736 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3738 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3740 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3741 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3743 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3745 rm
= (modrm
& 7) | REX_B(s
);
3746 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3748 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3749 if (ot
== OT_LONG
) {
3750 SSEFunc_i_ep sse_fn_i_ep
=
3751 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3752 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3753 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3755 #ifdef TARGET_X86_64
3756 SSEFunc_l_ep sse_fn_l_ep
=
3757 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3758 sse_fn_l_ep(cpu_T
[0], cpu_env
, cpu_ptr0
);
3763 gen_op_mov_reg_T0(ot
, reg
);
3765 case 0xc4: /* pinsrw */
3768 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3769 val
= cpu_ldub_code(env
, s
->pc
++);
3772 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3773 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3776 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3777 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3780 case 0xc5: /* pextrw */
3784 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3785 val
= cpu_ldub_code(env
, s
->pc
++);
3788 rm
= (modrm
& 7) | REX_B(s
);
3789 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3790 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3794 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3795 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3797 reg
= ((modrm
>> 3) & 7) | rex_r
;
3798 gen_op_mov_reg_T0(ot
, reg
);
3800 case 0x1d6: /* movq ea, xmm */
3802 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3803 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3805 rm
= (modrm
& 7) | REX_B(s
);
3806 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3807 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3808 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3811 case 0x2d6: /* movq2dq */
3812 gen_helper_enter_mmx(cpu_env
);
3814 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3815 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3816 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3818 case 0x3d6: /* movdq2q */
3819 gen_helper_enter_mmx(cpu_env
);
3820 rm
= (modrm
& 7) | REX_B(s
);
3821 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3822 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3824 case 0xd7: /* pmovmskb */
3829 rm
= (modrm
& 7) | REX_B(s
);
3830 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3831 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3834 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3835 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3837 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3838 reg
= ((modrm
>> 3) & 7) | rex_r
;
3839 gen_op_mov_reg_T0(OT_LONG
, reg
);
3845 if ((b
& 0xf0) == 0xf0) {
3848 modrm
= cpu_ldub_code(env
, s
->pc
++);
3850 reg
= ((modrm
>> 3) & 7) | rex_r
;
3851 mod
= (modrm
>> 6) & 3;
3856 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3860 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3864 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3866 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3868 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3869 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3871 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3872 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3873 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3874 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3875 offsetof(XMMReg
, XMM_Q(0)));
3877 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3878 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3879 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3880 (s
->mem_index
>> 2) - 1);
3881 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3882 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3883 offsetof(XMMReg
, XMM_L(0)));
3885 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3886 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3887 (s
->mem_index
>> 2) - 1);
3888 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3889 offsetof(XMMReg
, XMM_W(0)));
3891 case 0x2a: /* movntqda */
3892 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3895 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3899 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3901 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3903 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3904 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3905 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3908 if (sse_fn_epp
== SSE_SPECIAL
) {
3912 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3913 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3914 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3917 set_cc_op(s
, CC_OP_EFLAGS
);
3924 /* Various integer extensions at 0f 38 f[0-f]. */
3925 b
= modrm
| (b1
<< 8);
3926 modrm
= cpu_ldub_code(env
, s
->pc
++);
3927 reg
= ((modrm
>> 3) & 7) | rex_r
;
3930 case 0x3f0: /* crc32 Gd,Eb */
3931 case 0x3f1: /* crc32 Gd,Ey */
3933 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3936 if ((b
& 0xff) == 0xf0) {
3938 } else if (s
->dflag
!= 2) {
3939 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3944 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3945 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3946 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3947 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3948 cpu_T
[0], tcg_const_i32(8 << ot
));
3950 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3951 gen_op_mov_reg_T0(ot
, reg
);
3954 case 0x1f0: /* crc32 or movbe */
3956 /* For these insns, the f3 prefix is supposed to have priority
3957 over the 66 prefix, but that's not what we implement above
3959 if (s
->prefix
& PREFIX_REPNZ
) {
3963 case 0x0f0: /* movbe Gy,My */
3964 case 0x0f1: /* movbe My,Gy */
3965 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3968 if (s
->dflag
!= 2) {
3969 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3974 /* Load the data incoming to the bswap. Note that the TCG
3975 implementation of bswap requires the input be zero
3976 extended. In the case of the loads, we simply know that
3977 gen_op_ld_v via gen_ldst_modrm does that already. */
3979 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3983 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[reg
]);
3986 tcg_gen_ext32u_tl(cpu_T
[0], cpu_regs
[reg
]);
3989 tcg_gen_mov_tl(cpu_T
[0], cpu_regs
[reg
]);
3996 tcg_gen_bswap16_tl(cpu_T
[0], cpu_T
[0]);
3999 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
4001 #ifdef TARGET_X86_64
4003 tcg_gen_bswap64_tl(cpu_T
[0], cpu_T
[0]);
4009 gen_op_mov_reg_T0(ot
, reg
);
4011 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4015 case 0x0f2: /* andn Gy, By, Ey */
4016 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4017 || !(s
->prefix
& PREFIX_VEX
)
4021 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4022 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4023 tcg_gen_andc_tl(cpu_T
[0], cpu_regs
[s
->vex_v
], cpu_T
[0]);
4024 gen_op_mov_reg_T0(ot
, reg
);
4025 gen_op_update1_cc();
4026 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4037 modrm
= cpu_ldub_code(env
, s
->pc
++);
4039 reg
= ((modrm
>> 3) & 7) | rex_r
;
4040 mod
= (modrm
>> 6) & 3;
4045 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4049 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4052 if (sse_fn_eppi
== SSE_SPECIAL
) {
4053 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
4054 rm
= (modrm
& 7) | REX_B(s
);
4056 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4057 reg
= ((modrm
>> 3) & 7) | rex_r
;
4058 val
= cpu_ldub_code(env
, s
->pc
++);
4060 case 0x14: /* pextrb */
4061 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4062 xmm_regs
[reg
].XMM_B(val
& 15)));
4064 gen_op_mov_reg_T0(ot
, rm
);
4066 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
4067 (s
->mem_index
>> 2) - 1);
4069 case 0x15: /* pextrw */
4070 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4071 xmm_regs
[reg
].XMM_W(val
& 7)));
4073 gen_op_mov_reg_T0(ot
, rm
);
4075 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
4076 (s
->mem_index
>> 2) - 1);
4079 if (ot
== OT_LONG
) { /* pextrd */
4080 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4081 offsetof(CPUX86State
,
4082 xmm_regs
[reg
].XMM_L(val
& 3)));
4083 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4085 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4087 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4088 (s
->mem_index
>> 2) - 1);
4089 } else { /* pextrq */
4090 #ifdef TARGET_X86_64
4091 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4092 offsetof(CPUX86State
,
4093 xmm_regs
[reg
].XMM_Q(val
& 1)));
4095 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
4097 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
4098 (s
->mem_index
>> 2) - 1);
4104 case 0x17: /* extractps */
4105 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4106 xmm_regs
[reg
].XMM_L(val
& 3)));
4108 gen_op_mov_reg_T0(ot
, rm
);
4110 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4111 (s
->mem_index
>> 2) - 1);
4113 case 0x20: /* pinsrb */
4115 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
4117 tcg_gen_qemu_ld8u(cpu_tmp0
, cpu_A0
,
4118 (s
->mem_index
>> 2) - 1);
4119 tcg_gen_st8_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
,
4120 xmm_regs
[reg
].XMM_B(val
& 15)));
4122 case 0x21: /* insertps */
4124 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4125 offsetof(CPUX86State
,xmm_regs
[rm
]
4126 .XMM_L((val
>> 6) & 3)));
4128 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4129 (s
->mem_index
>> 2) - 1);
4130 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4132 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4133 offsetof(CPUX86State
,xmm_regs
[reg
]
4134 .XMM_L((val
>> 4) & 3)));
4136 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4137 cpu_env
, offsetof(CPUX86State
,
4138 xmm_regs
[reg
].XMM_L(0)));
4140 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4141 cpu_env
, offsetof(CPUX86State
,
4142 xmm_regs
[reg
].XMM_L(1)));
4144 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4145 cpu_env
, offsetof(CPUX86State
,
4146 xmm_regs
[reg
].XMM_L(2)));
4148 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4149 cpu_env
, offsetof(CPUX86State
,
4150 xmm_regs
[reg
].XMM_L(3)));
4153 if (ot
== OT_LONG
) { /* pinsrd */
4155 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
4157 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4158 (s
->mem_index
>> 2) - 1);
4159 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4160 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4161 offsetof(CPUX86State
,
4162 xmm_regs
[reg
].XMM_L(val
& 3)));
4163 } else { /* pinsrq */
4164 #ifdef TARGET_X86_64
4166 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4168 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
4169 (s
->mem_index
>> 2) - 1);
4170 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4171 offsetof(CPUX86State
,
4172 xmm_regs
[reg
].XMM_Q(val
& 1)));
4183 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4185 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4187 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4188 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4189 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4192 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4194 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4196 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4197 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4198 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4201 val
= cpu_ldub_code(env
, s
->pc
++);
4203 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4204 set_cc_op(s
, CC_OP_EFLAGS
);
4207 /* The helper must use entire 64-bit gp registers */
4211 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4212 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4213 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4219 /* generic MMX or SSE operation */
4221 case 0x70: /* pshufx insn */
4222 case 0xc6: /* pshufx insn */
4223 case 0xc2: /* compare insns */
4230 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4232 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4233 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4234 if (b1
>= 2 && ((b
>= 0x50 && b
<= 0x5f && b
!= 0x5b) ||
4236 /* specific case for SSE single instructions */
4239 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
4240 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4243 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_D(0)));
4246 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4249 rm
= (modrm
& 7) | REX_B(s
);
4250 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4253 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4255 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4256 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4257 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4260 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4264 case 0x0f: /* 3DNow! data insns */
4265 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4267 val
= cpu_ldub_code(env
, s
->pc
++);
4268 sse_fn_epp
= sse_op_table5
[val
];
4272 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4273 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4274 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4276 case 0x70: /* pshufx insn */
4277 case 0xc6: /* pshufx insn */
4278 val
= cpu_ldub_code(env
, s
->pc
++);
4279 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4280 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4281 /* XXX: introduce a new table? */
4282 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4283 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4287 val
= cpu_ldub_code(env
, s
->pc
++);
4290 sse_fn_epp
= sse_op_table4
[val
][b1
];
4292 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4293 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4294 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4297 /* maskmov : we must prepare A0 */
4300 #ifdef TARGET_X86_64
4301 if (s
->aflag
== 2) {
4302 gen_op_movq_A0_reg(R_EDI
);
4306 gen_op_movl_A0_reg(R_EDI
);
4308 gen_op_andl_A0_ffff();
4310 gen_add_A0_ds_seg(s
);
4312 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4313 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4314 /* XXX: introduce a new table? */
4315 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4316 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4319 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4320 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4321 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4324 if (b
== 0x2e || b
== 0x2f) {
4325 set_cc_op(s
, CC_OP_EFLAGS
);
4330 /* convert one instruction. s->is_jmp is set if the translation must
4331 be stopped. Return the next pc value */
4332 static target_ulong
disas_insn(CPUX86State
*env
, DisasContext
*s
,
4333 target_ulong pc_start
)
4335 int b
, prefixes
, aflag
, dflag
;
4337 int modrm
, reg
, rm
, mod
, reg_addr
, op
, opreg
, offset_addr
, val
;
4338 target_ulong next_eip
, tval
;
4341 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4342 tcg_gen_debug_insn_start(pc_start
);
4351 #ifdef TARGET_X86_64
4356 s
->rip_offset
= 0; /* for relative ip address */
4360 b
= cpu_ldub_code(env
, s
->pc
);
4362 /* Collect prefixes. */
4365 prefixes
|= PREFIX_REPZ
;
4368 prefixes
|= PREFIX_REPNZ
;
4371 prefixes
|= PREFIX_LOCK
;
4392 prefixes
|= PREFIX_DATA
;
4395 prefixes
|= PREFIX_ADR
;
4397 #ifdef TARGET_X86_64
4401 rex_w
= (b
>> 3) & 1;
4402 rex_r
= (b
& 0x4) << 1;
4403 s
->rex_x
= (b
& 0x2) << 2;
4404 REX_B(s
) = (b
& 0x1) << 3;
4405 x86_64_hregs
= 1; /* select uniform byte register addressing */
4410 case 0xc5: /* 2-byte VEX */
4411 case 0xc4: /* 3-byte VEX */
4412 /* VEX prefixes cannot be used except in 32-bit mode.
4413 Otherwise the instruction is LES or LDS. */
4414 if (s
->code32
&& !s
->vm86
) {
4415 static const int pp_prefix
[4] = {
4416 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4418 int vex3
, vex2
= cpu_ldub_code(env
, s
->pc
);
4420 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4421 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4422 otherwise the instruction is LES or LDS. */
4427 /* 4.1.1-4.1.3: No preceeding lock, 66, f2, f3, or rex prefixes. */
4428 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4429 | PREFIX_LOCK
| PREFIX_DATA
)) {
4432 #ifdef TARGET_X86_64
4437 rex_r
= (~vex2
>> 4) & 8;
4440 b
= cpu_ldub_code(env
, s
->pc
++);
4442 #ifdef TARGET_X86_64
4443 s
->rex_x
= (~vex2
>> 3) & 8;
4444 s
->rex_b
= (~vex2
>> 2) & 8;
4446 vex3
= cpu_ldub_code(env
, s
->pc
++);
4447 rex_w
= (vex3
>> 7) & 1;
4448 switch (vex2
& 0x1f) {
4449 case 0x01: /* Implied 0f leading opcode bytes. */
4450 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4452 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4455 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4458 default: /* Reserved for future use. */
4462 s
->vex_v
= (~vex3
>> 3) & 0xf;
4463 s
->vex_l
= (vex3
>> 2) & 1;
4464 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4469 /* Post-process prefixes. */
4470 if (prefixes
& PREFIX_DATA
) {
4473 if (prefixes
& PREFIX_ADR
) {
4476 #ifdef TARGET_X86_64
4479 /* 0x66 is ignored if rex.w is set */
4482 if (!(prefixes
& PREFIX_ADR
)) {
4488 s
->prefix
= prefixes
;
4492 /* lock generation */
4493 if (prefixes
& PREFIX_LOCK
)
4496 /* now check op code */
4500 /**************************/
4501 /* extended op code */
4502 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4505 /**************************/
4523 ot
= dflag
+ OT_WORD
;
4526 case 0: /* OP Ev, Gv */
4527 modrm
= cpu_ldub_code(env
, s
->pc
++);
4528 reg
= ((modrm
>> 3) & 7) | rex_r
;
4529 mod
= (modrm
>> 6) & 3;
4530 rm
= (modrm
& 7) | REX_B(s
);
4532 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4534 } else if (op
== OP_XORL
&& rm
== reg
) {
4536 /* xor reg, reg optimisation */
4538 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4539 gen_op_mov_reg_T0(ot
, reg
);
4540 gen_op_update1_cc();
4545 gen_op_mov_TN_reg(ot
, 1, reg
);
4546 gen_op(s
, op
, ot
, opreg
);
4548 case 1: /* OP Gv, Ev */
4549 modrm
= cpu_ldub_code(env
, s
->pc
++);
4550 mod
= (modrm
>> 6) & 3;
4551 reg
= ((modrm
>> 3) & 7) | rex_r
;
4552 rm
= (modrm
& 7) | REX_B(s
);
4554 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4555 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4556 } else if (op
== OP_XORL
&& rm
== reg
) {
4559 gen_op_mov_TN_reg(ot
, 1, rm
);
4561 gen_op(s
, op
, ot
, reg
);
4563 case 2: /* OP A, Iv */
4564 val
= insn_get(env
, s
, ot
);
4565 gen_op_movl_T1_im(val
);
4566 gen_op(s
, op
, ot
, OR_EAX
);
4575 case 0x80: /* GRP1 */
4584 ot
= dflag
+ OT_WORD
;
4586 modrm
= cpu_ldub_code(env
, s
->pc
++);
4587 mod
= (modrm
>> 6) & 3;
4588 rm
= (modrm
& 7) | REX_B(s
);
4589 op
= (modrm
>> 3) & 7;
4595 s
->rip_offset
= insn_const_size(ot
);
4596 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4607 val
= insn_get(env
, s
, ot
);
4610 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4613 gen_op_movl_T1_im(val
);
4614 gen_op(s
, op
, ot
, opreg
);
4618 /**************************/
4619 /* inc, dec, and other misc arith */
4620 case 0x40 ... 0x47: /* inc Gv */
4621 ot
= dflag
? OT_LONG
: OT_WORD
;
4622 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4624 case 0x48 ... 0x4f: /* dec Gv */
4625 ot
= dflag
? OT_LONG
: OT_WORD
;
4626 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4628 case 0xf6: /* GRP3 */
4633 ot
= dflag
+ OT_WORD
;
4635 modrm
= cpu_ldub_code(env
, s
->pc
++);
4636 mod
= (modrm
>> 6) & 3;
4637 rm
= (modrm
& 7) | REX_B(s
);
4638 op
= (modrm
>> 3) & 7;
4641 s
->rip_offset
= insn_const_size(ot
);
4642 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4643 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4645 gen_op_mov_TN_reg(ot
, 0, rm
);
4650 val
= insn_get(env
, s
, ot
);
4651 gen_op_movl_T1_im(val
);
4652 gen_op_testl_T0_T1_cc();
4653 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4656 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4658 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4660 gen_op_mov_reg_T0(ot
, rm
);
4664 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
4666 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4668 gen_op_mov_reg_T0(ot
, rm
);
4670 gen_op_update_neg_cc();
4671 set_cc_op(s
, CC_OP_SUBB
+ ot
);
4676 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4677 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
4678 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
4679 /* XXX: use 32 bit mul which could be faster */
4680 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4681 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4682 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4683 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
4684 set_cc_op(s
, CC_OP_MULB
);
4687 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4688 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
4689 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
4690 /* XXX: use 32 bit mul which could be faster */
4691 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4692 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4693 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4694 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4695 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4696 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4697 set_cc_op(s
, CC_OP_MULW
);
4701 #ifdef TARGET_X86_64
4702 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4703 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4704 tcg_gen_ext32u_tl(cpu_T
[1], cpu_T
[1]);
4705 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4706 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4707 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4708 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4709 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4710 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4714 t0
= tcg_temp_new_i64();
4715 t1
= tcg_temp_new_i64();
4716 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4717 tcg_gen_extu_i32_i64(t0
, cpu_T
[0]);
4718 tcg_gen_extu_i32_i64(t1
, cpu_T
[1]);
4719 tcg_gen_mul_i64(t0
, t0
, t1
);
4720 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4721 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4722 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4723 tcg_gen_shri_i64(t0
, t0
, 32);
4724 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4725 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4726 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4729 set_cc_op(s
, CC_OP_MULL
);
4731 #ifdef TARGET_X86_64
4733 gen_helper_mulq_EAX_T0(cpu_env
, cpu_T
[0]);
4734 set_cc_op(s
, CC_OP_MULQ
);
4742 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
4743 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
4744 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
4745 /* XXX: use 32 bit mul which could be faster */
4746 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4747 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4748 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4749 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
4750 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4751 set_cc_op(s
, CC_OP_MULB
);
4754 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
4755 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
4756 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
4757 /* XXX: use 32 bit mul which could be faster */
4758 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4759 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
4760 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4761 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
4762 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4763 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
4764 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
4765 set_cc_op(s
, CC_OP_MULW
);
4769 #ifdef TARGET_X86_64
4770 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4771 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4772 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
4773 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4774 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4775 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4776 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
4777 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4778 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 32);
4779 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4783 t0
= tcg_temp_new_i64();
4784 t1
= tcg_temp_new_i64();
4785 gen_op_mov_TN_reg(OT_LONG
, 1, R_EAX
);
4786 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
4787 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
4788 tcg_gen_mul_i64(t0
, t0
, t1
);
4789 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4790 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
4791 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4792 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
4793 tcg_gen_shri_i64(t0
, t0
, 32);
4794 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
4795 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
4796 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
4799 set_cc_op(s
, CC_OP_MULL
);
4801 #ifdef TARGET_X86_64
4803 gen_helper_imulq_EAX_T0(cpu_env
, cpu_T
[0]);
4804 set_cc_op(s
, CC_OP_MULQ
);
4812 gen_jmp_im(pc_start
- s
->cs_base
);
4813 gen_helper_divb_AL(cpu_env
, cpu_T
[0]);
4816 gen_jmp_im(pc_start
- s
->cs_base
);
4817 gen_helper_divw_AX(cpu_env
, cpu_T
[0]);
4821 gen_jmp_im(pc_start
- s
->cs_base
);
4822 gen_helper_divl_EAX(cpu_env
, cpu_T
[0]);
4824 #ifdef TARGET_X86_64
4826 gen_jmp_im(pc_start
- s
->cs_base
);
4827 gen_helper_divq_EAX(cpu_env
, cpu_T
[0]);
4835 gen_jmp_im(pc_start
- s
->cs_base
);
4836 gen_helper_idivb_AL(cpu_env
, cpu_T
[0]);
4839 gen_jmp_im(pc_start
- s
->cs_base
);
4840 gen_helper_idivw_AX(cpu_env
, cpu_T
[0]);
4844 gen_jmp_im(pc_start
- s
->cs_base
);
4845 gen_helper_idivl_EAX(cpu_env
, cpu_T
[0]);
4847 #ifdef TARGET_X86_64
4849 gen_jmp_im(pc_start
- s
->cs_base
);
4850 gen_helper_idivq_EAX(cpu_env
, cpu_T
[0]);
4860 case 0xfe: /* GRP4 */
4861 case 0xff: /* GRP5 */
4865 ot
= dflag
+ OT_WORD
;
4867 modrm
= cpu_ldub_code(env
, s
->pc
++);
4868 mod
= (modrm
>> 6) & 3;
4869 rm
= (modrm
& 7) | REX_B(s
);
4870 op
= (modrm
>> 3) & 7;
4871 if (op
>= 2 && b
== 0xfe) {
4875 if (op
== 2 || op
== 4) {
4876 /* operand size for jumps is 64 bit */
4878 } else if (op
== 3 || op
== 5) {
4879 ot
= dflag
? OT_LONG
+ (rex_w
== 1) : OT_WORD
;
4880 } else if (op
== 6) {
4881 /* default push size is 64 bit */
4882 ot
= dflag
? OT_QUAD
: OT_WORD
;
4886 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4887 if (op
>= 2 && op
!= 3 && op
!= 5)
4888 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4890 gen_op_mov_TN_reg(ot
, 0, rm
);
4894 case 0: /* inc Ev */
4899 gen_inc(s
, ot
, opreg
, 1);
4901 case 1: /* dec Ev */
4906 gen_inc(s
, ot
, opreg
, -1);
4908 case 2: /* call Ev */
4909 /* XXX: optimize if memory (no 'and' is necessary) */
4911 gen_op_andl_T0_ffff();
4912 next_eip
= s
->pc
- s
->cs_base
;
4913 gen_movtl_T1_im(next_eip
);
4918 case 3: /* lcall Ev */
4919 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4920 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4921 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4923 if (s
->pe
&& !s
->vm86
) {
4924 gen_update_cc_op(s
);
4925 gen_jmp_im(pc_start
- s
->cs_base
);
4926 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4927 gen_helper_lcall_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4928 tcg_const_i32(dflag
),
4929 tcg_const_i32(s
->pc
- pc_start
));
4931 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4932 gen_helper_lcall_real(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4933 tcg_const_i32(dflag
),
4934 tcg_const_i32(s
->pc
- s
->cs_base
));
4938 case 4: /* jmp Ev */
4940 gen_op_andl_T0_ffff();
4944 case 5: /* ljmp Ev */
4945 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4946 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
4947 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
4949 if (s
->pe
&& !s
->vm86
) {
4950 gen_update_cc_op(s
);
4951 gen_jmp_im(pc_start
- s
->cs_base
);
4952 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4953 gen_helper_ljmp_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
4954 tcg_const_i32(s
->pc
- pc_start
));
4956 gen_op_movl_seg_T0_vm(R_CS
);
4957 gen_op_movl_T0_T1();
4962 case 6: /* push Ev */
4970 case 0x84: /* test Ev, Gv */
4975 ot
= dflag
+ OT_WORD
;
4977 modrm
= cpu_ldub_code(env
, s
->pc
++);
4978 reg
= ((modrm
>> 3) & 7) | rex_r
;
4980 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4981 gen_op_mov_TN_reg(ot
, 1, reg
);
4982 gen_op_testl_T0_T1_cc();
4983 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4986 case 0xa8: /* test eAX, Iv */
4991 ot
= dflag
+ OT_WORD
;
4992 val
= insn_get(env
, s
, ot
);
4994 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
4995 gen_op_movl_T1_im(val
);
4996 gen_op_testl_T0_T1_cc();
4997 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5000 case 0x98: /* CWDE/CBW */
5001 #ifdef TARGET_X86_64
5003 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5004 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5005 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
5009 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5010 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5011 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5013 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
5014 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5015 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5018 case 0x99: /* CDQ/CWD */
5019 #ifdef TARGET_X86_64
5021 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5022 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
5023 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
5027 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5028 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5029 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
5030 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5032 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5033 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5034 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
5035 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5038 case 0x1af: /* imul Gv, Ev */
5039 case 0x69: /* imul Gv, Ev, I */
5041 ot
= dflag
+ OT_WORD
;
5042 modrm
= cpu_ldub_code(env
, s
->pc
++);
5043 reg
= ((modrm
>> 3) & 7) | rex_r
;
5045 s
->rip_offset
= insn_const_size(ot
);
5048 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5050 val
= insn_get(env
, s
, ot
);
5051 gen_op_movl_T1_im(val
);
5052 } else if (b
== 0x6b) {
5053 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5054 gen_op_movl_T1_im(val
);
5056 gen_op_mov_TN_reg(ot
, 1, reg
);
5059 #ifdef TARGET_X86_64
5060 if (ot
== OT_QUAD
) {
5061 gen_helper_imulq_T0_T1(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
5064 if (ot
== OT_LONG
) {
5065 #ifdef TARGET_X86_64
5066 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5067 tcg_gen_ext32s_tl(cpu_T
[1], cpu_T
[1]);
5068 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5069 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5070 tcg_gen_ext32s_tl(cpu_tmp0
, cpu_T
[0]);
5071 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5075 t0
= tcg_temp_new_i64();
5076 t1
= tcg_temp_new_i64();
5077 tcg_gen_ext_i32_i64(t0
, cpu_T
[0]);
5078 tcg_gen_ext_i32_i64(t1
, cpu_T
[1]);
5079 tcg_gen_mul_i64(t0
, t0
, t1
);
5080 tcg_gen_trunc_i64_i32(cpu_T
[0], t0
);
5081 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5082 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[0], 31);
5083 tcg_gen_shri_i64(t0
, t0
, 32);
5084 tcg_gen_trunc_i64_i32(cpu_T
[1], t0
);
5085 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[1], cpu_tmp0
);
5089 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5090 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5091 /* XXX: use 32 bit mul which could be faster */
5092 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5093 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5094 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5095 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5097 gen_op_mov_reg_T0(ot
, reg
);
5098 set_cc_op(s
, CC_OP_MULB
+ ot
);
5101 case 0x1c1: /* xadd Ev, Gv */
5105 ot
= dflag
+ OT_WORD
;
5106 modrm
= cpu_ldub_code(env
, s
->pc
++);
5107 reg
= ((modrm
>> 3) & 7) | rex_r
;
5108 mod
= (modrm
>> 6) & 3;
5110 rm
= (modrm
& 7) | REX_B(s
);
5111 gen_op_mov_TN_reg(ot
, 0, reg
);
5112 gen_op_mov_TN_reg(ot
, 1, rm
);
5113 gen_op_addl_T0_T1();
5114 gen_op_mov_reg_T1(ot
, reg
);
5115 gen_op_mov_reg_T0(ot
, rm
);
5117 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5118 gen_op_mov_TN_reg(ot
, 0, reg
);
5119 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5120 gen_op_addl_T0_T1();
5121 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5122 gen_op_mov_reg_T1(ot
, reg
);
5124 gen_op_update2_cc();
5125 set_cc_op(s
, CC_OP_ADDB
+ ot
);
5128 case 0x1b1: /* cmpxchg Ev, Gv */
5131 TCGv t0
, t1
, t2
, a0
;
5136 ot
= dflag
+ OT_WORD
;
5137 modrm
= cpu_ldub_code(env
, s
->pc
++);
5138 reg
= ((modrm
>> 3) & 7) | rex_r
;
5139 mod
= (modrm
>> 6) & 3;
5140 t0
= tcg_temp_local_new();
5141 t1
= tcg_temp_local_new();
5142 t2
= tcg_temp_local_new();
5143 a0
= tcg_temp_local_new();
5144 gen_op_mov_v_reg(ot
, t1
, reg
);
5146 rm
= (modrm
& 7) | REX_B(s
);
5147 gen_op_mov_v_reg(ot
, t0
, rm
);
5149 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5150 tcg_gen_mov_tl(a0
, cpu_A0
);
5151 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
5152 rm
= 0; /* avoid warning */
5154 label1
= gen_new_label();
5155 tcg_gen_mov_tl(t2
, cpu_regs
[R_EAX
]);
5158 tcg_gen_brcond_tl(TCG_COND_EQ
, t2
, t0
, label1
);
5159 label2
= gen_new_label();
5161 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5163 gen_set_label(label1
);
5164 gen_op_mov_reg_v(ot
, rm
, t1
);
5166 /* perform no-op store cycle like physical cpu; must be
5167 before changing accumulator to ensure idempotency if
5168 the store faults and the instruction is restarted */
5169 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
5170 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5172 gen_set_label(label1
);
5173 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
5175 gen_set_label(label2
);
5176 tcg_gen_mov_tl(cpu_cc_src
, t0
);
5177 tcg_gen_mov_tl(cpu_cc_srcT
, t2
);
5178 tcg_gen_sub_tl(cpu_cc_dst
, t2
, t0
);
5179 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5186 case 0x1c7: /* cmpxchg8b */
5187 modrm
= cpu_ldub_code(env
, s
->pc
++);
5188 mod
= (modrm
>> 6) & 3;
5189 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
5191 #ifdef TARGET_X86_64
5193 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
5195 gen_jmp_im(pc_start
- s
->cs_base
);
5196 gen_update_cc_op(s
);
5197 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5198 gen_helper_cmpxchg16b(cpu_env
, cpu_A0
);
5202 if (!(s
->cpuid_features
& CPUID_CX8
))
5204 gen_jmp_im(pc_start
- s
->cs_base
);
5205 gen_update_cc_op(s
);
5206 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5207 gen_helper_cmpxchg8b(cpu_env
, cpu_A0
);
5209 set_cc_op(s
, CC_OP_EFLAGS
);
5212 /**************************/
5214 case 0x50 ... 0x57: /* push */
5215 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
5218 case 0x58 ... 0x5f: /* pop */
5220 ot
= dflag
? OT_QUAD
: OT_WORD
;
5222 ot
= dflag
+ OT_WORD
;
5225 /* NOTE: order is important for pop %sp */
5227 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5229 case 0x60: /* pusha */
5234 case 0x61: /* popa */
5239 case 0x68: /* push Iv */
5242 ot
= dflag
? OT_QUAD
: OT_WORD
;
5244 ot
= dflag
+ OT_WORD
;
5247 val
= insn_get(env
, s
, ot
);
5249 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5250 gen_op_movl_T0_im(val
);
5253 case 0x8f: /* pop Ev */
5255 ot
= dflag
? OT_QUAD
: OT_WORD
;
5257 ot
= dflag
+ OT_WORD
;
5259 modrm
= cpu_ldub_code(env
, s
->pc
++);
5260 mod
= (modrm
>> 6) & 3;
5263 /* NOTE: order is important for pop %sp */
5265 rm
= (modrm
& 7) | REX_B(s
);
5266 gen_op_mov_reg_T0(ot
, rm
);
5268 /* NOTE: order is important too for MMU exceptions */
5269 s
->popl_esp_hack
= 1 << ot
;
5270 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5271 s
->popl_esp_hack
= 0;
5275 case 0xc8: /* enter */
5278 val
= cpu_lduw_code(env
, s
->pc
);
5280 level
= cpu_ldub_code(env
, s
->pc
++);
5281 gen_enter(s
, val
, level
);
5284 case 0xc9: /* leave */
5285 /* XXX: exception not precise (ESP is updated before potential exception) */
5287 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
5288 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
5289 } else if (s
->ss32
) {
5290 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
5291 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
5293 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
5294 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
5298 ot
= dflag
? OT_QUAD
: OT_WORD
;
5300 ot
= dflag
+ OT_WORD
;
5302 gen_op_mov_reg_T0(ot
, R_EBP
);
5305 case 0x06: /* push es */
5306 case 0x0e: /* push cs */
5307 case 0x16: /* push ss */
5308 case 0x1e: /* push ds */
5311 gen_op_movl_T0_seg(b
>> 3);
5314 case 0x1a0: /* push fs */
5315 case 0x1a8: /* push gs */
5316 gen_op_movl_T0_seg((b
>> 3) & 7);
5319 case 0x07: /* pop es */
5320 case 0x17: /* pop ss */
5321 case 0x1f: /* pop ds */
5326 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5329 /* if reg == SS, inhibit interrupts/trace. */
5330 /* If several instructions disable interrupts, only the
5332 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5333 gen_helper_set_inhibit_irq(cpu_env
);
5337 gen_jmp_im(s
->pc
- s
->cs_base
);
5341 case 0x1a1: /* pop fs */
5342 case 0x1a9: /* pop gs */
5344 gen_movl_seg_T0(s
, (b
>> 3) & 7, pc_start
- s
->cs_base
);
5347 gen_jmp_im(s
->pc
- s
->cs_base
);
5352 /**************************/
5355 case 0x89: /* mov Gv, Ev */
5359 ot
= dflag
+ OT_WORD
;
5360 modrm
= cpu_ldub_code(env
, s
->pc
++);
5361 reg
= ((modrm
>> 3) & 7) | rex_r
;
5363 /* generate a generic store */
5364 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5367 case 0xc7: /* mov Ev, Iv */
5371 ot
= dflag
+ OT_WORD
;
5372 modrm
= cpu_ldub_code(env
, s
->pc
++);
5373 mod
= (modrm
>> 6) & 3;
5375 s
->rip_offset
= insn_const_size(ot
);
5376 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5378 val
= insn_get(env
, s
, ot
);
5379 gen_op_movl_T0_im(val
);
5381 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5383 gen_op_mov_reg_T0(ot
, (modrm
& 7) | REX_B(s
));
5386 case 0x8b: /* mov Ev, Gv */
5390 ot
= OT_WORD
+ dflag
;
5391 modrm
= cpu_ldub_code(env
, s
->pc
++);
5392 reg
= ((modrm
>> 3) & 7) | rex_r
;
5394 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5395 gen_op_mov_reg_T0(ot
, reg
);
5397 case 0x8e: /* mov seg, Gv */
5398 modrm
= cpu_ldub_code(env
, s
->pc
++);
5399 reg
= (modrm
>> 3) & 7;
5400 if (reg
>= 6 || reg
== R_CS
)
5402 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
5403 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5405 /* if reg == SS, inhibit interrupts/trace */
5406 /* If several instructions disable interrupts, only the
5408 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5409 gen_helper_set_inhibit_irq(cpu_env
);
5413 gen_jmp_im(s
->pc
- s
->cs_base
);
5417 case 0x8c: /* mov Gv, seg */
5418 modrm
= cpu_ldub_code(env
, s
->pc
++);
5419 reg
= (modrm
>> 3) & 7;
5420 mod
= (modrm
>> 6) & 3;
5423 gen_op_movl_T0_seg(reg
);
5425 ot
= OT_WORD
+ dflag
;
5428 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5431 case 0x1b6: /* movzbS Gv, Eb */
5432 case 0x1b7: /* movzwS Gv, Eb */
5433 case 0x1be: /* movsbS Gv, Eb */
5434 case 0x1bf: /* movswS Gv, Eb */
5437 /* d_ot is the size of destination */
5438 d_ot
= dflag
+ OT_WORD
;
5439 /* ot is the size of source */
5440 ot
= (b
& 1) + OT_BYTE
;
5441 modrm
= cpu_ldub_code(env
, s
->pc
++);
5442 reg
= ((modrm
>> 3) & 7) | rex_r
;
5443 mod
= (modrm
>> 6) & 3;
5444 rm
= (modrm
& 7) | REX_B(s
);
5447 gen_op_mov_TN_reg(ot
, 0, rm
);
5448 switch(ot
| (b
& 8)) {
5450 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5453 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5456 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5460 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5463 gen_op_mov_reg_T0(d_ot
, reg
);
5465 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5467 gen_op_lds_T0_A0(ot
+ s
->mem_index
);
5469 gen_op_ldu_T0_A0(ot
+ s
->mem_index
);
5471 gen_op_mov_reg_T0(d_ot
, reg
);
5476 case 0x8d: /* lea */
5477 ot
= dflag
+ OT_WORD
;
5478 modrm
= cpu_ldub_code(env
, s
->pc
++);
5479 mod
= (modrm
>> 6) & 3;
5482 reg
= ((modrm
>> 3) & 7) | rex_r
;
5483 /* we must ensure that no segment is added */
5487 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5489 gen_op_mov_reg_A0(ot
- OT_WORD
, reg
);
5492 case 0xa0: /* mov EAX, Ov */
5494 case 0xa2: /* mov Ov, EAX */
5497 target_ulong offset_addr
;
5502 ot
= dflag
+ OT_WORD
;
5503 #ifdef TARGET_X86_64
5504 if (s
->aflag
== 2) {
5505 offset_addr
= cpu_ldq_code(env
, s
->pc
);
5507 gen_op_movq_A0_im(offset_addr
);
5512 offset_addr
= insn_get(env
, s
, OT_LONG
);
5514 offset_addr
= insn_get(env
, s
, OT_WORD
);
5516 gen_op_movl_A0_im(offset_addr
);
5518 gen_add_A0_ds_seg(s
);
5520 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5521 gen_op_mov_reg_T0(ot
, R_EAX
);
5523 gen_op_mov_TN_reg(ot
, 0, R_EAX
);
5524 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5528 case 0xd7: /* xlat */
5529 #ifdef TARGET_X86_64
5530 if (s
->aflag
== 2) {
5531 gen_op_movq_A0_reg(R_EBX
);
5532 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5533 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5534 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5538 gen_op_movl_A0_reg(R_EBX
);
5539 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5540 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5541 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5543 gen_op_andl_A0_ffff();
5545 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
5547 gen_add_A0_ds_seg(s
);
5548 gen_op_ldu_T0_A0(OT_BYTE
+ s
->mem_index
);
5549 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
5551 case 0xb0 ... 0xb7: /* mov R, Ib */
5552 val
= insn_get(env
, s
, OT_BYTE
);
5553 gen_op_movl_T0_im(val
);
5554 gen_op_mov_reg_T0(OT_BYTE
, (b
& 7) | REX_B(s
));
5556 case 0xb8 ... 0xbf: /* mov R, Iv */
5557 #ifdef TARGET_X86_64
5561 tmp
= cpu_ldq_code(env
, s
->pc
);
5563 reg
= (b
& 7) | REX_B(s
);
5564 gen_movtl_T0_im(tmp
);
5565 gen_op_mov_reg_T0(OT_QUAD
, reg
);
5569 ot
= dflag
? OT_LONG
: OT_WORD
;
5570 val
= insn_get(env
, s
, ot
);
5571 reg
= (b
& 7) | REX_B(s
);
5572 gen_op_movl_T0_im(val
);
5573 gen_op_mov_reg_T0(ot
, reg
);
5577 case 0x91 ... 0x97: /* xchg R, EAX */
5579 ot
= dflag
+ OT_WORD
;
5580 reg
= (b
& 7) | REX_B(s
);
5584 case 0x87: /* xchg Ev, Gv */
5588 ot
= dflag
+ OT_WORD
;
5589 modrm
= cpu_ldub_code(env
, s
->pc
++);
5590 reg
= ((modrm
>> 3) & 7) | rex_r
;
5591 mod
= (modrm
>> 6) & 3;
5593 rm
= (modrm
& 7) | REX_B(s
);
5595 gen_op_mov_TN_reg(ot
, 0, reg
);
5596 gen_op_mov_TN_reg(ot
, 1, rm
);
5597 gen_op_mov_reg_T0(ot
, rm
);
5598 gen_op_mov_reg_T1(ot
, reg
);
5600 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5601 gen_op_mov_TN_reg(ot
, 0, reg
);
5602 /* for xchg, lock is implicit */
5603 if (!(prefixes
& PREFIX_LOCK
))
5605 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5606 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5607 if (!(prefixes
& PREFIX_LOCK
))
5608 gen_helper_unlock();
5609 gen_op_mov_reg_T1(ot
, reg
);
5612 case 0xc4: /* les Gv */
5613 /* In CODE64 this is VEX3; see above. */
5616 case 0xc5: /* lds Gv */
5617 /* In CODE64 this is VEX2; see above. */
5620 case 0x1b2: /* lss Gv */
5623 case 0x1b4: /* lfs Gv */
5626 case 0x1b5: /* lgs Gv */
5629 ot
= dflag
? OT_LONG
: OT_WORD
;
5630 modrm
= cpu_ldub_code(env
, s
->pc
++);
5631 reg
= ((modrm
>> 3) & 7) | rex_r
;
5632 mod
= (modrm
>> 6) & 3;
5635 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5636 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5637 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5638 /* load the segment first to handle exceptions properly */
5639 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5640 gen_movl_seg_T0(s
, op
, pc_start
- s
->cs_base
);
5641 /* then put the data */
5642 gen_op_mov_reg_T1(ot
, reg
);
5644 gen_jmp_im(s
->pc
- s
->cs_base
);
5649 /************************/
5660 ot
= dflag
+ OT_WORD
;
5662 modrm
= cpu_ldub_code(env
, s
->pc
++);
5663 mod
= (modrm
>> 6) & 3;
5664 op
= (modrm
>> 3) & 7;
5670 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5673 opreg
= (modrm
& 7) | REX_B(s
);
5678 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5681 shift
= cpu_ldub_code(env
, s
->pc
++);
5683 gen_shifti(s
, op
, ot
, opreg
, shift
);
5698 case 0x1a4: /* shld imm */
5702 case 0x1a5: /* shld cl */
5706 case 0x1ac: /* shrd imm */
5710 case 0x1ad: /* shrd cl */
5714 ot
= dflag
+ OT_WORD
;
5715 modrm
= cpu_ldub_code(env
, s
->pc
++);
5716 mod
= (modrm
>> 6) & 3;
5717 rm
= (modrm
& 7) | REX_B(s
);
5718 reg
= ((modrm
>> 3) & 7) | rex_r
;
5720 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5725 gen_op_mov_TN_reg(ot
, 1, reg
);
5728 TCGv imm
= tcg_const_tl(cpu_ldub_code(env
, s
->pc
++));
5729 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
5732 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
5736 /************************/
5739 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5740 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5741 /* XXX: what to do if illegal op ? */
5742 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5745 modrm
= cpu_ldub_code(env
, s
->pc
++);
5746 mod
= (modrm
>> 6) & 3;
5748 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5751 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5753 case 0x00 ... 0x07: /* fxxxs */
5754 case 0x10 ... 0x17: /* fixxxl */
5755 case 0x20 ... 0x27: /* fxxxl */
5756 case 0x30 ... 0x37: /* fixxx */
5763 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5764 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5765 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
5768 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5769 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5770 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5773 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5774 (s
->mem_index
>> 2) - 1);
5775 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
5779 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5780 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5781 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5785 gen_helper_fp_arith_ST0_FT0(op1
);
5787 /* fcomp needs pop */
5788 gen_helper_fpop(cpu_env
);
5792 case 0x08: /* flds */
5793 case 0x0a: /* fsts */
5794 case 0x0b: /* fstps */
5795 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5796 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5797 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5802 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5803 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5804 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
5807 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
5808 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5809 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5812 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5813 (s
->mem_index
>> 2) - 1);
5814 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
5818 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
5819 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5820 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5825 /* XXX: the corresponding CPUID bit must be tested ! */
5828 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
5829 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5830 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5833 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
5834 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5835 (s
->mem_index
>> 2) - 1);
5839 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
5840 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5841 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5844 gen_helper_fpop(cpu_env
);
5849 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
5850 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5851 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5854 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
5855 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5856 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
5859 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
5860 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5861 (s
->mem_index
>> 2) - 1);
5865 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
5866 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5867 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5871 gen_helper_fpop(cpu_env
);
5875 case 0x0c: /* fldenv mem */
5876 gen_update_cc_op(s
);
5877 gen_jmp_im(pc_start
- s
->cs_base
);
5878 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5880 case 0x0d: /* fldcw mem */
5881 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
5882 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5883 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
5885 case 0x0e: /* fnstenv mem */
5886 gen_update_cc_op(s
);
5887 gen_jmp_im(pc_start
- s
->cs_base
);
5888 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5890 case 0x0f: /* fnstcw mem */
5891 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
5892 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5893 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5895 case 0x1d: /* fldt mem */
5896 gen_update_cc_op(s
);
5897 gen_jmp_im(pc_start
- s
->cs_base
);
5898 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
5900 case 0x1f: /* fstpt mem */
5901 gen_update_cc_op(s
);
5902 gen_jmp_im(pc_start
- s
->cs_base
);
5903 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
5904 gen_helper_fpop(cpu_env
);
5906 case 0x2c: /* frstor mem */
5907 gen_update_cc_op(s
);
5908 gen_jmp_im(pc_start
- s
->cs_base
);
5909 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5911 case 0x2e: /* fnsave mem */
5912 gen_update_cc_op(s
);
5913 gen_jmp_im(pc_start
- s
->cs_base
);
5914 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
5916 case 0x2f: /* fnstsw mem */
5917 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
5918 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
5919 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
5921 case 0x3c: /* fbld */
5922 gen_update_cc_op(s
);
5923 gen_jmp_im(pc_start
- s
->cs_base
);
5924 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
5926 case 0x3e: /* fbstp */
5927 gen_update_cc_op(s
);
5928 gen_jmp_im(pc_start
- s
->cs_base
);
5929 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
5930 gen_helper_fpop(cpu_env
);
5932 case 0x3d: /* fildll */
5933 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
5934 (s
->mem_index
>> 2) - 1);
5935 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
5937 case 0x3f: /* fistpll */
5938 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
5939 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
5940 (s
->mem_index
>> 2) - 1);
5941 gen_helper_fpop(cpu_env
);
5947 /* register float ops */
5951 case 0x08: /* fld sti */
5952 gen_helper_fpush(cpu_env
);
5953 gen_helper_fmov_ST0_STN(cpu_env
,
5954 tcg_const_i32((opreg
+ 1) & 7));
5956 case 0x09: /* fxchg sti */
5957 case 0x29: /* fxchg4 sti, undocumented op */
5958 case 0x39: /* fxchg7 sti, undocumented op */
5959 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
5961 case 0x0a: /* grp d9/2 */
5964 /* check exceptions (FreeBSD FPU probe) */
5965 gen_update_cc_op(s
);
5966 gen_jmp_im(pc_start
- s
->cs_base
);
5967 gen_helper_fwait(cpu_env
);
5973 case 0x0c: /* grp d9/4 */
5976 gen_helper_fchs_ST0(cpu_env
);
5979 gen_helper_fabs_ST0(cpu_env
);
5982 gen_helper_fldz_FT0(cpu_env
);
5983 gen_helper_fcom_ST0_FT0(cpu_env
);
5986 gen_helper_fxam_ST0(cpu_env
);
5992 case 0x0d: /* grp d9/5 */
5996 gen_helper_fpush(cpu_env
);
5997 gen_helper_fld1_ST0(cpu_env
);
6000 gen_helper_fpush(cpu_env
);
6001 gen_helper_fldl2t_ST0(cpu_env
);
6004 gen_helper_fpush(cpu_env
);
6005 gen_helper_fldl2e_ST0(cpu_env
);
6008 gen_helper_fpush(cpu_env
);
6009 gen_helper_fldpi_ST0(cpu_env
);
6012 gen_helper_fpush(cpu_env
);
6013 gen_helper_fldlg2_ST0(cpu_env
);
6016 gen_helper_fpush(cpu_env
);
6017 gen_helper_fldln2_ST0(cpu_env
);
6020 gen_helper_fpush(cpu_env
);
6021 gen_helper_fldz_ST0(cpu_env
);
6028 case 0x0e: /* grp d9/6 */
6031 gen_helper_f2xm1(cpu_env
);
6034 gen_helper_fyl2x(cpu_env
);
6037 gen_helper_fptan(cpu_env
);
6039 case 3: /* fpatan */
6040 gen_helper_fpatan(cpu_env
);
6042 case 4: /* fxtract */
6043 gen_helper_fxtract(cpu_env
);
6045 case 5: /* fprem1 */
6046 gen_helper_fprem1(cpu_env
);
6048 case 6: /* fdecstp */
6049 gen_helper_fdecstp(cpu_env
);
6052 case 7: /* fincstp */
6053 gen_helper_fincstp(cpu_env
);
6057 case 0x0f: /* grp d9/7 */
6060 gen_helper_fprem(cpu_env
);
6062 case 1: /* fyl2xp1 */
6063 gen_helper_fyl2xp1(cpu_env
);
6066 gen_helper_fsqrt(cpu_env
);
6068 case 3: /* fsincos */
6069 gen_helper_fsincos(cpu_env
);
6071 case 5: /* fscale */
6072 gen_helper_fscale(cpu_env
);
6074 case 4: /* frndint */
6075 gen_helper_frndint(cpu_env
);
6078 gen_helper_fsin(cpu_env
);
6082 gen_helper_fcos(cpu_env
);
6086 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6087 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6088 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6094 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6096 gen_helper_fpop(cpu_env
);
6098 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6099 gen_helper_fp_arith_ST0_FT0(op1
);
6103 case 0x02: /* fcom */
6104 case 0x22: /* fcom2, undocumented op */
6105 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6106 gen_helper_fcom_ST0_FT0(cpu_env
);
6108 case 0x03: /* fcomp */
6109 case 0x23: /* fcomp3, undocumented op */
6110 case 0x32: /* fcomp5, undocumented op */
6111 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6112 gen_helper_fcom_ST0_FT0(cpu_env
);
6113 gen_helper_fpop(cpu_env
);
6115 case 0x15: /* da/5 */
6117 case 1: /* fucompp */
6118 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6119 gen_helper_fucom_ST0_FT0(cpu_env
);
6120 gen_helper_fpop(cpu_env
);
6121 gen_helper_fpop(cpu_env
);
6129 case 0: /* feni (287 only, just do nop here) */
6131 case 1: /* fdisi (287 only, just do nop here) */
6134 gen_helper_fclex(cpu_env
);
6136 case 3: /* fninit */
6137 gen_helper_fninit(cpu_env
);
6139 case 4: /* fsetpm (287 only, just do nop here) */
6145 case 0x1d: /* fucomi */
6146 gen_update_cc_op(s
);
6147 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6148 gen_helper_fucomi_ST0_FT0(cpu_env
);
6149 set_cc_op(s
, CC_OP_EFLAGS
);
6151 case 0x1e: /* fcomi */
6152 gen_update_cc_op(s
);
6153 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6154 gen_helper_fcomi_ST0_FT0(cpu_env
);
6155 set_cc_op(s
, CC_OP_EFLAGS
);
6157 case 0x28: /* ffree sti */
6158 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6160 case 0x2a: /* fst sti */
6161 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6163 case 0x2b: /* fstp sti */
6164 case 0x0b: /* fstp1 sti, undocumented op */
6165 case 0x3a: /* fstp8 sti, undocumented op */
6166 case 0x3b: /* fstp9 sti, undocumented op */
6167 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6168 gen_helper_fpop(cpu_env
);
6170 case 0x2c: /* fucom st(i) */
6171 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6172 gen_helper_fucom_ST0_FT0(cpu_env
);
6174 case 0x2d: /* fucomp st(i) */
6175 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6176 gen_helper_fucom_ST0_FT0(cpu_env
);
6177 gen_helper_fpop(cpu_env
);
6179 case 0x33: /* de/3 */
6181 case 1: /* fcompp */
6182 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6183 gen_helper_fcom_ST0_FT0(cpu_env
);
6184 gen_helper_fpop(cpu_env
);
6185 gen_helper_fpop(cpu_env
);
6191 case 0x38: /* ffreep sti, undocumented op */
6192 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6193 gen_helper_fpop(cpu_env
);
6195 case 0x3c: /* df/4 */
6198 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6199 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6200 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
6206 case 0x3d: /* fucomip */
6207 gen_update_cc_op(s
);
6208 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6209 gen_helper_fucomi_ST0_FT0(cpu_env
);
6210 gen_helper_fpop(cpu_env
);
6211 set_cc_op(s
, CC_OP_EFLAGS
);
6213 case 0x3e: /* fcomip */
6214 gen_update_cc_op(s
);
6215 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6216 gen_helper_fcomi_ST0_FT0(cpu_env
);
6217 gen_helper_fpop(cpu_env
);
6218 set_cc_op(s
, CC_OP_EFLAGS
);
6220 case 0x10 ... 0x13: /* fcmovxx */
6224 static const uint8_t fcmov_cc
[8] = {
6230 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6231 l1
= gen_new_label();
6232 gen_jcc1_noeob(s
, op1
, l1
);
6233 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6242 /************************/
6245 case 0xa4: /* movsS */
6250 ot
= dflag
+ OT_WORD
;
6252 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6253 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6259 case 0xaa: /* stosS */
6264 ot
= dflag
+ OT_WORD
;
6266 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6267 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6272 case 0xac: /* lodsS */
6277 ot
= dflag
+ OT_WORD
;
6278 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6279 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6284 case 0xae: /* scasS */
6289 ot
= dflag
+ OT_WORD
;
6290 if (prefixes
& PREFIX_REPNZ
) {
6291 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6292 } else if (prefixes
& PREFIX_REPZ
) {
6293 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6299 case 0xa6: /* cmpsS */
6304 ot
= dflag
+ OT_WORD
;
6305 if (prefixes
& PREFIX_REPNZ
) {
6306 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6307 } else if (prefixes
& PREFIX_REPZ
) {
6308 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6313 case 0x6c: /* insS */
6318 ot
= dflag
? OT_LONG
: OT_WORD
;
6319 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6320 gen_op_andl_T0_ffff();
6321 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6322 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6323 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6324 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6328 gen_jmp(s
, s
->pc
- s
->cs_base
);
6332 case 0x6e: /* outsS */
6337 ot
= dflag
? OT_LONG
: OT_WORD
;
6338 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6339 gen_op_andl_T0_ffff();
6340 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6341 svm_is_rep(prefixes
) | 4);
6342 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6343 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6347 gen_jmp(s
, s
->pc
- s
->cs_base
);
6352 /************************/
6360 ot
= dflag
? OT_LONG
: OT_WORD
;
6361 val
= cpu_ldub_code(env
, s
->pc
++);
6362 gen_op_movl_T0_im(val
);
6363 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6364 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6367 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6368 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6369 gen_op_mov_reg_T1(ot
, R_EAX
);
6372 gen_jmp(s
, s
->pc
- s
->cs_base
);
6380 ot
= dflag
? OT_LONG
: OT_WORD
;
6381 val
= cpu_ldub_code(env
, s
->pc
++);
6382 gen_op_movl_T0_im(val
);
6383 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6384 svm_is_rep(prefixes
));
6385 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6389 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6390 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6391 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6394 gen_jmp(s
, s
->pc
- s
->cs_base
);
6402 ot
= dflag
? OT_LONG
: OT_WORD
;
6403 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6404 gen_op_andl_T0_ffff();
6405 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6406 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6409 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6410 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6411 gen_op_mov_reg_T1(ot
, R_EAX
);
6414 gen_jmp(s
, s
->pc
- s
->cs_base
);
6422 ot
= dflag
? OT_LONG
: OT_WORD
;
6423 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6424 gen_op_andl_T0_ffff();
6425 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6426 svm_is_rep(prefixes
));
6427 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6431 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6432 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6433 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6436 gen_jmp(s
, s
->pc
- s
->cs_base
);
6440 /************************/
6442 case 0xc2: /* ret im */
6443 val
= cpu_ldsw_code(env
, s
->pc
);
6446 if (CODE64(s
) && s
->dflag
)
6448 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6450 gen_op_andl_T0_ffff();
6454 case 0xc3: /* ret */
6458 gen_op_andl_T0_ffff();
6462 case 0xca: /* lret im */
6463 val
= cpu_ldsw_code(env
, s
->pc
);
6466 if (s
->pe
&& !s
->vm86
) {
6467 gen_update_cc_op(s
);
6468 gen_jmp_im(pc_start
- s
->cs_base
);
6469 gen_helper_lret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6470 tcg_const_i32(val
));
6474 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6476 gen_op_andl_T0_ffff();
6477 /* NOTE: keeping EIP updated is not a problem in case of
6481 gen_op_addl_A0_im(2 << s
->dflag
);
6482 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6483 gen_op_movl_seg_T0_vm(R_CS
);
6484 /* add stack offset */
6485 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6489 case 0xcb: /* lret */
6492 case 0xcf: /* iret */
6493 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6496 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6497 set_cc_op(s
, CC_OP_EFLAGS
);
6498 } else if (s
->vm86
) {
6500 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6502 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6503 set_cc_op(s
, CC_OP_EFLAGS
);
6506 gen_update_cc_op(s
);
6507 gen_jmp_im(pc_start
- s
->cs_base
);
6508 gen_helper_iret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6509 tcg_const_i32(s
->pc
- s
->cs_base
));
6510 set_cc_op(s
, CC_OP_EFLAGS
);
6514 case 0xe8: /* call im */
6517 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6519 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6520 next_eip
= s
->pc
- s
->cs_base
;
6526 gen_movtl_T0_im(next_eip
);
6531 case 0x9a: /* lcall im */
6533 unsigned int selector
, offset
;
6537 ot
= dflag
? OT_LONG
: OT_WORD
;
6538 offset
= insn_get(env
, s
, ot
);
6539 selector
= insn_get(env
, s
, OT_WORD
);
6541 gen_op_movl_T0_im(selector
);
6542 gen_op_movl_T1_imu(offset
);
6545 case 0xe9: /* jmp im */
6547 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6549 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6550 tval
+= s
->pc
- s
->cs_base
;
6557 case 0xea: /* ljmp im */
6559 unsigned int selector
, offset
;
6563 ot
= dflag
? OT_LONG
: OT_WORD
;
6564 offset
= insn_get(env
, s
, ot
);
6565 selector
= insn_get(env
, s
, OT_WORD
);
6567 gen_op_movl_T0_im(selector
);
6568 gen_op_movl_T1_imu(offset
);
6571 case 0xeb: /* jmp Jb */
6572 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6573 tval
+= s
->pc
- s
->cs_base
;
6578 case 0x70 ... 0x7f: /* jcc Jb */
6579 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6581 case 0x180 ... 0x18f: /* jcc Jv */
6583 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6585 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6588 next_eip
= s
->pc
- s
->cs_base
;
6592 gen_jcc(s
, b
, tval
, next_eip
);
6595 case 0x190 ... 0x19f: /* setcc Gv */
6596 modrm
= cpu_ldub_code(env
, s
->pc
++);
6597 gen_setcc1(s
, b
, cpu_T
[0]);
6598 gen_ldst_modrm(env
, s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6600 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6601 ot
= dflag
+ OT_WORD
;
6602 modrm
= cpu_ldub_code(env
, s
->pc
++);
6603 reg
= ((modrm
>> 3) & 7) | rex_r
;
6604 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6607 /************************/
6609 case 0x9c: /* pushf */
6610 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6611 if (s
->vm86
&& s
->iopl
!= 3) {
6612 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6614 gen_update_cc_op(s
);
6615 gen_helper_read_eflags(cpu_T
[0], cpu_env
);
6619 case 0x9d: /* popf */
6620 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6621 if (s
->vm86
&& s
->iopl
!= 3) {
6622 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6627 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6628 tcg_const_i32((TF_MASK
| AC_MASK
|
6633 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6634 tcg_const_i32((TF_MASK
| AC_MASK
|
6636 IF_MASK
| IOPL_MASK
)
6640 if (s
->cpl
<= s
->iopl
) {
6642 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6643 tcg_const_i32((TF_MASK
|
6649 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6650 tcg_const_i32((TF_MASK
|
6659 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6660 tcg_const_i32((TF_MASK
| AC_MASK
|
6661 ID_MASK
| NT_MASK
)));
6663 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6664 tcg_const_i32((TF_MASK
| AC_MASK
|
6671 set_cc_op(s
, CC_OP_EFLAGS
);
6672 /* abort translation because TF/AC flag may change */
6673 gen_jmp_im(s
->pc
- s
->cs_base
);
6677 case 0x9e: /* sahf */
6678 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6680 gen_op_mov_TN_reg(OT_BYTE
, 0, R_AH
);
6681 gen_compute_eflags(s
);
6682 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6683 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6684 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[0]);
6686 case 0x9f: /* lahf */
6687 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6689 gen_compute_eflags(s
);
6690 /* Note: gen_compute_eflags() only gives the condition codes */
6691 tcg_gen_ori_tl(cpu_T
[0], cpu_cc_src
, 0x02);
6692 gen_op_mov_reg_T0(OT_BYTE
, R_AH
);
6694 case 0xf5: /* cmc */
6695 gen_compute_eflags(s
);
6696 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6698 case 0xf8: /* clc */
6699 gen_compute_eflags(s
);
6700 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6702 case 0xf9: /* stc */
6703 gen_compute_eflags(s
);
6704 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6706 case 0xfc: /* cld */
6707 tcg_gen_movi_i32(cpu_tmp2_i32
, 1);
6708 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6710 case 0xfd: /* std */
6711 tcg_gen_movi_i32(cpu_tmp2_i32
, -1);
6712 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6715 /************************/
6716 /* bit operations */
6717 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6718 ot
= dflag
+ OT_WORD
;
6719 modrm
= cpu_ldub_code(env
, s
->pc
++);
6720 op
= (modrm
>> 3) & 7;
6721 mod
= (modrm
>> 6) & 3;
6722 rm
= (modrm
& 7) | REX_B(s
);
6725 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6726 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6728 gen_op_mov_TN_reg(ot
, 0, rm
);
6731 val
= cpu_ldub_code(env
, s
->pc
++);
6732 gen_op_movl_T1_im(val
);
6737 case 0x1a3: /* bt Gv, Ev */
6740 case 0x1ab: /* bts */
6743 case 0x1b3: /* btr */
6746 case 0x1bb: /* btc */
6749 ot
= dflag
+ OT_WORD
;
6750 modrm
= cpu_ldub_code(env
, s
->pc
++);
6751 reg
= ((modrm
>> 3) & 7) | rex_r
;
6752 mod
= (modrm
>> 6) & 3;
6753 rm
= (modrm
& 7) | REX_B(s
);
6754 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
6756 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6757 /* specific case: we need to add a displacement */
6758 gen_exts(ot
, cpu_T
[1]);
6759 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
6760 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
6761 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
6762 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
6764 gen_op_mov_TN_reg(ot
, 0, rm
);
6767 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
6770 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
6771 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6774 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6775 tcg_gen_movi_tl(cpu_tmp0
, 1);
6776 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6777 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6780 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6781 tcg_gen_movi_tl(cpu_tmp0
, 1);
6782 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6783 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
6784 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6788 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
6789 tcg_gen_movi_tl(cpu_tmp0
, 1);
6790 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
6791 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
6794 set_cc_op(s
, CC_OP_SARB
+ ot
);
6797 gen_op_st_T0_A0(ot
+ s
->mem_index
);
6799 gen_op_mov_reg_T0(ot
, rm
);
6800 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
6801 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6804 case 0x1bc: /* bsf */
6805 case 0x1bd: /* bsr */
6810 ot
= dflag
+ OT_WORD
;
6811 modrm
= cpu_ldub_code(env
, s
->pc
++);
6812 reg
= ((modrm
>> 3) & 7) | rex_r
;
6813 gen_ldst_modrm(env
, s
,modrm
, ot
, OR_TMP0
, 0);
6814 gen_extu(ot
, cpu_T
[0]);
6815 t0
= tcg_temp_local_new();
6816 tcg_gen_mov_tl(t0
, cpu_T
[0]);
6817 if ((b
& 1) && (prefixes
& PREFIX_REPZ
) &&
6818 (s
->cpuid_ext3_features
& CPUID_EXT3_ABM
)) {
6820 case OT_WORD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6821 tcg_const_i32(16)); break;
6822 case OT_LONG
: gen_helper_lzcnt(cpu_T
[0], t0
,
6823 tcg_const_i32(32)); break;
6824 case OT_QUAD
: gen_helper_lzcnt(cpu_T
[0], t0
,
6825 tcg_const_i32(64)); break;
6827 gen_op_mov_reg_T0(ot
, reg
);
6829 label1
= gen_new_label();
6830 tcg_gen_movi_tl(cpu_cc_dst
, 0);
6831 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 0, label1
);
6833 gen_helper_bsr(cpu_T
[0], t0
);
6835 gen_helper_bsf(cpu_T
[0], t0
);
6837 gen_op_mov_reg_T0(ot
, reg
);
6838 tcg_gen_movi_tl(cpu_cc_dst
, 1);
6839 gen_set_label(label1
);
6840 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
6845 /************************/
6847 case 0x27: /* daa */
6850 gen_update_cc_op(s
);
6851 gen_helper_daa(cpu_env
);
6852 set_cc_op(s
, CC_OP_EFLAGS
);
6854 case 0x2f: /* das */
6857 gen_update_cc_op(s
);
6858 gen_helper_das(cpu_env
);
6859 set_cc_op(s
, CC_OP_EFLAGS
);
6861 case 0x37: /* aaa */
6864 gen_update_cc_op(s
);
6865 gen_helper_aaa(cpu_env
);
6866 set_cc_op(s
, CC_OP_EFLAGS
);
6868 case 0x3f: /* aas */
6871 gen_update_cc_op(s
);
6872 gen_helper_aas(cpu_env
);
6873 set_cc_op(s
, CC_OP_EFLAGS
);
6875 case 0xd4: /* aam */
6878 val
= cpu_ldub_code(env
, s
->pc
++);
6880 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6882 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
6883 set_cc_op(s
, CC_OP_LOGICB
);
6886 case 0xd5: /* aad */
6889 val
= cpu_ldub_code(env
, s
->pc
++);
6890 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
6891 set_cc_op(s
, CC_OP_LOGICB
);
6893 /************************/
6895 case 0x90: /* nop */
6896 /* XXX: correct lock test for all insn */
6897 if (prefixes
& PREFIX_LOCK
) {
6900 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6902 goto do_xchg_reg_eax
;
6904 if (prefixes
& PREFIX_REPZ
) {
6905 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PAUSE
);
6908 case 0x9b: /* fwait */
6909 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6910 (HF_MP_MASK
| HF_TS_MASK
)) {
6911 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6913 gen_update_cc_op(s
);
6914 gen_jmp_im(pc_start
- s
->cs_base
);
6915 gen_helper_fwait(cpu_env
);
6918 case 0xcc: /* int3 */
6919 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6921 case 0xcd: /* int N */
6922 val
= cpu_ldub_code(env
, s
->pc
++);
6923 if (s
->vm86
&& s
->iopl
!= 3) {
6924 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6926 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6929 case 0xce: /* into */
6932 gen_update_cc_op(s
);
6933 gen_jmp_im(pc_start
- s
->cs_base
);
6934 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6937 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6938 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6940 gen_debug(s
, pc_start
- s
->cs_base
);
6944 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6948 case 0xfa: /* cli */
6950 if (s
->cpl
<= s
->iopl
) {
6951 gen_helper_cli(cpu_env
);
6953 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6957 gen_helper_cli(cpu_env
);
6959 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6963 case 0xfb: /* sti */
6965 if (s
->cpl
<= s
->iopl
) {
6967 gen_helper_sti(cpu_env
);
6968 /* interruptions are enabled only the first insn after sti */
6969 /* If several instructions disable interrupts, only the
6971 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
6972 gen_helper_set_inhibit_irq(cpu_env
);
6973 /* give a chance to handle pending irqs */
6974 gen_jmp_im(s
->pc
- s
->cs_base
);
6977 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6983 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6987 case 0x62: /* bound */
6990 ot
= dflag
? OT_LONG
: OT_WORD
;
6991 modrm
= cpu_ldub_code(env
, s
->pc
++);
6992 reg
= (modrm
>> 3) & 7;
6993 mod
= (modrm
>> 6) & 3;
6996 gen_op_mov_TN_reg(ot
, 0, reg
);
6997 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6998 gen_jmp_im(pc_start
- s
->cs_base
);
6999 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7000 if (ot
== OT_WORD
) {
7001 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7003 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7006 case 0x1c8 ... 0x1cf: /* bswap reg */
7007 reg
= (b
& 7) | REX_B(s
);
7008 #ifdef TARGET_X86_64
7010 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
7011 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
7012 gen_op_mov_reg_T0(OT_QUAD
, reg
);
7016 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
7017 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
7018 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
7019 gen_op_mov_reg_T0(OT_LONG
, reg
);
7022 case 0xd6: /* salc */
7025 gen_compute_eflags_c(s
, cpu_T
[0]);
7026 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
7027 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
7029 case 0xe0: /* loopnz */
7030 case 0xe1: /* loopz */
7031 case 0xe2: /* loop */
7032 case 0xe3: /* jecxz */
7036 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
7037 next_eip
= s
->pc
- s
->cs_base
;
7042 l1
= gen_new_label();
7043 l2
= gen_new_label();
7044 l3
= gen_new_label();
7047 case 0: /* loopnz */
7049 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7050 gen_op_jz_ecx(s
->aflag
, l3
);
7051 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7054 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7055 gen_op_jnz_ecx(s
->aflag
, l1
);
7059 gen_op_jz_ecx(s
->aflag
, l1
);
7064 gen_jmp_im(next_eip
);
7073 case 0x130: /* wrmsr */
7074 case 0x132: /* rdmsr */
7076 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7078 gen_update_cc_op(s
);
7079 gen_jmp_im(pc_start
- s
->cs_base
);
7081 gen_helper_rdmsr(cpu_env
);
7083 gen_helper_wrmsr(cpu_env
);
7087 case 0x131: /* rdtsc */
7088 gen_update_cc_op(s
);
7089 gen_jmp_im(pc_start
- s
->cs_base
);
7092 gen_helper_rdtsc(cpu_env
);
7095 gen_jmp(s
, s
->pc
- s
->cs_base
);
7098 case 0x133: /* rdpmc */
7099 gen_update_cc_op(s
);
7100 gen_jmp_im(pc_start
- s
->cs_base
);
7101 gen_helper_rdpmc(cpu_env
);
7103 case 0x134: /* sysenter */
7104 /* For Intel SYSENTER is valid on 64-bit */
7105 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7108 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7110 gen_update_cc_op(s
);
7111 gen_jmp_im(pc_start
- s
->cs_base
);
7112 gen_helper_sysenter(cpu_env
);
7116 case 0x135: /* sysexit */
7117 /* For Intel SYSEXIT is valid on 64-bit */
7118 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7121 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7123 gen_update_cc_op(s
);
7124 gen_jmp_im(pc_start
- s
->cs_base
);
7125 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
));
7129 #ifdef TARGET_X86_64
7130 case 0x105: /* syscall */
7131 /* XXX: is it usable in real mode ? */
7132 gen_update_cc_op(s
);
7133 gen_jmp_im(pc_start
- s
->cs_base
);
7134 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7137 case 0x107: /* sysret */
7139 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7141 gen_update_cc_op(s
);
7142 gen_jmp_im(pc_start
- s
->cs_base
);
7143 gen_helper_sysret(cpu_env
, tcg_const_i32(s
->dflag
));
7144 /* condition codes are modified only in long mode */
7146 set_cc_op(s
, CC_OP_EFLAGS
);
7152 case 0x1a2: /* cpuid */
7153 gen_update_cc_op(s
);
7154 gen_jmp_im(pc_start
- s
->cs_base
);
7155 gen_helper_cpuid(cpu_env
);
7157 case 0xf4: /* hlt */
7159 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7161 gen_update_cc_op(s
);
7162 gen_jmp_im(pc_start
- s
->cs_base
);
7163 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7164 s
->is_jmp
= DISAS_TB_JUMP
;
7168 modrm
= cpu_ldub_code(env
, s
->pc
++);
7169 mod
= (modrm
>> 6) & 3;
7170 op
= (modrm
>> 3) & 7;
7173 if (!s
->pe
|| s
->vm86
)
7175 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7176 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
7180 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7183 if (!s
->pe
|| s
->vm86
)
7186 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7188 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7189 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7190 gen_jmp_im(pc_start
- s
->cs_base
);
7191 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7192 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7196 if (!s
->pe
|| s
->vm86
)
7198 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7199 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7203 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7206 if (!s
->pe
|| s
->vm86
)
7209 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7211 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7212 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7213 gen_jmp_im(pc_start
- s
->cs_base
);
7214 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7215 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7220 if (!s
->pe
|| s
->vm86
)
7222 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7223 gen_update_cc_op(s
);
7225 gen_helper_verr(cpu_env
, cpu_T
[0]);
7227 gen_helper_verw(cpu_env
, cpu_T
[0]);
7229 set_cc_op(s
, CC_OP_EFLAGS
);
7236 modrm
= cpu_ldub_code(env
, s
->pc
++);
7237 mod
= (modrm
>> 6) & 3;
7238 op
= (modrm
>> 3) & 7;
7244 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7245 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7246 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7247 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7248 gen_add_A0_im(s
, 2);
7249 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7251 gen_op_andl_T0_im(0xffffff);
7252 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7257 case 0: /* monitor */
7258 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7261 gen_update_cc_op(s
);
7262 gen_jmp_im(pc_start
- s
->cs_base
);
7263 #ifdef TARGET_X86_64
7264 if (s
->aflag
== 2) {
7265 gen_op_movq_A0_reg(R_EAX
);
7269 gen_op_movl_A0_reg(R_EAX
);
7271 gen_op_andl_A0_ffff();
7273 gen_add_A0_ds_seg(s
);
7274 gen_helper_monitor(cpu_env
, cpu_A0
);
7277 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7280 gen_update_cc_op(s
);
7281 gen_jmp_im(pc_start
- s
->cs_base
);
7282 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7286 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7290 gen_helper_clac(cpu_env
);
7291 gen_jmp_im(s
->pc
- s
->cs_base
);
7295 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7299 gen_helper_stac(cpu_env
);
7300 gen_jmp_im(s
->pc
- s
->cs_base
);
7307 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7308 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7309 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7310 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7311 gen_add_A0_im(s
, 2);
7312 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7314 gen_op_andl_T0_im(0xffffff);
7315 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7321 gen_update_cc_op(s
);
7322 gen_jmp_im(pc_start
- s
->cs_base
);
7325 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7328 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7331 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
),
7332 tcg_const_i32(s
->pc
- pc_start
));
7334 s
->is_jmp
= DISAS_TB_JUMP
;
7337 case 1: /* VMMCALL */
7338 if (!(s
->flags
& HF_SVME_MASK
))
7340 gen_helper_vmmcall(cpu_env
);
7342 case 2: /* VMLOAD */
7343 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7346 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7349 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
));
7352 case 3: /* VMSAVE */
7353 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7356 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7359 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
));
7363 if ((!(s
->flags
& HF_SVME_MASK
) &&
7364 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7368 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7371 gen_helper_stgi(cpu_env
);
7375 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7378 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7381 gen_helper_clgi(cpu_env
);
7384 case 6: /* SKINIT */
7385 if ((!(s
->flags
& HF_SVME_MASK
) &&
7386 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7389 gen_helper_skinit(cpu_env
);
7391 case 7: /* INVLPGA */
7392 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7395 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7398 gen_helper_invlpga(cpu_env
, tcg_const_i32(s
->aflag
));
7404 } else if (s
->cpl
!= 0) {
7405 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7407 gen_svm_check_intercept(s
, pc_start
,
7408 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7409 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7410 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7411 gen_add_A0_im(s
, 2);
7412 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7414 gen_op_andl_T0_im(0xffffff);
7416 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7417 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7419 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7420 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7425 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7426 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7427 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7429 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7431 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7435 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7437 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7438 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7439 gen_helper_lmsw(cpu_env
, cpu_T
[0]);
7440 gen_jmp_im(s
->pc
- s
->cs_base
);
7445 if (mod
!= 3) { /* invlpg */
7447 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7449 gen_update_cc_op(s
);
7450 gen_jmp_im(pc_start
- s
->cs_base
);
7451 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7452 gen_helper_invlpg(cpu_env
, cpu_A0
);
7453 gen_jmp_im(s
->pc
- s
->cs_base
);
7458 case 0: /* swapgs */
7459 #ifdef TARGET_X86_64
7462 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7464 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7465 offsetof(CPUX86State
,segs
[R_GS
].base
));
7466 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7467 offsetof(CPUX86State
,kernelgsbase
));
7468 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7469 offsetof(CPUX86State
,segs
[R_GS
].base
));
7470 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7471 offsetof(CPUX86State
,kernelgsbase
));
7479 case 1: /* rdtscp */
7480 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7482 gen_update_cc_op(s
);
7483 gen_jmp_im(pc_start
- s
->cs_base
);
7486 gen_helper_rdtscp(cpu_env
);
7489 gen_jmp(s
, s
->pc
- s
->cs_base
);
7501 case 0x108: /* invd */
7502 case 0x109: /* wbinvd */
7504 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7506 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7510 case 0x63: /* arpl or movslS (x86_64) */
7511 #ifdef TARGET_X86_64
7514 /* d_ot is the size of destination */
7515 d_ot
= dflag
+ OT_WORD
;
7517 modrm
= cpu_ldub_code(env
, s
->pc
++);
7518 reg
= ((modrm
>> 3) & 7) | rex_r
;
7519 mod
= (modrm
>> 6) & 3;
7520 rm
= (modrm
& 7) | REX_B(s
);
7523 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7525 if (d_ot
== OT_QUAD
)
7526 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7527 gen_op_mov_reg_T0(d_ot
, reg
);
7529 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7530 if (d_ot
== OT_QUAD
) {
7531 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7533 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7535 gen_op_mov_reg_T0(d_ot
, reg
);
7541 TCGv t0
, t1
, t2
, a0
;
7543 if (!s
->pe
|| s
->vm86
)
7545 t0
= tcg_temp_local_new();
7546 t1
= tcg_temp_local_new();
7547 t2
= tcg_temp_local_new();
7549 modrm
= cpu_ldub_code(env
, s
->pc
++);
7550 reg
= (modrm
>> 3) & 7;
7551 mod
= (modrm
>> 6) & 3;
7554 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7555 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7556 a0
= tcg_temp_local_new();
7557 tcg_gen_mov_tl(a0
, cpu_A0
);
7559 gen_op_mov_v_reg(ot
, t0
, rm
);
7562 gen_op_mov_v_reg(ot
, t1
, reg
);
7563 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7564 tcg_gen_andi_tl(t1
, t1
, 3);
7565 tcg_gen_movi_tl(t2
, 0);
7566 label1
= gen_new_label();
7567 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7568 tcg_gen_andi_tl(t0
, t0
, ~3);
7569 tcg_gen_or_tl(t0
, t0
, t1
);
7570 tcg_gen_movi_tl(t2
, CC_Z
);
7571 gen_set_label(label1
);
7573 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7576 gen_op_mov_reg_v(ot
, rm
, t0
);
7578 gen_compute_eflags(s
);
7579 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7580 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7586 case 0x102: /* lar */
7587 case 0x103: /* lsl */
7591 if (!s
->pe
|| s
->vm86
)
7593 ot
= dflag
? OT_LONG
: OT_WORD
;
7594 modrm
= cpu_ldub_code(env
, s
->pc
++);
7595 reg
= ((modrm
>> 3) & 7) | rex_r
;
7596 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7597 t0
= tcg_temp_local_new();
7598 gen_update_cc_op(s
);
7600 gen_helper_lar(t0
, cpu_env
, cpu_T
[0]);
7602 gen_helper_lsl(t0
, cpu_env
, cpu_T
[0]);
7604 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7605 label1
= gen_new_label();
7606 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7607 gen_op_mov_reg_v(ot
, reg
, t0
);
7608 gen_set_label(label1
);
7609 set_cc_op(s
, CC_OP_EFLAGS
);
7614 modrm
= cpu_ldub_code(env
, s
->pc
++);
7615 mod
= (modrm
>> 6) & 3;
7616 op
= (modrm
>> 3) & 7;
7618 case 0: /* prefetchnta */
7619 case 1: /* prefetchnt0 */
7620 case 2: /* prefetchnt0 */
7621 case 3: /* prefetchnt0 */
7624 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7625 /* nothing more to do */
7627 default: /* nop (multi byte) */
7628 gen_nop_modrm(env
, s
, modrm
);
7632 case 0x119 ... 0x11f: /* nop (multi byte) */
7633 modrm
= cpu_ldub_code(env
, s
->pc
++);
7634 gen_nop_modrm(env
, s
, modrm
);
7636 case 0x120: /* mov reg, crN */
7637 case 0x122: /* mov crN, reg */
7639 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7641 modrm
= cpu_ldub_code(env
, s
->pc
++);
7642 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7643 * AMD documentation (24594.pdf) and testing of
7644 * intel 386 and 486 processors all show that the mod bits
7645 * are assumed to be 1's, regardless of actual values.
7647 rm
= (modrm
& 7) | REX_B(s
);
7648 reg
= ((modrm
>> 3) & 7) | rex_r
;
7653 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
7654 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
7663 gen_update_cc_op(s
);
7664 gen_jmp_im(pc_start
- s
->cs_base
);
7666 gen_op_mov_TN_reg(ot
, 0, rm
);
7667 gen_helper_write_crN(cpu_env
, tcg_const_i32(reg
),
7669 gen_jmp_im(s
->pc
- s
->cs_base
);
7672 gen_helper_read_crN(cpu_T
[0], cpu_env
, tcg_const_i32(reg
));
7673 gen_op_mov_reg_T0(ot
, rm
);
7681 case 0x121: /* mov reg, drN */
7682 case 0x123: /* mov drN, reg */
7684 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7686 modrm
= cpu_ldub_code(env
, s
->pc
++);
7687 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7688 * AMD documentation (24594.pdf) and testing of
7689 * intel 386 and 486 processors all show that the mod bits
7690 * are assumed to be 1's, regardless of actual values.
7692 rm
= (modrm
& 7) | REX_B(s
);
7693 reg
= ((modrm
>> 3) & 7) | rex_r
;
7698 /* XXX: do it dynamically with CR4.DE bit */
7699 if (reg
== 4 || reg
== 5 || reg
>= 8)
7702 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
7703 gen_op_mov_TN_reg(ot
, 0, rm
);
7704 gen_helper_movl_drN_T0(cpu_env
, tcg_const_i32(reg
), cpu_T
[0]);
7705 gen_jmp_im(s
->pc
- s
->cs_base
);
7708 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
7709 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
7710 gen_op_mov_reg_T0(ot
, rm
);
7714 case 0x106: /* clts */
7716 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7718 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7719 gen_helper_clts(cpu_env
);
7720 /* abort block because static cpu state changed */
7721 gen_jmp_im(s
->pc
- s
->cs_base
);
7725 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7726 case 0x1c3: /* MOVNTI reg, mem */
7727 if (!(s
->cpuid_features
& CPUID_SSE2
))
7729 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
7730 modrm
= cpu_ldub_code(env
, s
->pc
++);
7731 mod
= (modrm
>> 6) & 3;
7734 reg
= ((modrm
>> 3) & 7) | rex_r
;
7735 /* generate a generic store */
7736 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
7739 modrm
= cpu_ldub_code(env
, s
->pc
++);
7740 mod
= (modrm
>> 6) & 3;
7741 op
= (modrm
>> 3) & 7;
7743 case 0: /* fxsave */
7744 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7745 (s
->prefix
& PREFIX_LOCK
))
7747 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7748 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7751 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7752 gen_update_cc_op(s
);
7753 gen_jmp_im(pc_start
- s
->cs_base
);
7754 gen_helper_fxsave(cpu_env
, cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
7756 case 1: /* fxrstor */
7757 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
7758 (s
->prefix
& PREFIX_LOCK
))
7760 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
7761 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7764 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7765 gen_update_cc_op(s
);
7766 gen_jmp_im(pc_start
- s
->cs_base
);
7767 gen_helper_fxrstor(cpu_env
, cpu_A0
,
7768 tcg_const_i32((s
->dflag
== 2)));
7770 case 2: /* ldmxcsr */
7771 case 3: /* stmxcsr */
7772 if (s
->flags
& HF_TS_MASK
) {
7773 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7776 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
7779 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7781 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7782 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7783 gen_helper_ldmxcsr(cpu_env
, cpu_tmp2_i32
);
7785 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
7786 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
7789 case 5: /* lfence */
7790 case 6: /* mfence */
7791 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE2
))
7794 case 7: /* sfence / clflush */
7795 if ((modrm
& 0xc7) == 0xc0) {
7797 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7798 if (!(s
->cpuid_features
& CPUID_SSE
))
7802 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
7804 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7811 case 0x10d: /* 3DNow! prefetch(w) */
7812 modrm
= cpu_ldub_code(env
, s
->pc
++);
7813 mod
= (modrm
>> 6) & 3;
7816 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7817 /* ignore for now */
7819 case 0x1aa: /* rsm */
7820 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
7821 if (!(s
->flags
& HF_SMM_MASK
))
7823 gen_update_cc_op(s
);
7824 gen_jmp_im(s
->pc
- s
->cs_base
);
7825 gen_helper_rsm(cpu_env
);
7828 case 0x1b8: /* SSE4.2 popcnt */
7829 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
7832 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
7835 modrm
= cpu_ldub_code(env
, s
->pc
++);
7836 reg
= ((modrm
>> 3) & 7) | rex_r
;
7838 if (s
->prefix
& PREFIX_DATA
)
7840 else if (s
->dflag
!= 2)
7845 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7846 gen_helper_popcnt(cpu_T
[0], cpu_env
, cpu_T
[0], tcg_const_i32(ot
));
7847 gen_op_mov_reg_T0(ot
, reg
);
7849 set_cc_op(s
, CC_OP_EFLAGS
);
7851 case 0x10e ... 0x10f:
7852 /* 3DNow! instructions, ignore prefixes */
7853 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
7854 case 0x110 ... 0x117:
7855 case 0x128 ... 0x12f:
7856 case 0x138 ... 0x13a:
7857 case 0x150 ... 0x179:
7858 case 0x17c ... 0x17f:
7860 case 0x1c4 ... 0x1c6:
7861 case 0x1d0 ... 0x1fe:
7862 gen_sse(env
, s
, b
, pc_start
, rex_r
);
7867 /* lock generation */
7868 if (s
->prefix
& PREFIX_LOCK
)
7869 gen_helper_unlock();
7872 if (s
->prefix
& PREFIX_LOCK
)
7873 gen_helper_unlock();
7874 /* XXX: ensure that no lock was generated */
7875 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
7879 void optimize_flags_init(void)
7881 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
7882 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
,
7883 offsetof(CPUX86State
, cc_op
), "cc_op");
7884 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_dst
),
7886 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src
),
7888 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src2
),
7891 #ifdef TARGET_X86_64
7892 cpu_regs
[R_EAX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7893 offsetof(CPUX86State
, regs
[R_EAX
]), "rax");
7894 cpu_regs
[R_ECX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7895 offsetof(CPUX86State
, regs
[R_ECX
]), "rcx");
7896 cpu_regs
[R_EDX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7897 offsetof(CPUX86State
, regs
[R_EDX
]), "rdx");
7898 cpu_regs
[R_EBX
] = tcg_global_mem_new_i64(TCG_AREG0
,
7899 offsetof(CPUX86State
, regs
[R_EBX
]), "rbx");
7900 cpu_regs
[R_ESP
] = tcg_global_mem_new_i64(TCG_AREG0
,
7901 offsetof(CPUX86State
, regs
[R_ESP
]), "rsp");
7902 cpu_regs
[R_EBP
] = tcg_global_mem_new_i64(TCG_AREG0
,
7903 offsetof(CPUX86State
, regs
[R_EBP
]), "rbp");
7904 cpu_regs
[R_ESI
] = tcg_global_mem_new_i64(TCG_AREG0
,
7905 offsetof(CPUX86State
, regs
[R_ESI
]), "rsi");
7906 cpu_regs
[R_EDI
] = tcg_global_mem_new_i64(TCG_AREG0
,
7907 offsetof(CPUX86State
, regs
[R_EDI
]), "rdi");
7908 cpu_regs
[8] = tcg_global_mem_new_i64(TCG_AREG0
,
7909 offsetof(CPUX86State
, regs
[8]), "r8");
7910 cpu_regs
[9] = tcg_global_mem_new_i64(TCG_AREG0
,
7911 offsetof(CPUX86State
, regs
[9]), "r9");
7912 cpu_regs
[10] = tcg_global_mem_new_i64(TCG_AREG0
,
7913 offsetof(CPUX86State
, regs
[10]), "r10");
7914 cpu_regs
[11] = tcg_global_mem_new_i64(TCG_AREG0
,
7915 offsetof(CPUX86State
, regs
[11]), "r11");
7916 cpu_regs
[12] = tcg_global_mem_new_i64(TCG_AREG0
,
7917 offsetof(CPUX86State
, regs
[12]), "r12");
7918 cpu_regs
[13] = tcg_global_mem_new_i64(TCG_AREG0
,
7919 offsetof(CPUX86State
, regs
[13]), "r13");
7920 cpu_regs
[14] = tcg_global_mem_new_i64(TCG_AREG0
,
7921 offsetof(CPUX86State
, regs
[14]), "r14");
7922 cpu_regs
[15] = tcg_global_mem_new_i64(TCG_AREG0
,
7923 offsetof(CPUX86State
, regs
[15]), "r15");
7925 cpu_regs
[R_EAX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7926 offsetof(CPUX86State
, regs
[R_EAX
]), "eax");
7927 cpu_regs
[R_ECX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7928 offsetof(CPUX86State
, regs
[R_ECX
]), "ecx");
7929 cpu_regs
[R_EDX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7930 offsetof(CPUX86State
, regs
[R_EDX
]), "edx");
7931 cpu_regs
[R_EBX
] = tcg_global_mem_new_i32(TCG_AREG0
,
7932 offsetof(CPUX86State
, regs
[R_EBX
]), "ebx");
7933 cpu_regs
[R_ESP
] = tcg_global_mem_new_i32(TCG_AREG0
,
7934 offsetof(CPUX86State
, regs
[R_ESP
]), "esp");
7935 cpu_regs
[R_EBP
] = tcg_global_mem_new_i32(TCG_AREG0
,
7936 offsetof(CPUX86State
, regs
[R_EBP
]), "ebp");
7937 cpu_regs
[R_ESI
] = tcg_global_mem_new_i32(TCG_AREG0
,
7938 offsetof(CPUX86State
, regs
[R_ESI
]), "esi");
7939 cpu_regs
[R_EDI
] = tcg_global_mem_new_i32(TCG_AREG0
,
7940 offsetof(CPUX86State
, regs
[R_EDI
]), "edi");
7943 /* register helpers */
7944 #define GEN_HELPER 2
7948 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7949 basic block 'tb'. If search_pc is TRUE, also generate PC
7950 information for each intermediate instruction. */
7951 static inline void gen_intermediate_code_internal(CPUX86State
*env
,
7952 TranslationBlock
*tb
,
7955 DisasContext dc1
, *dc
= &dc1
;
7956 target_ulong pc_ptr
;
7957 uint16_t *gen_opc_end
;
7961 target_ulong pc_start
;
7962 target_ulong cs_base
;
7966 /* generate intermediate code */
7968 cs_base
= tb
->cs_base
;
7971 dc
->pe
= (flags
>> HF_PE_SHIFT
) & 1;
7972 dc
->code32
= (flags
>> HF_CS32_SHIFT
) & 1;
7973 dc
->ss32
= (flags
>> HF_SS32_SHIFT
) & 1;
7974 dc
->addseg
= (flags
>> HF_ADDSEG_SHIFT
) & 1;
7976 dc
->vm86
= (flags
>> VM_SHIFT
) & 1;
7977 dc
->cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
7978 dc
->iopl
= (flags
>> IOPL_SHIFT
) & 3;
7979 dc
->tf
= (flags
>> TF_SHIFT
) & 1;
7980 dc
->singlestep_enabled
= env
->singlestep_enabled
;
7981 dc
->cc_op
= CC_OP_DYNAMIC
;
7982 dc
->cc_op_dirty
= false;
7983 dc
->cs_base
= cs_base
;
7985 dc
->popl_esp_hack
= 0;
7986 /* select memory access functions */
7988 if (flags
& HF_SOFTMMU_MASK
) {
7989 dc
->mem_index
= (cpu_mmu_index(env
) + 1) << 2;
7991 dc
->cpuid_features
= env
->cpuid_features
;
7992 dc
->cpuid_ext_features
= env
->cpuid_ext_features
;
7993 dc
->cpuid_ext2_features
= env
->cpuid_ext2_features
;
7994 dc
->cpuid_ext3_features
= env
->cpuid_ext3_features
;
7995 dc
->cpuid_7_0_ebx_features
= env
->cpuid_7_0_ebx_features
;
7996 #ifdef TARGET_X86_64
7997 dc
->lma
= (flags
>> HF_LMA_SHIFT
) & 1;
7998 dc
->code64
= (flags
>> HF_CS64_SHIFT
) & 1;
8001 dc
->jmp_opt
= !(dc
->tf
|| env
->singlestep_enabled
||
8002 (flags
& HF_INHIBIT_IRQ_MASK
)
8003 #ifndef CONFIG_SOFTMMU
8004 || (flags
& HF_SOFTMMU_MASK
)
8008 /* check addseg logic */
8009 if (!dc
->addseg
&& (dc
->vm86
|| !dc
->pe
|| !dc
->code32
))
8010 printf("ERROR addseg\n");
8013 cpu_T
[0] = tcg_temp_new();
8014 cpu_T
[1] = tcg_temp_new();
8015 cpu_A0
= tcg_temp_new();
8017 cpu_tmp0
= tcg_temp_new();
8018 cpu_tmp1_i64
= tcg_temp_new_i64();
8019 cpu_tmp2_i32
= tcg_temp_new_i32();
8020 cpu_tmp3_i32
= tcg_temp_new_i32();
8021 cpu_tmp4
= tcg_temp_new();
8022 cpu_tmp5
= tcg_temp_new();
8023 cpu_ptr0
= tcg_temp_new_ptr();
8024 cpu_ptr1
= tcg_temp_new_ptr();
8025 cpu_cc_srcT
= tcg_temp_local_new();
8027 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
8029 dc
->is_jmp
= DISAS_NEXT
;
8033 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
8035 max_insns
= CF_COUNT_MASK
;
8039 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
8040 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
8041 if (bp
->pc
== pc_ptr
&&
8042 !((bp
->flags
& BP_CPU
) && (tb
->flags
& HF_RF_MASK
))) {
8043 gen_debug(dc
, pc_ptr
- dc
->cs_base
);
8049 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8053 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8055 tcg_ctx
.gen_opc_pc
[lj
] = pc_ptr
;
8056 gen_opc_cc_op
[lj
] = dc
->cc_op
;
8057 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
8058 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
8060 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
8063 pc_ptr
= disas_insn(env
, dc
, pc_ptr
);
8065 /* stop translation if indicated */
8068 /* if single step mode, we generate only one instruction and
8069 generate an exception */
8070 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8071 the flag and abort the translation to give the irqs a
8072 change to be happen */
8073 if (dc
->tf
|| dc
->singlestep_enabled
||
8074 (flags
& HF_INHIBIT_IRQ_MASK
)) {
8075 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8079 /* if too long translation, stop generation too */
8080 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
||
8081 (pc_ptr
- pc_start
) >= (TARGET_PAGE_SIZE
- 32) ||
8082 num_insns
>= max_insns
) {
8083 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8088 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8093 if (tb
->cflags
& CF_LAST_IO
)
8095 gen_icount_end(tb
, num_insns
);
8096 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
8097 /* we don't forget to fill the last values */
8099 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8102 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8106 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
8108 qemu_log("----------------\n");
8109 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
8110 #ifdef TARGET_X86_64
8115 disas_flags
= !dc
->code32
;
8116 log_target_disas(env
, pc_start
, pc_ptr
- pc_start
, disas_flags
);
8122 tb
->size
= pc_ptr
- pc_start
;
8123 tb
->icount
= num_insns
;
8127 void gen_intermediate_code(CPUX86State
*env
, TranslationBlock
*tb
)
8129 gen_intermediate_code_internal(env
, tb
, 0);
8132 void gen_intermediate_code_pc(CPUX86State
*env
, TranslationBlock
*tb
)
8134 gen_intermediate_code_internal(env
, tb
, 1);
8137 void restore_state_to_opc(CPUX86State
*env
, TranslationBlock
*tb
, int pc_pos
)
8141 if (qemu_loglevel_mask(CPU_LOG_TB_OP
)) {
8143 qemu_log("RESTORE:\n");
8144 for(i
= 0;i
<= pc_pos
; i
++) {
8145 if (tcg_ctx
.gen_opc_instr_start
[i
]) {
8146 qemu_log("0x%04x: " TARGET_FMT_lx
"\n", i
,
8147 tcg_ctx
.gen_opc_pc
[i
]);
8150 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx
" cs_base=%x\n",
8151 pc_pos
, tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
,
8152 (uint32_t)tb
->cs_base
);
8155 env
->eip
= tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
;
8156 cc_op
= gen_opc_cc_op
[pc_pos
];
8157 if (cc_op
!= CC_OP_DYNAMIC
)