4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "qemu/host-utils.h"
28 #include "disas/disas.h"
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40 #define PREFIX_VEX 0x20
43 #define CODE64(s) ((s)->code64)
44 #define REX_X(s) ((s)->rex_x)
45 #define REX_B(s) ((s)->rex_b)
60 //#define MACRO_TEST 1
62 /* global register indexes */
63 static TCGv_ptr cpu_env
;
65 static TCGv cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
, cpu_cc_srcT
;
66 static TCGv_i32 cpu_cc_op
;
67 static TCGv cpu_regs
[CPU_NB_REGS
];
70 /* local register indexes (only used inside old micro ops) */
71 static TCGv cpu_tmp0
, cpu_tmp4
;
72 static TCGv_ptr cpu_ptr0
, cpu_ptr1
;
73 static TCGv_i32 cpu_tmp2_i32
, cpu_tmp3_i32
;
74 static TCGv_i64 cpu_tmp1_i64
;
76 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
78 #include "exec/gen-icount.h"
81 static int x86_64_hregs
;
84 typedef struct DisasContext
{
85 /* current insn context */
86 int override
; /* -1 if no override */
89 target_ulong pc
; /* pc = eip + cs_base */
90 int is_jmp
; /* 1 = means jump (stop translation), 2 means CPU
91 static state change (stop translation) */
92 /* current block context */
93 target_ulong cs_base
; /* base of CS segment */
94 int pe
; /* protected mode */
95 int code32
; /* 32 bit code segment */
97 int lma
; /* long mode active */
98 int code64
; /* 64 bit code segment */
101 int vex_l
; /* vex vector length */
102 int vex_v
; /* vex vvvv register, without 1's compliment. */
103 int ss32
; /* 32 bit stack segment */
104 CCOp cc_op
; /* current CC operation */
106 int addseg
; /* non zero if either DS/ES/SS have a non zero base */
107 int f_st
; /* currently unused */
108 int vm86
; /* vm86 mode */
111 int tf
; /* TF cpu flag */
112 int singlestep_enabled
; /* "hardware" single step enabled */
113 int jmp_opt
; /* use direct block chaining for direct jumps */
114 int mem_index
; /* select memory access functions */
115 uint64_t flags
; /* all execution flags */
116 struct TranslationBlock
*tb
;
117 int popl_esp_hack
; /* for correct popl with esp base handling */
118 int rip_offset
; /* only used in x86_64, but left for simplicity */
120 int cpuid_ext_features
;
121 int cpuid_ext2_features
;
122 int cpuid_ext3_features
;
123 int cpuid_7_0_ebx_features
;
126 static void gen_eob(DisasContext
*s
);
127 static void gen_jmp(DisasContext
*s
, target_ulong eip
);
128 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
);
129 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
);
131 /* i386 arith/logic operations */
151 OP_SHL1
, /* undocumented */
175 /* I386 int registers */
176 OR_EAX
, /* MUST be even numbered */
185 OR_TMP0
= 16, /* temporary operand register */
187 OR_A0
, /* temporary register used when doing address evaluation */
197 /* Bit set if the global variable is live after setting CC_OP to X. */
198 static const uint8_t cc_op_live
[CC_OP_NB
] = {
199 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
200 [CC_OP_EFLAGS
] = USES_CC_SRC
,
201 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
202 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
203 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
204 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
205 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
206 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
207 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
208 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
209 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
210 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
211 [CC_OP_BMILGB
... CC_OP_BMILGQ
] = USES_CC_DST
| USES_CC_SRC
,
212 [CC_OP_ADCX
] = USES_CC_DST
| USES_CC_SRC
,
213 [CC_OP_ADOX
] = USES_CC_SRC
| USES_CC_SRC2
,
214 [CC_OP_ADCOX
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
218 static void set_cc_op(DisasContext
*s
, CCOp op
)
222 if (s
->cc_op
== op
) {
226 /* Discard CC computation that will no longer be used. */
227 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
228 if (dead
& USES_CC_DST
) {
229 tcg_gen_discard_tl(cpu_cc_dst
);
231 if (dead
& USES_CC_SRC
) {
232 tcg_gen_discard_tl(cpu_cc_src
);
234 if (dead
& USES_CC_SRC2
) {
235 tcg_gen_discard_tl(cpu_cc_src2
);
237 if (dead
& USES_CC_SRCT
) {
238 tcg_gen_discard_tl(cpu_cc_srcT
);
241 if (op
== CC_OP_DYNAMIC
) {
242 /* The DYNAMIC setting is translator only, and should never be
243 stored. Thus we always consider it clean. */
244 s
->cc_op_dirty
= false;
246 /* Discard any computed CC_OP value (see shifts). */
247 if (s
->cc_op
== CC_OP_DYNAMIC
) {
248 tcg_gen_discard_i32(cpu_cc_op
);
250 s
->cc_op_dirty
= true;
255 static void gen_update_cc_op(DisasContext
*s
)
257 if (s
->cc_op_dirty
) {
258 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
259 s
->cc_op_dirty
= false;
263 static inline void gen_op_movl_T0_0(void)
265 tcg_gen_movi_tl(cpu_T
[0], 0);
268 static inline void gen_op_movl_T0_im(int32_t val
)
270 tcg_gen_movi_tl(cpu_T
[0], val
);
273 static inline void gen_op_movl_T0_imu(uint32_t val
)
275 tcg_gen_movi_tl(cpu_T
[0], val
);
278 static inline void gen_op_movl_T1_im(int32_t val
)
280 tcg_gen_movi_tl(cpu_T
[1], val
);
283 static inline void gen_op_movl_T1_imu(uint32_t val
)
285 tcg_gen_movi_tl(cpu_T
[1], val
);
288 static inline void gen_op_movl_A0_im(uint32_t val
)
290 tcg_gen_movi_tl(cpu_A0
, val
);
294 static inline void gen_op_movq_A0_im(int64_t val
)
296 tcg_gen_movi_tl(cpu_A0
, val
);
300 static inline void gen_movtl_T0_im(target_ulong val
)
302 tcg_gen_movi_tl(cpu_T
[0], val
);
305 static inline void gen_movtl_T1_im(target_ulong val
)
307 tcg_gen_movi_tl(cpu_T
[1], val
);
310 static inline void gen_op_andl_T0_ffff(void)
312 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
315 static inline void gen_op_andl_T0_im(uint32_t val
)
317 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], val
);
320 static inline void gen_op_movl_T0_T1(void)
322 tcg_gen_mov_tl(cpu_T
[0], cpu_T
[1]);
325 static inline void gen_op_andl_A0_ffff(void)
327 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffff);
332 #define NB_OP_SIZES 4
334 #else /* !TARGET_X86_64 */
336 #define NB_OP_SIZES 3
338 #endif /* !TARGET_X86_64 */
340 #if defined(HOST_WORDS_BIGENDIAN)
341 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
342 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
343 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
344 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
345 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
347 #define REG_B_OFFSET 0
348 #define REG_H_OFFSET 1
349 #define REG_W_OFFSET 0
350 #define REG_L_OFFSET 0
351 #define REG_LH_OFFSET 4
354 /* In instruction encodings for byte register accesses the
355 * register number usually indicates "low 8 bits of register N";
356 * however there are some special cases where N 4..7 indicates
357 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
358 * true for this special case, false otherwise.
360 static inline bool byte_reg_is_xH(int reg
)
366 if (reg
>= 8 || x86_64_hregs
) {
373 static inline void gen_op_mov_reg_v(int ot
, int reg
, TCGv t0
)
377 if (!byte_reg_is_xH(reg
)) {
378 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 8);
380 tcg_gen_deposit_tl(cpu_regs
[reg
- 4], cpu_regs
[reg
- 4], t0
, 8, 8);
384 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 16);
386 default: /* XXX this shouldn't be reached; abort? */
388 /* For x86_64, this sets the higher half of register to zero.
389 For i386, this is equivalent to a mov. */
390 tcg_gen_ext32u_tl(cpu_regs
[reg
], t0
);
394 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
400 static inline void gen_op_mov_reg_T0(int ot
, int reg
)
402 gen_op_mov_reg_v(ot
, reg
, cpu_T
[0]);
405 static inline void gen_op_mov_reg_T1(int ot
, int reg
)
407 gen_op_mov_reg_v(ot
, reg
, cpu_T
[1]);
410 static inline void gen_op_mov_reg_A0(int size
, int reg
)
414 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_A0
, 0, 16);
416 default: /* XXX this shouldn't be reached; abort? */
418 /* For x86_64, this sets the higher half of register to zero.
419 For i386, this is equivalent to a mov. */
420 tcg_gen_ext32u_tl(cpu_regs
[reg
], cpu_A0
);
424 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_A0
);
430 static inline void gen_op_mov_v_reg(int ot
, TCGv t0
, int reg
)
432 if (ot
== OT_BYTE
&& byte_reg_is_xH(reg
)) {
433 tcg_gen_shri_tl(t0
, cpu_regs
[reg
- 4], 8);
434 tcg_gen_ext8u_tl(t0
, t0
);
436 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
440 static inline void gen_op_mov_TN_reg(int ot
, int t_index
, int reg
)
442 gen_op_mov_v_reg(ot
, cpu_T
[t_index
], reg
);
445 static inline void gen_op_movl_A0_reg(int reg
)
447 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
450 static inline void gen_op_addl_A0_im(int32_t val
)
452 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
454 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
459 static inline void gen_op_addq_A0_im(int64_t val
)
461 tcg_gen_addi_tl(cpu_A0
, cpu_A0
, val
);
465 static void gen_add_A0_im(DisasContext
*s
, int val
)
469 gen_op_addq_A0_im(val
);
472 gen_op_addl_A0_im(val
);
475 static inline void gen_op_addl_T0_T1(void)
477 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
480 static inline void gen_op_jmp_T0(void)
482 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, eip
));
485 static inline void gen_op_add_reg_im(int size
, int reg
, int32_t val
)
489 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
490 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
493 tcg_gen_addi_tl(cpu_tmp0
, cpu_regs
[reg
], val
);
494 /* For x86_64, this sets the higher half of register to zero.
495 For i386, this is equivalent to a nop. */
496 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
497 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
501 tcg_gen_addi_tl(cpu_regs
[reg
], cpu_regs
[reg
], val
);
507 static inline void gen_op_add_reg_T0(int size
, int reg
)
511 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
512 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_tmp0
, 0, 16);
515 tcg_gen_add_tl(cpu_tmp0
, cpu_regs
[reg
], cpu_T
[0]);
516 /* For x86_64, this sets the higher half of register to zero.
517 For i386, this is equivalent to a nop. */
518 tcg_gen_ext32u_tl(cpu_tmp0
, cpu_tmp0
);
519 tcg_gen_mov_tl(cpu_regs
[reg
], cpu_tmp0
);
523 tcg_gen_add_tl(cpu_regs
[reg
], cpu_regs
[reg
], cpu_T
[0]);
529 static inline void gen_op_addl_A0_reg_sN(int shift
, int reg
)
531 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
533 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
534 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
535 /* For x86_64, this sets the higher half of register to zero.
536 For i386, this is equivalent to a nop. */
537 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
540 static inline void gen_op_movl_A0_seg(int reg
)
542 tcg_gen_ld32u_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
) + REG_L_OFFSET
);
545 static inline void gen_op_addl_A0_seg(DisasContext
*s
, int reg
)
547 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
550 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
551 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
553 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
554 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
557 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
562 static inline void gen_op_movq_A0_seg(int reg
)
564 tcg_gen_ld_tl(cpu_A0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
567 static inline void gen_op_addq_A0_seg(int reg
)
569 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, segs
[reg
].base
));
570 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
573 static inline void gen_op_movq_A0_reg(int reg
)
575 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[reg
]);
578 static inline void gen_op_addq_A0_reg_sN(int shift
, int reg
)
580 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[reg
]);
582 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, shift
);
583 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
587 static inline void gen_op_lds_T0_A0(int idx
)
589 int mem_index
= (idx
>> 2) - 1;
592 tcg_gen_qemu_ld8s(cpu_T
[0], cpu_A0
, mem_index
);
595 tcg_gen_qemu_ld16s(cpu_T
[0], cpu_A0
, mem_index
);
599 tcg_gen_qemu_ld32s(cpu_T
[0], cpu_A0
, mem_index
);
604 static inline void gen_op_ld_v(int idx
, TCGv t0
, TCGv a0
)
606 int mem_index
= (idx
>> 2) - 1;
609 tcg_gen_qemu_ld8u(t0
, a0
, mem_index
);
612 tcg_gen_qemu_ld16u(t0
, a0
, mem_index
);
615 tcg_gen_qemu_ld32u(t0
, a0
, mem_index
);
619 /* Should never happen on 32-bit targets. */
621 tcg_gen_qemu_ld64(t0
, a0
, mem_index
);
627 /* XXX: always use ldu or lds */
628 static inline void gen_op_ld_T0_A0(int idx
)
630 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
633 static inline void gen_op_ldu_T0_A0(int idx
)
635 gen_op_ld_v(idx
, cpu_T
[0], cpu_A0
);
638 static inline void gen_op_ld_T1_A0(int idx
)
640 gen_op_ld_v(idx
, cpu_T
[1], cpu_A0
);
643 static inline void gen_op_st_v(int idx
, TCGv t0
, TCGv a0
)
645 int mem_index
= (idx
>> 2) - 1;
648 tcg_gen_qemu_st8(t0
, a0
, mem_index
);
651 tcg_gen_qemu_st16(t0
, a0
, mem_index
);
654 tcg_gen_qemu_st32(t0
, a0
, mem_index
);
658 /* Should never happen on 32-bit targets. */
660 tcg_gen_qemu_st64(t0
, a0
, mem_index
);
666 static inline void gen_op_st_T0_A0(int idx
)
668 gen_op_st_v(idx
, cpu_T
[0], cpu_A0
);
671 static inline void gen_op_st_T1_A0(int idx
)
673 gen_op_st_v(idx
, cpu_T
[1], cpu_A0
);
676 static inline void gen_jmp_im(target_ulong pc
)
678 tcg_gen_movi_tl(cpu_tmp0
, pc
);
679 tcg_gen_st_tl(cpu_tmp0
, cpu_env
, offsetof(CPUX86State
, eip
));
682 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
686 override
= s
->override
;
690 gen_op_movq_A0_seg(override
);
691 gen_op_addq_A0_reg_sN(0, R_ESI
);
693 gen_op_movq_A0_reg(R_ESI
);
699 if (s
->addseg
&& override
< 0)
702 gen_op_movl_A0_seg(override
);
703 gen_op_addl_A0_reg_sN(0, R_ESI
);
705 gen_op_movl_A0_reg(R_ESI
);
708 /* 16 address, always override */
711 gen_op_movl_A0_reg(R_ESI
);
712 gen_op_andl_A0_ffff();
713 gen_op_addl_A0_seg(s
, override
);
717 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
721 gen_op_movq_A0_reg(R_EDI
);
726 gen_op_movl_A0_seg(R_ES
);
727 gen_op_addl_A0_reg_sN(0, R_EDI
);
729 gen_op_movl_A0_reg(R_EDI
);
732 gen_op_movl_A0_reg(R_EDI
);
733 gen_op_andl_A0_ffff();
734 gen_op_addl_A0_seg(s
, R_ES
);
738 static inline void gen_op_movl_T0_Dshift(int ot
)
740 tcg_gen_ld32s_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, df
));
741 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], ot
);
744 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, int size
, bool sign
)
749 tcg_gen_ext8s_tl(dst
, src
);
751 tcg_gen_ext8u_tl(dst
, src
);
756 tcg_gen_ext16s_tl(dst
, src
);
758 tcg_gen_ext16u_tl(dst
, src
);
764 tcg_gen_ext32s_tl(dst
, src
);
766 tcg_gen_ext32u_tl(dst
, src
);
775 static void gen_extu(int ot
, TCGv reg
)
777 gen_ext_tl(reg
, reg
, ot
, false);
780 static void gen_exts(int ot
, TCGv reg
)
782 gen_ext_tl(reg
, reg
, ot
, true);
785 static inline void gen_op_jnz_ecx(int size
, int label1
)
787 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
788 gen_extu(size
+ 1, cpu_tmp0
);
789 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_tmp0
, 0, label1
);
792 static inline void gen_op_jz_ecx(int size
, int label1
)
794 tcg_gen_mov_tl(cpu_tmp0
, cpu_regs
[R_ECX
]);
795 gen_extu(size
+ 1, cpu_tmp0
);
796 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
799 static void gen_helper_in_func(int ot
, TCGv v
, TCGv_i32 n
)
803 gen_helper_inb(v
, n
);
806 gen_helper_inw(v
, n
);
809 gen_helper_inl(v
, n
);
814 static void gen_helper_out_func(int ot
, TCGv_i32 v
, TCGv_i32 n
)
818 gen_helper_outb(v
, n
);
821 gen_helper_outw(v
, n
);
824 gen_helper_outl(v
, n
);
829 static void gen_check_io(DisasContext
*s
, int ot
, target_ulong cur_eip
,
833 target_ulong next_eip
;
836 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
840 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
843 gen_helper_check_iob(cpu_env
, cpu_tmp2_i32
);
846 gen_helper_check_iow(cpu_env
, cpu_tmp2_i32
);
849 gen_helper_check_iol(cpu_env
, cpu_tmp2_i32
);
853 if(s
->flags
& HF_SVMI_MASK
) {
858 svm_flags
|= (1 << (4 + ot
));
859 next_eip
= s
->pc
- s
->cs_base
;
860 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
861 gen_helper_svm_check_io(cpu_env
, cpu_tmp2_i32
,
862 tcg_const_i32(svm_flags
),
863 tcg_const_i32(next_eip
- cur_eip
));
867 static inline void gen_movs(DisasContext
*s
, int ot
)
869 gen_string_movl_A0_ESI(s
);
870 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
871 gen_string_movl_A0_EDI(s
);
872 gen_op_st_T0_A0(ot
+ s
->mem_index
);
873 gen_op_movl_T0_Dshift(ot
);
874 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
875 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
878 static void gen_op_update1_cc(void)
880 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
883 static void gen_op_update2_cc(void)
885 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
886 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
889 static void gen_op_update3_cc(TCGv reg
)
891 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
892 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
893 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
896 static inline void gen_op_testl_T0_T1_cc(void)
898 tcg_gen_and_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
901 static void gen_op_update_neg_cc(void)
903 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
904 tcg_gen_neg_tl(cpu_cc_src
, cpu_T
[0]);
905 tcg_gen_movi_tl(cpu_cc_srcT
, 0);
908 /* compute all eflags to cc_src */
909 static void gen_compute_eflags(DisasContext
*s
)
911 TCGv zero
, dst
, src1
, src2
;
914 if (s
->cc_op
== CC_OP_EFLAGS
) {
917 if (s
->cc_op
== CC_OP_CLR
) {
918 tcg_gen_movi_tl(cpu_cc_src
, CC_Z
);
919 set_cc_op(s
, CC_OP_EFLAGS
);
928 /* Take care to not read values that are not live. */
929 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
930 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
932 zero
= tcg_const_tl(0);
933 if (dead
& USES_CC_DST
) {
936 if (dead
& USES_CC_SRC
) {
939 if (dead
& USES_CC_SRC2
) {
945 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
946 set_cc_op(s
, CC_OP_EFLAGS
);
953 typedef struct CCPrepare
{
963 /* compute eflags.C to reg */
964 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
970 case CC_OP_SUBB
... CC_OP_SUBQ
:
971 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
972 size
= s
->cc_op
- CC_OP_SUBB
;
973 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
974 /* If no temporary was used, be careful not to alias t1 and t0. */
975 t0
= TCGV_EQUAL(t1
, cpu_cc_src
) ? cpu_tmp0
: reg
;
976 tcg_gen_mov_tl(t0
, cpu_cc_srcT
);
980 case CC_OP_ADDB
... CC_OP_ADDQ
:
981 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
982 size
= s
->cc_op
- CC_OP_ADDB
;
983 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
984 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
986 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
987 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
989 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
991 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
993 case CC_OP_INCB
... CC_OP_INCQ
:
994 case CC_OP_DECB
... CC_OP_DECQ
:
995 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
996 .mask
= -1, .no_setcond
= true };
998 case CC_OP_SHLB
... CC_OP_SHLQ
:
999 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
1000 size
= s
->cc_op
- CC_OP_SHLB
;
1001 shift
= (8 << size
) - 1;
1002 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1003 .mask
= (target_ulong
)1 << shift
};
1005 case CC_OP_MULB
... CC_OP_MULQ
:
1006 return (CCPrepare
) { .cond
= TCG_COND_NE
,
1007 .reg
= cpu_cc_src
, .mask
= -1 };
1009 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
1010 size
= s
->cc_op
- CC_OP_BMILGB
;
1011 t0
= gen_ext_tl(reg
, cpu_cc_src
, size
, false);
1012 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1016 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
1017 .mask
= -1, .no_setcond
= true };
1020 case CC_OP_SARB
... CC_OP_SARQ
:
1022 return (CCPrepare
) { .cond
= TCG_COND_NE
,
1023 .reg
= cpu_cc_src
, .mask
= CC_C
};
1026 /* The need to compute only C from CC_OP_DYNAMIC is important
1027 in efficiently implementing e.g. INC at the start of a TB. */
1028 gen_update_cc_op(s
);
1029 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
1030 cpu_cc_src2
, cpu_cc_op
);
1031 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1032 .mask
= -1, .no_setcond
= true };
1036 /* compute eflags.P to reg */
1037 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
1039 gen_compute_eflags(s
);
1040 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1044 /* compute eflags.S to reg */
1045 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
1049 gen_compute_eflags(s
);
1055 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1058 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1061 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1062 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
1063 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
1068 /* compute eflags.O to reg */
1069 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
1074 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
1075 .mask
= -1, .no_setcond
= true };
1077 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1079 gen_compute_eflags(s
);
1080 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1085 /* compute eflags.Z to reg */
1086 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
1090 gen_compute_eflags(s
);
1096 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1099 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
1102 int size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1103 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
1104 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1109 /* perform a conditional store into register 'reg' according to jump opcode
1110 value 'b'. In the fast case, T0 is guaranted not to be used. */
1111 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
1113 int inv
, jcc_op
, size
, cond
;
1118 jcc_op
= (b
>> 1) & 7;
1121 case CC_OP_SUBB
... CC_OP_SUBQ
:
1122 /* We optimize relational operators for the cmp/jcc case. */
1123 size
= s
->cc_op
- CC_OP_SUBB
;
1126 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1127 gen_extu(size
, cpu_tmp4
);
1128 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
1129 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= cpu_tmp4
,
1130 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1139 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
1140 gen_exts(size
, cpu_tmp4
);
1141 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
1142 cc
= (CCPrepare
) { .cond
= cond
, .reg
= cpu_tmp4
,
1143 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1153 /* This actually generates good code for JC, JZ and JS. */
1156 cc
= gen_prepare_eflags_o(s
, reg
);
1159 cc
= gen_prepare_eflags_c(s
, reg
);
1162 cc
= gen_prepare_eflags_z(s
, reg
);
1165 gen_compute_eflags(s
);
1166 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1167 .mask
= CC_Z
| CC_C
};
1170 cc
= gen_prepare_eflags_s(s
, reg
);
1173 cc
= gen_prepare_eflags_p(s
, reg
);
1176 gen_compute_eflags(s
);
1177 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1180 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1181 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1182 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1187 gen_compute_eflags(s
);
1188 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
1191 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1192 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1193 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1194 .mask
= CC_S
| CC_Z
};
1201 cc
.cond
= tcg_invert_cond(cc
.cond
);
1206 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
1208 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
1210 if (cc
.no_setcond
) {
1211 if (cc
.cond
== TCG_COND_EQ
) {
1212 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1214 tcg_gen_mov_tl(reg
, cc
.reg
);
1219 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1220 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1221 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1222 tcg_gen_andi_tl(reg
, reg
, 1);
1225 if (cc
.mask
!= -1) {
1226 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1230 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1232 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1236 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1238 gen_setcc1(s
, JCC_B
<< 1, reg
);
1241 /* generate a conditional jump to label 'l1' according to jump opcode
1242 value 'b'. In the fast case, T0 is guaranted not to be used. */
1243 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, int l1
)
1245 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1247 if (cc
.mask
!= -1) {
1248 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1252 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1254 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1258 /* Generate a conditional jump to label 'l1' according to jump opcode
1259 value 'b'. In the fast case, T0 is guaranted not to be used.
1260 A translation block must end soon. */
1261 static inline void gen_jcc1(DisasContext
*s
, int b
, int l1
)
1263 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T
[0]);
1265 gen_update_cc_op(s
);
1266 if (cc
.mask
!= -1) {
1267 tcg_gen_andi_tl(cpu_T
[0], cc
.reg
, cc
.mask
);
1270 set_cc_op(s
, CC_OP_DYNAMIC
);
1272 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1274 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1278 /* XXX: does not work with gdbstub "ice" single step - not a
1280 static int gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1284 l1
= gen_new_label();
1285 l2
= gen_new_label();
1286 gen_op_jnz_ecx(s
->aflag
, l1
);
1288 gen_jmp_tb(s
, next_eip
, 1);
1293 static inline void gen_stos(DisasContext
*s
, int ot
)
1295 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
1296 gen_string_movl_A0_EDI(s
);
1297 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1298 gen_op_movl_T0_Dshift(ot
);
1299 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1302 static inline void gen_lods(DisasContext
*s
, int ot
)
1304 gen_string_movl_A0_ESI(s
);
1305 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1306 gen_op_mov_reg_T0(ot
, R_EAX
);
1307 gen_op_movl_T0_Dshift(ot
);
1308 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1311 static inline void gen_scas(DisasContext
*s
, int ot
)
1313 gen_string_movl_A0_EDI(s
);
1314 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1315 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1316 gen_op_movl_T0_Dshift(ot
);
1317 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1320 static inline void gen_cmps(DisasContext
*s
, int ot
)
1322 gen_string_movl_A0_EDI(s
);
1323 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
1324 gen_string_movl_A0_ESI(s
);
1325 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1326 gen_op_movl_T0_Dshift(ot
);
1327 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1328 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1331 static inline void gen_ins(DisasContext
*s
, int ot
)
1335 gen_string_movl_A0_EDI(s
);
1336 /* Note: we must do this dummy write first to be restartable in
1337 case of page fault. */
1339 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1340 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1341 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1342 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1343 gen_helper_in_func(ot
, cpu_T
[0], cpu_tmp2_i32
);
1344 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1345 gen_op_movl_T0_Dshift(ot
);
1346 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1351 static inline void gen_outs(DisasContext
*s
, int ot
)
1355 gen_string_movl_A0_ESI(s
);
1356 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1358 gen_op_mov_TN_reg(OT_WORD
, 1, R_EDX
);
1359 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[1]);
1360 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1361 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[0]);
1362 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1364 gen_op_movl_T0_Dshift(ot
);
1365 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1370 /* same method as Valgrind : we generate jumps to current or next
1372 #define GEN_REPZ(op) \
1373 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1374 target_ulong cur_eip, target_ulong next_eip) \
1377 gen_update_cc_op(s); \
1378 l2 = gen_jz_ecx_string(s, next_eip); \
1379 gen_ ## op(s, ot); \
1380 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1381 /* a loop would cause two single step exceptions if ECX = 1 \
1382 before rep string_insn */ \
1384 gen_op_jz_ecx(s->aflag, l2); \
1385 gen_jmp(s, cur_eip); \
1388 #define GEN_REPZ2(op) \
1389 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1390 target_ulong cur_eip, \
1391 target_ulong next_eip, \
1395 gen_update_cc_op(s); \
1396 l2 = gen_jz_ecx_string(s, next_eip); \
1397 gen_ ## op(s, ot); \
1398 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1399 gen_update_cc_op(s); \
1400 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1402 gen_op_jz_ecx(s->aflag, l2); \
1403 gen_jmp(s, cur_eip); \
1414 static void gen_helper_fp_arith_ST0_FT0(int op
)
1418 gen_helper_fadd_ST0_FT0(cpu_env
);
1421 gen_helper_fmul_ST0_FT0(cpu_env
);
1424 gen_helper_fcom_ST0_FT0(cpu_env
);
1427 gen_helper_fcom_ST0_FT0(cpu_env
);
1430 gen_helper_fsub_ST0_FT0(cpu_env
);
1433 gen_helper_fsubr_ST0_FT0(cpu_env
);
1436 gen_helper_fdiv_ST0_FT0(cpu_env
);
1439 gen_helper_fdivr_ST0_FT0(cpu_env
);
1444 /* NOTE the exception in "r" op ordering */
1445 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1447 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1450 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1453 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1456 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1459 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1462 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1465 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1470 /* if d == OR_TMP0, it means memory operand (address in A0) */
1471 static void gen_op(DisasContext
*s1
, int op
, int ot
, int d
)
1474 gen_op_mov_TN_reg(ot
, 0, d
);
1476 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1480 gen_compute_eflags_c(s1
, cpu_tmp4
);
1481 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1482 tcg_gen_add_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1484 gen_op_mov_reg_T0(ot
, d
);
1486 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1487 gen_op_update3_cc(cpu_tmp4
);
1488 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1491 gen_compute_eflags_c(s1
, cpu_tmp4
);
1492 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1493 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_tmp4
);
1495 gen_op_mov_reg_T0(ot
, d
);
1497 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1498 gen_op_update3_cc(cpu_tmp4
);
1499 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1502 gen_op_addl_T0_T1();
1504 gen_op_mov_reg_T0(ot
, d
);
1506 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1507 gen_op_update2_cc();
1508 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1511 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1512 tcg_gen_sub_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1514 gen_op_mov_reg_T0(ot
, d
);
1516 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1517 gen_op_update2_cc();
1518 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1522 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1524 gen_op_mov_reg_T0(ot
, d
);
1526 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1527 gen_op_update1_cc();
1528 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1531 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1533 gen_op_mov_reg_T0(ot
, d
);
1535 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1536 gen_op_update1_cc();
1537 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1540 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1542 gen_op_mov_reg_T0(ot
, d
);
1544 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1545 gen_op_update1_cc();
1546 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1549 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[1]);
1550 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T
[0]);
1551 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T
[0], cpu_T
[1]);
1552 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1557 /* if d == OR_TMP0, it means memory operand (address in A0) */
1558 static void gen_inc(DisasContext
*s1
, int ot
, int d
, int c
)
1561 gen_op_mov_TN_reg(ot
, 0, d
);
1563 gen_op_ld_T0_A0(ot
+ s1
->mem_index
);
1564 gen_compute_eflags_c(s1
, cpu_cc_src
);
1566 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], 1);
1567 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1569 tcg_gen_addi_tl(cpu_T
[0], cpu_T
[0], -1);
1570 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1573 gen_op_mov_reg_T0(ot
, d
);
1575 gen_op_st_T0_A0(ot
+ s1
->mem_index
);
1576 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1579 static void gen_shift_flags(DisasContext
*s
, int ot
, TCGv result
, TCGv shm1
,
1580 TCGv count
, bool is_right
)
1582 TCGv_i32 z32
, s32
, oldop
;
1585 /* Store the results into the CC variables. If we know that the
1586 variable must be dead, store unconditionally. Otherwise we'll
1587 need to not disrupt the current contents. */
1588 z_tl
= tcg_const_tl(0);
1589 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1590 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1591 result
, cpu_cc_dst
);
1593 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1595 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1596 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1599 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1601 tcg_temp_free(z_tl
);
1603 /* Get the two potential CC_OP values into temporaries. */
1604 tcg_gen_movi_i32(cpu_tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1605 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1608 tcg_gen_movi_i32(cpu_tmp3_i32
, s
->cc_op
);
1609 oldop
= cpu_tmp3_i32
;
1612 /* Conditionally store the CC_OP value. */
1613 z32
= tcg_const_i32(0);
1614 s32
= tcg_temp_new_i32();
1615 tcg_gen_trunc_tl_i32(s32
, count
);
1616 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, cpu_tmp2_i32
, oldop
);
1617 tcg_temp_free_i32(z32
);
1618 tcg_temp_free_i32(s32
);
1620 /* The CC_OP value is no longer predictable. */
1621 set_cc_op(s
, CC_OP_DYNAMIC
);
1624 static void gen_shift_rm_T1(DisasContext
*s
, int ot
, int op1
,
1625 int is_right
, int is_arith
)
1627 target_ulong mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1630 if (op1
== OR_TMP0
) {
1631 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1633 gen_op_mov_TN_reg(ot
, 0, op1
);
1636 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1637 tcg_gen_subi_tl(cpu_tmp0
, cpu_T
[1], 1);
1641 gen_exts(ot
, cpu_T
[0]);
1642 tcg_gen_sar_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1643 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1645 gen_extu(ot
, cpu_T
[0]);
1646 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1647 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1650 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1651 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1655 if (op1
== OR_TMP0
) {
1656 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1658 gen_op_mov_reg_T0(ot
, op1
);
1661 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, cpu_T
[1], is_right
);
1664 static void gen_shift_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1665 int is_right
, int is_arith
)
1667 int mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1671 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1673 gen_op_mov_TN_reg(ot
, 0, op1
);
1679 gen_exts(ot
, cpu_T
[0]);
1680 tcg_gen_sari_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1681 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], op2
);
1683 gen_extu(ot
, cpu_T
[0]);
1684 tcg_gen_shri_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1685 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], op2
);
1688 tcg_gen_shli_tl(cpu_tmp4
, cpu_T
[0], op2
- 1);
1689 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], op2
);
1695 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1697 gen_op_mov_reg_T0(ot
, op1
);
1699 /* update eflags if non zero shift */
1701 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1702 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
1703 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1707 static inline void tcg_gen_lshift(TCGv ret
, TCGv arg1
, target_long arg2
)
1710 tcg_gen_shli_tl(ret
, arg1
, arg2
);
1712 tcg_gen_shri_tl(ret
, arg1
, -arg2
);
1715 static void gen_rot_rm_T1(DisasContext
*s
, int ot
, int op1
, int is_right
)
1717 target_ulong mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1721 if (op1
== OR_TMP0
) {
1722 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1724 gen_op_mov_TN_reg(ot
, 0, op1
);
1727 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], mask
);
1731 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1732 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
1733 tcg_gen_muli_tl(cpu_T
[0], cpu_T
[0], 0x01010101);
1736 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1737 tcg_gen_deposit_tl(cpu_T
[0], cpu_T
[0], cpu_T
[0], 16, 16);
1740 #ifdef TARGET_X86_64
1742 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1743 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
1745 tcg_gen_rotr_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1747 tcg_gen_rotl_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1749 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1754 tcg_gen_rotr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1756 tcg_gen_rotl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1762 if (op1
== OR_TMP0
) {
1763 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1765 gen_op_mov_reg_T0(ot
, op1
);
1768 /* We'll need the flags computed into CC_SRC. */
1769 gen_compute_eflags(s
);
1771 /* The value that was "rotated out" is now present at the other end
1772 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1773 since we've computed the flags into CC_SRC, these variables are
1776 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1777 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1778 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1780 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1781 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1783 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1784 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1786 /* Now conditionally store the new CC_OP value. If the shift count
1787 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1788 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1789 exactly as we computed above. */
1790 t0
= tcg_const_i32(0);
1791 t1
= tcg_temp_new_i32();
1792 tcg_gen_trunc_tl_i32(t1
, cpu_T
[1]);
1793 tcg_gen_movi_i32(cpu_tmp2_i32
, CC_OP_ADCOX
);
1794 tcg_gen_movi_i32(cpu_tmp3_i32
, CC_OP_EFLAGS
);
1795 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1796 cpu_tmp2_i32
, cpu_tmp3_i32
);
1797 tcg_temp_free_i32(t0
);
1798 tcg_temp_free_i32(t1
);
1800 /* The CC_OP value is no longer predictable. */
1801 set_cc_op(s
, CC_OP_DYNAMIC
);
1804 static void gen_rot_rm_im(DisasContext
*s
, int ot
, int op1
, int op2
,
1807 int mask
= (ot
== OT_QUAD
? 0x3f : 0x1f);
1811 if (op1
== OR_TMP0
) {
1812 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1814 gen_op_mov_TN_reg(ot
, 0, op1
);
1820 #ifdef TARGET_X86_64
1822 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
1824 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1826 tcg_gen_rotli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1828 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
1833 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], op2
);
1835 tcg_gen_rotli_tl(cpu_T
[0], cpu_T
[0], op2
);
1846 shift
= mask
+ 1 - shift
;
1848 gen_extu(ot
, cpu_T
[0]);
1849 tcg_gen_shli_tl(cpu_tmp0
, cpu_T
[0], shift
);
1850 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], mask
+ 1 - shift
);
1851 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
1857 if (op1
== OR_TMP0
) {
1858 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1860 gen_op_mov_reg_T0(ot
, op1
);
1864 /* Compute the flags into CC_SRC. */
1865 gen_compute_eflags(s
);
1867 /* The value that was "rotated out" is now present at the other end
1868 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1869 since we've computed the flags into CC_SRC, these variables are
1872 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
- 1);
1873 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T
[0], mask
);
1874 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1876 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T
[0], mask
);
1877 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T
[0], 1);
1879 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1880 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1881 set_cc_op(s
, CC_OP_ADCOX
);
1885 /* XXX: add faster immediate = 1 case */
1886 static void gen_rotc_rm_T1(DisasContext
*s
, int ot
, int op1
,
1889 gen_compute_eflags(s
);
1890 assert(s
->cc_op
== CC_OP_EFLAGS
);
1894 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1896 gen_op_mov_TN_reg(ot
, 0, op1
);
1901 gen_helper_rcrb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1904 gen_helper_rcrw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1907 gen_helper_rcrl(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1909 #ifdef TARGET_X86_64
1911 gen_helper_rcrq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1918 gen_helper_rclb(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1921 gen_helper_rclw(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1924 gen_helper_rcll(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1926 #ifdef TARGET_X86_64
1928 gen_helper_rclq(cpu_T
[0], cpu_env
, cpu_T
[0], cpu_T
[1]);
1935 gen_op_st_T0_A0(ot
+ s
->mem_index
);
1937 gen_op_mov_reg_T0(ot
, op1
);
1940 /* XXX: add faster immediate case */
1941 static void gen_shiftd_rm_T1(DisasContext
*s
, int ot
, int op1
,
1942 bool is_right
, TCGv count_in
)
1944 target_ulong mask
= (ot
== OT_QUAD
? 63 : 31);
1948 if (op1
== OR_TMP0
) {
1949 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
1951 gen_op_mov_TN_reg(ot
, 0, op1
);
1954 count
= tcg_temp_new();
1955 tcg_gen_andi_tl(count
, count_in
, mask
);
1959 /* Note: we implement the Intel behaviour for shift count > 16.
1960 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1961 portion by constructing it as a 32-bit value. */
1963 tcg_gen_deposit_tl(cpu_tmp0
, cpu_T
[0], cpu_T
[1], 16, 16);
1964 tcg_gen_mov_tl(cpu_T
[1], cpu_T
[0]);
1965 tcg_gen_mov_tl(cpu_T
[0], cpu_tmp0
);
1967 tcg_gen_deposit_tl(cpu_T
[1], cpu_T
[0], cpu_T
[1], 16, 16);
1970 #ifdef TARGET_X86_64
1972 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1973 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1975 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
1976 tcg_gen_shr_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1977 tcg_gen_shr_i64(cpu_T
[0], cpu_T
[0], count
);
1979 tcg_gen_concat_tl_i64(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
1980 tcg_gen_shl_i64(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1981 tcg_gen_shl_i64(cpu_T
[0], cpu_T
[0], count
);
1982 tcg_gen_shri_i64(cpu_tmp0
, cpu_tmp0
, 32);
1983 tcg_gen_shri_i64(cpu_T
[0], cpu_T
[0], 32);
1988 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1990 tcg_gen_shr_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1992 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1993 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], count
);
1994 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
1996 tcg_gen_shl_tl(cpu_tmp0
, cpu_T
[0], cpu_tmp0
);
1997 if (ot
== OT_WORD
) {
1998 /* Only needed if count > 16, for Intel behaviour. */
1999 tcg_gen_subfi_tl(cpu_tmp4
, 33, count
);
2000 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[1], cpu_tmp4
);
2001 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, cpu_tmp4
);
2004 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
2005 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], count
);
2006 tcg_gen_shr_tl(cpu_T
[1], cpu_T
[1], cpu_tmp4
);
2008 tcg_gen_movi_tl(cpu_tmp4
, 0);
2009 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T
[1], count
, cpu_tmp4
,
2010 cpu_tmp4
, cpu_T
[1]);
2011 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
2016 if (op1
== OR_TMP0
) {
2017 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2019 gen_op_mov_reg_T0(ot
, op1
);
2022 gen_shift_flags(s
, ot
, cpu_T
[0], cpu_tmp0
, count
, is_right
);
2023 tcg_temp_free(count
);
2026 static void gen_shift(DisasContext
*s1
, int op
, int ot
, int d
, int s
)
2029 gen_op_mov_TN_reg(ot
, 1, s
);
2032 gen_rot_rm_T1(s1
, ot
, d
, 0);
2035 gen_rot_rm_T1(s1
, ot
, d
, 1);
2039 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2042 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2045 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2048 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2051 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2056 static void gen_shifti(DisasContext
*s1
, int op
, int ot
, int d
, int c
)
2060 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2063 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2067 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2070 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2073 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2076 /* currently not optimized */
2077 gen_op_movl_T1_im(c
);
2078 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2083 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2084 int *reg_ptr
, int *offset_ptr
)
2092 int mod
, rm
, code
, override
, must_add_seg
;
2095 override
= s
->override
;
2096 must_add_seg
= s
->addseg
;
2099 mod
= (modrm
>> 6) & 3;
2110 code
= cpu_ldub_code(env
, s
->pc
++);
2111 scale
= (code
>> 6) & 3;
2112 index
= ((code
>> 3) & 7) | REX_X(s
);
2114 index
= -1; /* no index */
2122 if ((base
& 7) == 5) {
2124 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2126 if (CODE64(s
) && !havesib
) {
2127 disp
+= s
->pc
+ s
->rip_offset
;
2134 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2138 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
2143 /* For correct popl handling with esp. */
2144 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2145 disp
+= s
->popl_esp_hack
;
2148 /* Compute the address, with a minimum number of TCG ops. */
2152 sum
= cpu_regs
[index
];
2154 tcg_gen_shli_tl(cpu_A0
, cpu_regs
[index
], scale
);
2158 tcg_gen_add_tl(cpu_A0
, sum
, cpu_regs
[base
]);
2161 } else if (base
>= 0) {
2162 sum
= cpu_regs
[base
];
2164 if (TCGV_IS_UNUSED(sum
)) {
2165 tcg_gen_movi_tl(cpu_A0
, disp
);
2167 tcg_gen_addi_tl(cpu_A0
, sum
, disp
);
2172 if (base
== R_EBP
|| base
== R_ESP
) {
2179 tcg_gen_ld_tl(cpu_tmp0
, cpu_env
,
2180 offsetof(CPUX86State
, segs
[override
].base
));
2182 if (s
->aflag
!= 2) {
2183 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
2185 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
2189 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
2192 if (s
->aflag
!= 2) {
2193 tcg_gen_ext32u_tl(cpu_A0
, cpu_A0
);
2199 disp
= cpu_lduw_code(env
, s
->pc
);
2201 gen_op_movl_A0_im(disp
);
2202 rm
= 0; /* avoid SS override */
2209 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
2213 disp
= cpu_lduw_code(env
, s
->pc
);
2219 gen_op_movl_A0_reg(R_EBX
);
2220 gen_op_addl_A0_reg_sN(0, R_ESI
);
2223 gen_op_movl_A0_reg(R_EBX
);
2224 gen_op_addl_A0_reg_sN(0, R_EDI
);
2227 gen_op_movl_A0_reg(R_EBP
);
2228 gen_op_addl_A0_reg_sN(0, R_ESI
);
2231 gen_op_movl_A0_reg(R_EBP
);
2232 gen_op_addl_A0_reg_sN(0, R_EDI
);
2235 gen_op_movl_A0_reg(R_ESI
);
2238 gen_op_movl_A0_reg(R_EDI
);
2241 gen_op_movl_A0_reg(R_EBP
);
2245 gen_op_movl_A0_reg(R_EBX
);
2249 gen_op_addl_A0_im(disp
);
2250 gen_op_andl_A0_ffff();
2254 if (rm
== 2 || rm
== 3 || rm
== 6)
2259 gen_op_addl_A0_seg(s
, override
);
2270 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2272 int mod
, rm
, base
, code
;
2274 mod
= (modrm
>> 6) & 3;
2284 code
= cpu_ldub_code(env
, s
->pc
++);
2320 /* used for LEA and MOV AX, mem */
2321 static void gen_add_A0_ds_seg(DisasContext
*s
)
2323 int override
, must_add_seg
;
2324 must_add_seg
= s
->addseg
;
2326 if (s
->override
>= 0) {
2327 override
= s
->override
;
2331 #ifdef TARGET_X86_64
2333 gen_op_addq_A0_seg(override
);
2337 gen_op_addl_A0_seg(s
, override
);
2342 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2344 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2345 int ot
, int reg
, int is_store
)
2347 int mod
, rm
, opreg
, disp
;
2349 mod
= (modrm
>> 6) & 3;
2350 rm
= (modrm
& 7) | REX_B(s
);
2354 gen_op_mov_TN_reg(ot
, 0, reg
);
2355 gen_op_mov_reg_T0(ot
, rm
);
2357 gen_op_mov_TN_reg(ot
, 0, rm
);
2359 gen_op_mov_reg_T0(ot
, reg
);
2362 gen_lea_modrm(env
, s
, modrm
, &opreg
, &disp
);
2365 gen_op_mov_TN_reg(ot
, 0, reg
);
2366 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2368 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
2370 gen_op_mov_reg_T0(ot
, reg
);
2375 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, int ot
)
2381 ret
= cpu_ldub_code(env
, s
->pc
);
2385 ret
= cpu_lduw_code(env
, s
->pc
);
2390 ret
= cpu_ldl_code(env
, s
->pc
);
2397 static inline int insn_const_size(unsigned int ot
)
2405 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2407 TranslationBlock
*tb
;
2410 pc
= s
->cs_base
+ eip
;
2412 /* NOTE: we handle the case where the TB spans two pages here */
2413 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2414 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2415 /* jump to same page: we can use a direct jump */
2416 tcg_gen_goto_tb(tb_num
);
2418 tcg_gen_exit_tb((uintptr_t)tb
+ tb_num
);
2420 /* jump to another page: currently not optimized */
2426 static inline void gen_jcc(DisasContext
*s
, int b
,
2427 target_ulong val
, target_ulong next_eip
)
2432 l1
= gen_new_label();
2435 gen_goto_tb(s
, 0, next_eip
);
2438 gen_goto_tb(s
, 1, val
);
2439 s
->is_jmp
= DISAS_TB_JUMP
;
2441 l1
= gen_new_label();
2442 l2
= gen_new_label();
2445 gen_jmp_im(next_eip
);
2455 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, int ot
, int b
,
2460 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2462 cc
= gen_prepare_cc(s
, b
, cpu_T
[1]);
2463 if (cc
.mask
!= -1) {
2464 TCGv t0
= tcg_temp_new();
2465 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2469 cc
.reg2
= tcg_const_tl(cc
.imm
);
2472 tcg_gen_movcond_tl(cc
.cond
, cpu_T
[0], cc
.reg
, cc
.reg2
,
2473 cpu_T
[0], cpu_regs
[reg
]);
2474 gen_op_mov_reg_T0(ot
, reg
);
2476 if (cc
.mask
!= -1) {
2477 tcg_temp_free(cc
.reg
);
2480 tcg_temp_free(cc
.reg2
);
2484 static inline void gen_op_movl_T0_seg(int seg_reg
)
2486 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
2487 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2490 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2492 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xffff);
2493 tcg_gen_st32_tl(cpu_T
[0], cpu_env
,
2494 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2495 tcg_gen_shli_tl(cpu_T
[0], cpu_T
[0], 4);
2496 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
2497 offsetof(CPUX86State
,segs
[seg_reg
].base
));
2500 /* move T0 to seg_reg and compute if the CPU state may change. Never
2501 call this function with seg_reg == R_CS */
2502 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
, target_ulong cur_eip
)
2504 if (s
->pe
&& !s
->vm86
) {
2505 /* XXX: optimize by finding processor state dynamically */
2506 gen_update_cc_op(s
);
2507 gen_jmp_im(cur_eip
);
2508 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
2509 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2510 /* abort translation because the addseg value may change or
2511 because ss32 may change. For R_SS, translation must always
2512 stop as a special handling must be done to disable hardware
2513 interrupts for the next instruction */
2514 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2515 s
->is_jmp
= DISAS_TB_JUMP
;
2517 gen_op_movl_seg_T0_vm(seg_reg
);
2518 if (seg_reg
== R_SS
)
2519 s
->is_jmp
= DISAS_TB_JUMP
;
2523 static inline int svm_is_rep(int prefixes
)
2525 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2529 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2530 uint32_t type
, uint64_t param
)
2532 /* no SVM activated; fast case */
2533 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2535 gen_update_cc_op(s
);
2536 gen_jmp_im(pc_start
- s
->cs_base
);
2537 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2538 tcg_const_i64(param
));
2542 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2544 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2547 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2549 #ifdef TARGET_X86_64
2551 gen_op_add_reg_im(2, R_ESP
, addend
);
2555 gen_op_add_reg_im(1, R_ESP
, addend
);
2557 gen_op_add_reg_im(0, R_ESP
, addend
);
2561 /* generate a push. It depends on ss32, addseg and dflag */
2562 static void gen_push_T0(DisasContext
*s
)
2564 #ifdef TARGET_X86_64
2566 gen_op_movq_A0_reg(R_ESP
);
2568 gen_op_addq_A0_im(-8);
2569 gen_op_st_T0_A0(OT_QUAD
+ s
->mem_index
);
2571 gen_op_addq_A0_im(-2);
2572 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2574 gen_op_mov_reg_A0(2, R_ESP
);
2578 gen_op_movl_A0_reg(R_ESP
);
2580 gen_op_addl_A0_im(-2);
2582 gen_op_addl_A0_im(-4);
2585 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2586 gen_op_addl_A0_seg(s
, R_SS
);
2589 gen_op_andl_A0_ffff();
2590 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2591 gen_op_addl_A0_seg(s
, R_SS
);
2593 gen_op_st_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2594 if (s
->ss32
&& !s
->addseg
)
2595 gen_op_mov_reg_A0(1, R_ESP
);
2597 gen_op_mov_reg_T1(s
->ss32
+ 1, R_ESP
);
2601 /* generate a push. It depends on ss32, addseg and dflag */
2602 /* slower version for T1, only used for call Ev */
2603 static void gen_push_T1(DisasContext
*s
)
2605 #ifdef TARGET_X86_64
2607 gen_op_movq_A0_reg(R_ESP
);
2609 gen_op_addq_A0_im(-8);
2610 gen_op_st_T1_A0(OT_QUAD
+ s
->mem_index
);
2612 gen_op_addq_A0_im(-2);
2613 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
2615 gen_op_mov_reg_A0(2, R_ESP
);
2619 gen_op_movl_A0_reg(R_ESP
);
2621 gen_op_addl_A0_im(-2);
2623 gen_op_addl_A0_im(-4);
2626 gen_op_addl_A0_seg(s
, R_SS
);
2629 gen_op_andl_A0_ffff();
2630 gen_op_addl_A0_seg(s
, R_SS
);
2632 gen_op_st_T1_A0(s
->dflag
+ 1 + s
->mem_index
);
2634 if (s
->ss32
&& !s
->addseg
)
2635 gen_op_mov_reg_A0(1, R_ESP
);
2637 gen_stack_update(s
, (-2) << s
->dflag
);
2641 /* two step pop is necessary for precise exceptions */
2642 static void gen_pop_T0(DisasContext
*s
)
2644 #ifdef TARGET_X86_64
2646 gen_op_movq_A0_reg(R_ESP
);
2647 gen_op_ld_T0_A0((s
->dflag
? OT_QUAD
: OT_WORD
) + s
->mem_index
);
2651 gen_op_movl_A0_reg(R_ESP
);
2654 gen_op_addl_A0_seg(s
, R_SS
);
2656 gen_op_andl_A0_ffff();
2657 gen_op_addl_A0_seg(s
, R_SS
);
2659 gen_op_ld_T0_A0(s
->dflag
+ 1 + s
->mem_index
);
2663 static void gen_pop_update(DisasContext
*s
)
2665 #ifdef TARGET_X86_64
2666 if (CODE64(s
) && s
->dflag
) {
2667 gen_stack_update(s
, 8);
2671 gen_stack_update(s
, 2 << s
->dflag
);
2675 static void gen_stack_A0(DisasContext
*s
)
2677 gen_op_movl_A0_reg(R_ESP
);
2679 gen_op_andl_A0_ffff();
2680 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2682 gen_op_addl_A0_seg(s
, R_SS
);
2685 /* NOTE: wrap around in 16 bit not fully handled */
2686 static void gen_pusha(DisasContext
*s
)
2689 gen_op_movl_A0_reg(R_ESP
);
2690 gen_op_addl_A0_im(-16 << s
->dflag
);
2692 gen_op_andl_A0_ffff();
2693 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2695 gen_op_addl_A0_seg(s
, R_SS
);
2696 for(i
= 0;i
< 8; i
++) {
2697 gen_op_mov_TN_reg(OT_LONG
, 0, 7 - i
);
2698 gen_op_st_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2699 gen_op_addl_A0_im(2 << s
->dflag
);
2701 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2704 /* NOTE: wrap around in 16 bit not fully handled */
2705 static void gen_popa(DisasContext
*s
)
2708 gen_op_movl_A0_reg(R_ESP
);
2710 gen_op_andl_A0_ffff();
2711 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2712 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], 16 << s
->dflag
);
2714 gen_op_addl_A0_seg(s
, R_SS
);
2715 for(i
= 0;i
< 8; i
++) {
2716 /* ESP is not reloaded */
2718 gen_op_ld_T0_A0(OT_WORD
+ s
->dflag
+ s
->mem_index
);
2719 gen_op_mov_reg_T0(OT_WORD
+ s
->dflag
, 7 - i
);
2721 gen_op_addl_A0_im(2 << s
->dflag
);
2723 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2726 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2731 #ifdef TARGET_X86_64
2733 ot
= s
->dflag
? OT_QUAD
: OT_WORD
;
2736 gen_op_movl_A0_reg(R_ESP
);
2737 gen_op_addq_A0_im(-opsize
);
2738 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2741 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2742 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2744 /* XXX: must save state */
2745 gen_helper_enter64_level(cpu_env
, tcg_const_i32(level
),
2746 tcg_const_i32((ot
== OT_QUAD
)),
2749 gen_op_mov_reg_T1(ot
, R_EBP
);
2750 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2751 gen_op_mov_reg_T1(OT_QUAD
, R_ESP
);
2755 ot
= s
->dflag
+ OT_WORD
;
2756 opsize
= 2 << s
->dflag
;
2758 gen_op_movl_A0_reg(R_ESP
);
2759 gen_op_addl_A0_im(-opsize
);
2761 gen_op_andl_A0_ffff();
2762 tcg_gen_mov_tl(cpu_T
[1], cpu_A0
);
2764 gen_op_addl_A0_seg(s
, R_SS
);
2766 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
2767 gen_op_st_T0_A0(ot
+ s
->mem_index
);
2769 /* XXX: must save state */
2770 gen_helper_enter_level(cpu_env
, tcg_const_i32(level
),
2771 tcg_const_i32(s
->dflag
),
2774 gen_op_mov_reg_T1(ot
, R_EBP
);
2775 tcg_gen_addi_tl(cpu_T
[1], cpu_T
[1], -esp_addend
+ (-opsize
* level
));
2776 gen_op_mov_reg_T1(OT_WORD
+ s
->ss32
, R_ESP
);
2780 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2782 gen_update_cc_op(s
);
2783 gen_jmp_im(cur_eip
);
2784 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2785 s
->is_jmp
= DISAS_TB_JUMP
;
2788 /* an interrupt is different from an exception because of the
2790 static void gen_interrupt(DisasContext
*s
, int intno
,
2791 target_ulong cur_eip
, target_ulong next_eip
)
2793 gen_update_cc_op(s
);
2794 gen_jmp_im(cur_eip
);
2795 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2796 tcg_const_i32(next_eip
- cur_eip
));
2797 s
->is_jmp
= DISAS_TB_JUMP
;
2800 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2802 gen_update_cc_op(s
);
2803 gen_jmp_im(cur_eip
);
2804 gen_helper_debug(cpu_env
);
2805 s
->is_jmp
= DISAS_TB_JUMP
;
2808 /* generate a generic end of block. Trace exception is also generated
2810 static void gen_eob(DisasContext
*s
)
2812 gen_update_cc_op(s
);
2813 if (s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
) {
2814 gen_helper_reset_inhibit_irq(cpu_env
);
2816 if (s
->tb
->flags
& HF_RF_MASK
) {
2817 gen_helper_reset_rf(cpu_env
);
2819 if (s
->singlestep_enabled
) {
2820 gen_helper_debug(cpu_env
);
2822 gen_helper_single_step(cpu_env
);
2826 s
->is_jmp
= DISAS_TB_JUMP
;
2829 /* generate a jump to eip. No segment change must happen before as a
2830 direct call to the next block may occur */
2831 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2833 gen_update_cc_op(s
);
2834 set_cc_op(s
, CC_OP_DYNAMIC
);
2836 gen_goto_tb(s
, tb_num
, eip
);
2837 s
->is_jmp
= DISAS_TB_JUMP
;
2844 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2846 gen_jmp_tb(s
, eip
, 0);
2849 static inline void gen_ldq_env_A0(int idx
, int offset
)
2851 int mem_index
= (idx
>> 2) - 1;
2852 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2853 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2856 static inline void gen_stq_env_A0(int idx
, int offset
)
2858 int mem_index
= (idx
>> 2) - 1;
2859 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2860 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2863 static inline void gen_ldo_env_A0(int idx
, int offset
)
2865 int mem_index
= (idx
>> 2) - 1;
2866 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2867 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2868 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2869 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2870 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2873 static inline void gen_sto_env_A0(int idx
, int offset
)
2875 int mem_index
= (idx
>> 2) - 1;
2876 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2877 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
, mem_index
);
2878 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2879 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2880 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
);
2883 static inline void gen_op_movo(int d_offset
, int s_offset
)
2885 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2886 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2887 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
+ 8);
2888 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
+ 8);
2891 static inline void gen_op_movq(int d_offset
, int s_offset
)
2893 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, s_offset
);
2894 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2897 static inline void gen_op_movl(int d_offset
, int s_offset
)
2899 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
, s_offset
);
2900 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, d_offset
);
2903 static inline void gen_op_movq_env_0(int d_offset
)
2905 tcg_gen_movi_i64(cpu_tmp1_i64
, 0);
2906 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, d_offset
);
2909 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2910 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2911 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2912 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2913 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2914 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2916 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2917 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2920 #define SSE_SPECIAL ((void *)1)
2921 #define SSE_DUMMY ((void *)2)
2923 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2924 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2925 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2927 static const SSEFunc_0_epp sse_op_table1
[256][4] = {
2928 /* 3DNow! extensions */
2929 [0x0e] = { SSE_DUMMY
}, /* femms */
2930 [0x0f] = { SSE_DUMMY
}, /* pf... */
2931 /* pure SSE operations */
2932 [0x10] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2933 [0x11] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2934 [0x12] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd, movsldup, movddup */
2935 [0x13] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd */
2936 [0x14] = { gen_helper_punpckldq_xmm
, gen_helper_punpcklqdq_xmm
},
2937 [0x15] = { gen_helper_punpckhdq_xmm
, gen_helper_punpckhqdq_xmm
},
2938 [0x16] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd, movshdup */
2939 [0x17] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd */
2941 [0x28] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2942 [0x29] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2943 [0x2a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2944 [0x2b] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movntps, movntpd, movntss, movntsd */
2945 [0x2c] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2946 [0x2d] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2947 [0x2e] = { gen_helper_ucomiss
, gen_helper_ucomisd
},
2948 [0x2f] = { gen_helper_comiss
, gen_helper_comisd
},
2949 [0x50] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movmskps, movmskpd */
2950 [0x51] = SSE_FOP(sqrt
),
2951 [0x52] = { gen_helper_rsqrtps
, NULL
, gen_helper_rsqrtss
, NULL
},
2952 [0x53] = { gen_helper_rcpps
, NULL
, gen_helper_rcpss
, NULL
},
2953 [0x54] = { gen_helper_pand_xmm
, gen_helper_pand_xmm
}, /* andps, andpd */
2954 [0x55] = { gen_helper_pandn_xmm
, gen_helper_pandn_xmm
}, /* andnps, andnpd */
2955 [0x56] = { gen_helper_por_xmm
, gen_helper_por_xmm
}, /* orps, orpd */
2956 [0x57] = { gen_helper_pxor_xmm
, gen_helper_pxor_xmm
}, /* xorps, xorpd */
2957 [0x58] = SSE_FOP(add
),
2958 [0x59] = SSE_FOP(mul
),
2959 [0x5a] = { gen_helper_cvtps2pd
, gen_helper_cvtpd2ps
,
2960 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
},
2961 [0x5b] = { gen_helper_cvtdq2ps
, gen_helper_cvtps2dq
, gen_helper_cvttps2dq
},
2962 [0x5c] = SSE_FOP(sub
),
2963 [0x5d] = SSE_FOP(min
),
2964 [0x5e] = SSE_FOP(div
),
2965 [0x5f] = SSE_FOP(max
),
2967 [0xc2] = SSE_FOP(cmpeq
),
2968 [0xc6] = { (SSEFunc_0_epp
)gen_helper_shufps
,
2969 (SSEFunc_0_epp
)gen_helper_shufpd
}, /* XXX: casts */
2971 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2972 [0x38] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2973 [0x3a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2975 /* MMX ops and their SSE extensions */
2976 [0x60] = MMX_OP2(punpcklbw
),
2977 [0x61] = MMX_OP2(punpcklwd
),
2978 [0x62] = MMX_OP2(punpckldq
),
2979 [0x63] = MMX_OP2(packsswb
),
2980 [0x64] = MMX_OP2(pcmpgtb
),
2981 [0x65] = MMX_OP2(pcmpgtw
),
2982 [0x66] = MMX_OP2(pcmpgtl
),
2983 [0x67] = MMX_OP2(packuswb
),
2984 [0x68] = MMX_OP2(punpckhbw
),
2985 [0x69] = MMX_OP2(punpckhwd
),
2986 [0x6a] = MMX_OP2(punpckhdq
),
2987 [0x6b] = MMX_OP2(packssdw
),
2988 [0x6c] = { NULL
, gen_helper_punpcklqdq_xmm
},
2989 [0x6d] = { NULL
, gen_helper_punpckhqdq_xmm
},
2990 [0x6e] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movd mm, ea */
2991 [0x6f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, movdqu */
2992 [0x70] = { (SSEFunc_0_epp
)gen_helper_pshufw_mmx
,
2993 (SSEFunc_0_epp
)gen_helper_pshufd_xmm
,
2994 (SSEFunc_0_epp
)gen_helper_pshufhw_xmm
,
2995 (SSEFunc_0_epp
)gen_helper_pshuflw_xmm
}, /* XXX: casts */
2996 [0x71] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftw */
2997 [0x72] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftd */
2998 [0x73] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftq */
2999 [0x74] = MMX_OP2(pcmpeqb
),
3000 [0x75] = MMX_OP2(pcmpeqw
),
3001 [0x76] = MMX_OP2(pcmpeql
),
3002 [0x77] = { SSE_DUMMY
}, /* emms */
3003 [0x78] = { NULL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* extrq_i, insertq_i */
3004 [0x79] = { NULL
, gen_helper_extrq_r
, NULL
, gen_helper_insertq_r
},
3005 [0x7c] = { NULL
, gen_helper_haddpd
, NULL
, gen_helper_haddps
},
3006 [0x7d] = { NULL
, gen_helper_hsubpd
, NULL
, gen_helper_hsubps
},
3007 [0x7e] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movd, movd, , movq */
3008 [0x7f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, movdqu */
3009 [0xc4] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pinsrw */
3010 [0xc5] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pextrw */
3011 [0xd0] = { NULL
, gen_helper_addsubpd
, NULL
, gen_helper_addsubps
},
3012 [0xd1] = MMX_OP2(psrlw
),
3013 [0xd2] = MMX_OP2(psrld
),
3014 [0xd3] = MMX_OP2(psrlq
),
3015 [0xd4] = MMX_OP2(paddq
),
3016 [0xd5] = MMX_OP2(pmullw
),
3017 [0xd6] = { NULL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
3018 [0xd7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pmovmskb */
3019 [0xd8] = MMX_OP2(psubusb
),
3020 [0xd9] = MMX_OP2(psubusw
),
3021 [0xda] = MMX_OP2(pminub
),
3022 [0xdb] = MMX_OP2(pand
),
3023 [0xdc] = MMX_OP2(paddusb
),
3024 [0xdd] = MMX_OP2(paddusw
),
3025 [0xde] = MMX_OP2(pmaxub
),
3026 [0xdf] = MMX_OP2(pandn
),
3027 [0xe0] = MMX_OP2(pavgb
),
3028 [0xe1] = MMX_OP2(psraw
),
3029 [0xe2] = MMX_OP2(psrad
),
3030 [0xe3] = MMX_OP2(pavgw
),
3031 [0xe4] = MMX_OP2(pmulhuw
),
3032 [0xe5] = MMX_OP2(pmulhw
),
3033 [0xe6] = { NULL
, gen_helper_cvttpd2dq
, gen_helper_cvtdq2pd
, gen_helper_cvtpd2dq
},
3034 [0xe7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movntq, movntdq */
3035 [0xe8] = MMX_OP2(psubsb
),
3036 [0xe9] = MMX_OP2(psubsw
),
3037 [0xea] = MMX_OP2(pminsw
),
3038 [0xeb] = MMX_OP2(por
),
3039 [0xec] = MMX_OP2(paddsb
),
3040 [0xed] = MMX_OP2(paddsw
),
3041 [0xee] = MMX_OP2(pmaxsw
),
3042 [0xef] = MMX_OP2(pxor
),
3043 [0xf0] = { NULL
, NULL
, NULL
, SSE_SPECIAL
}, /* lddqu */
3044 [0xf1] = MMX_OP2(psllw
),
3045 [0xf2] = MMX_OP2(pslld
),
3046 [0xf3] = MMX_OP2(psllq
),
3047 [0xf4] = MMX_OP2(pmuludq
),
3048 [0xf5] = MMX_OP2(pmaddwd
),
3049 [0xf6] = MMX_OP2(psadbw
),
3050 [0xf7] = { (SSEFunc_0_epp
)gen_helper_maskmov_mmx
,
3051 (SSEFunc_0_epp
)gen_helper_maskmov_xmm
}, /* XXX: casts */
3052 [0xf8] = MMX_OP2(psubb
),
3053 [0xf9] = MMX_OP2(psubw
),
3054 [0xfa] = MMX_OP2(psubl
),
3055 [0xfb] = MMX_OP2(psubq
),
3056 [0xfc] = MMX_OP2(paddb
),
3057 [0xfd] = MMX_OP2(paddw
),
3058 [0xfe] = MMX_OP2(paddl
),
3061 static const SSEFunc_0_epp sse_op_table2
[3 * 8][2] = {
3062 [0 + 2] = MMX_OP2(psrlw
),
3063 [0 + 4] = MMX_OP2(psraw
),
3064 [0 + 6] = MMX_OP2(psllw
),
3065 [8 + 2] = MMX_OP2(psrld
),
3066 [8 + 4] = MMX_OP2(psrad
),
3067 [8 + 6] = MMX_OP2(pslld
),
3068 [16 + 2] = MMX_OP2(psrlq
),
3069 [16 + 3] = { NULL
, gen_helper_psrldq_xmm
},
3070 [16 + 6] = MMX_OP2(psllq
),
3071 [16 + 7] = { NULL
, gen_helper_pslldq_xmm
},
3074 static const SSEFunc_0_epi sse_op_table3ai
[] = {
3075 gen_helper_cvtsi2ss
,
3079 #ifdef TARGET_X86_64
3080 static const SSEFunc_0_epl sse_op_table3aq
[] = {
3081 gen_helper_cvtsq2ss
,
3086 static const SSEFunc_i_ep sse_op_table3bi
[] = {
3087 gen_helper_cvttss2si
,
3088 gen_helper_cvtss2si
,
3089 gen_helper_cvttsd2si
,
3093 #ifdef TARGET_X86_64
3094 static const SSEFunc_l_ep sse_op_table3bq
[] = {
3095 gen_helper_cvttss2sq
,
3096 gen_helper_cvtss2sq
,
3097 gen_helper_cvttsd2sq
,
3102 static const SSEFunc_0_epp sse_op_table4
[8][4] = {
3113 static const SSEFunc_0_epp sse_op_table5
[256] = {
3114 [0x0c] = gen_helper_pi2fw
,
3115 [0x0d] = gen_helper_pi2fd
,
3116 [0x1c] = gen_helper_pf2iw
,
3117 [0x1d] = gen_helper_pf2id
,
3118 [0x8a] = gen_helper_pfnacc
,
3119 [0x8e] = gen_helper_pfpnacc
,
3120 [0x90] = gen_helper_pfcmpge
,
3121 [0x94] = gen_helper_pfmin
,
3122 [0x96] = gen_helper_pfrcp
,
3123 [0x97] = gen_helper_pfrsqrt
,
3124 [0x9a] = gen_helper_pfsub
,
3125 [0x9e] = gen_helper_pfadd
,
3126 [0xa0] = gen_helper_pfcmpgt
,
3127 [0xa4] = gen_helper_pfmax
,
3128 [0xa6] = gen_helper_movq
, /* pfrcpit1; no need to actually increase precision */
3129 [0xa7] = gen_helper_movq
, /* pfrsqit1 */
3130 [0xaa] = gen_helper_pfsubr
,
3131 [0xae] = gen_helper_pfacc
,
3132 [0xb0] = gen_helper_pfcmpeq
,
3133 [0xb4] = gen_helper_pfmul
,
3134 [0xb6] = gen_helper_movq
, /* pfrcpit2 */
3135 [0xb7] = gen_helper_pmulhrw_mmx
,
3136 [0xbb] = gen_helper_pswapd
,
3137 [0xbf] = gen_helper_pavgb_mmx
/* pavgusb */
3140 struct SSEOpHelper_epp
{
3141 SSEFunc_0_epp op
[2];
3145 struct SSEOpHelper_eppi
{
3146 SSEFunc_0_eppi op
[2];
/* Builders for SSEOpHelper table entries: each pairs the helper
   function slot(s) with the CPUID extended-feature bit that is
   required for the instruction (the decoder tests this mask against
   s->cpuid_ext_features before emitting the op).  SSE4.1+ and the
   crypto extensions have no MMX form, so the first op slot is NULL;
   SSE_SPECIAL marks opcodes that are decoded by hand in gen_sse(). */
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
3158 static const struct SSEOpHelper_epp sse_op_table6
[256] = {
3159 [0x00] = SSSE3_OP(pshufb
),
3160 [0x01] = SSSE3_OP(phaddw
),
3161 [0x02] = SSSE3_OP(phaddd
),
3162 [0x03] = SSSE3_OP(phaddsw
),
3163 [0x04] = SSSE3_OP(pmaddubsw
),
3164 [0x05] = SSSE3_OP(phsubw
),
3165 [0x06] = SSSE3_OP(phsubd
),
3166 [0x07] = SSSE3_OP(phsubsw
),
3167 [0x08] = SSSE3_OP(psignb
),
3168 [0x09] = SSSE3_OP(psignw
),
3169 [0x0a] = SSSE3_OP(psignd
),
3170 [0x0b] = SSSE3_OP(pmulhrsw
),
3171 [0x10] = SSE41_OP(pblendvb
),
3172 [0x14] = SSE41_OP(blendvps
),
3173 [0x15] = SSE41_OP(blendvpd
),
3174 [0x17] = SSE41_OP(ptest
),
3175 [0x1c] = SSSE3_OP(pabsb
),
3176 [0x1d] = SSSE3_OP(pabsw
),
3177 [0x1e] = SSSE3_OP(pabsd
),
3178 [0x20] = SSE41_OP(pmovsxbw
),
3179 [0x21] = SSE41_OP(pmovsxbd
),
3180 [0x22] = SSE41_OP(pmovsxbq
),
3181 [0x23] = SSE41_OP(pmovsxwd
),
3182 [0x24] = SSE41_OP(pmovsxwq
),
3183 [0x25] = SSE41_OP(pmovsxdq
),
3184 [0x28] = SSE41_OP(pmuldq
),
3185 [0x29] = SSE41_OP(pcmpeqq
),
3186 [0x2a] = SSE41_SPECIAL
, /* movntqda */
3187 [0x2b] = SSE41_OP(packusdw
),
3188 [0x30] = SSE41_OP(pmovzxbw
),
3189 [0x31] = SSE41_OP(pmovzxbd
),
3190 [0x32] = SSE41_OP(pmovzxbq
),
3191 [0x33] = SSE41_OP(pmovzxwd
),
3192 [0x34] = SSE41_OP(pmovzxwq
),
3193 [0x35] = SSE41_OP(pmovzxdq
),
3194 [0x37] = SSE42_OP(pcmpgtq
),
3195 [0x38] = SSE41_OP(pminsb
),
3196 [0x39] = SSE41_OP(pminsd
),
3197 [0x3a] = SSE41_OP(pminuw
),
3198 [0x3b] = SSE41_OP(pminud
),
3199 [0x3c] = SSE41_OP(pmaxsb
),
3200 [0x3d] = SSE41_OP(pmaxsd
),
3201 [0x3e] = SSE41_OP(pmaxuw
),
3202 [0x3f] = SSE41_OP(pmaxud
),
3203 [0x40] = SSE41_OP(pmulld
),
3204 [0x41] = SSE41_OP(phminposuw
),
3205 [0xdb] = AESNI_OP(aesimc
),
3206 [0xdc] = AESNI_OP(aesenc
),
3207 [0xdd] = AESNI_OP(aesenclast
),
3208 [0xde] = AESNI_OP(aesdec
),
3209 [0xdf] = AESNI_OP(aesdeclast
),
3212 static const struct SSEOpHelper_eppi sse_op_table7
[256] = {
3213 [0x08] = SSE41_OP(roundps
),
3214 [0x09] = SSE41_OP(roundpd
),
3215 [0x0a] = SSE41_OP(roundss
),
3216 [0x0b] = SSE41_OP(roundsd
),
3217 [0x0c] = SSE41_OP(blendps
),
3218 [0x0d] = SSE41_OP(blendpd
),
3219 [0x0e] = SSE41_OP(pblendw
),
3220 [0x0f] = SSSE3_OP(palignr
),
3221 [0x14] = SSE41_SPECIAL
, /* pextrb */
3222 [0x15] = SSE41_SPECIAL
, /* pextrw */
3223 [0x16] = SSE41_SPECIAL
, /* pextrd/pextrq */
3224 [0x17] = SSE41_SPECIAL
, /* extractps */
3225 [0x20] = SSE41_SPECIAL
, /* pinsrb */
3226 [0x21] = SSE41_SPECIAL
, /* insertps */
3227 [0x22] = SSE41_SPECIAL
, /* pinsrd/pinsrq */
3228 [0x40] = SSE41_OP(dpps
),
3229 [0x41] = SSE41_OP(dppd
),
3230 [0x42] = SSE41_OP(mpsadbw
),
3231 [0x44] = PCLMULQDQ_OP(pclmulqdq
),
3232 [0x60] = SSE42_OP(pcmpestrm
),
3233 [0x61] = SSE42_OP(pcmpestri
),
3234 [0x62] = SSE42_OP(pcmpistrm
),
3235 [0x63] = SSE42_OP(pcmpistri
),
3236 [0xdf] = AESNI_OP(aeskeygenassist
),
3239 static void gen_sse(CPUX86State
*env
, DisasContext
*s
, int b
,
3240 target_ulong pc_start
, int rex_r
)
3242 int b1
, op1_offset
, op2_offset
, is_xmm
, val
, ot
;
3243 int modrm
, mod
, rm
, reg
, reg_addr
, offset_addr
;
3244 SSEFunc_0_epp sse_fn_epp
;
3245 SSEFunc_0_eppi sse_fn_eppi
;
3246 SSEFunc_0_ppi sse_fn_ppi
;
3247 SSEFunc_0_eppt sse_fn_eppt
;
3250 if (s
->prefix
& PREFIX_DATA
)
3252 else if (s
->prefix
& PREFIX_REPZ
)
3254 else if (s
->prefix
& PREFIX_REPNZ
)
3258 sse_fn_epp
= sse_op_table1
[b
][b1
];
3262 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3272 /* simple MMX/SSE operation */
3273 if (s
->flags
& HF_TS_MASK
) {
3274 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3277 if (s
->flags
& HF_EM_MASK
) {
3279 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
3282 if (is_xmm
&& !(s
->flags
& HF_OSFXSR_MASK
))
3283 if ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))
3286 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
3289 gen_helper_emms(cpu_env
);
3294 gen_helper_emms(cpu_env
);
3297 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3298 the static cpu state) */
3300 gen_helper_enter_mmx(cpu_env
);
3303 modrm
= cpu_ldub_code(env
, s
->pc
++);
3304 reg
= ((modrm
>> 3) & 7);
3307 mod
= (modrm
>> 6) & 3;
3308 if (sse_fn_epp
== SSE_SPECIAL
) {
3311 case 0x0e7: /* movntq */
3314 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3315 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3317 case 0x1e7: /* movntdq */
3318 case 0x02b: /* movntps */
3319 case 0x12b: /* movntpd */
3322 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3323 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3325 case 0x3f0: /* lddqu */
3328 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3329 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3331 case 0x22b: /* movntss */
3332 case 0x32b: /* movntsd */
3335 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3337 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,
3340 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
3341 xmm_regs
[reg
].XMM_L(0)));
3342 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3345 case 0x6e: /* movd mm, ea */
3346 #ifdef TARGET_X86_64
3347 if (s
->dflag
== 2) {
3348 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3349 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3353 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3354 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3355 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3356 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3357 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3360 case 0x16e: /* movd xmm, ea */
3361 #ifdef TARGET_X86_64
3362 if (s
->dflag
== 2) {
3363 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 0);
3364 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3365 offsetof(CPUX86State
,xmm_regs
[reg
]));
3366 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T
[0]);
3370 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 0);
3371 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3372 offsetof(CPUX86State
,xmm_regs
[reg
]));
3373 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3374 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3377 case 0x6f: /* movq mm, ea */
3379 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3380 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3383 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3384 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3385 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3386 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3389 case 0x010: /* movups */
3390 case 0x110: /* movupd */
3391 case 0x028: /* movaps */
3392 case 0x128: /* movapd */
3393 case 0x16f: /* movdqa xmm, ea */
3394 case 0x26f: /* movdqu xmm, ea */
3396 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3397 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3399 rm
= (modrm
& 7) | REX_B(s
);
3400 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3401 offsetof(CPUX86State
,xmm_regs
[rm
]));
3404 case 0x210: /* movss xmm, ea */
3406 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3407 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3408 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3410 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3411 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3412 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3414 rm
= (modrm
& 7) | REX_B(s
);
3415 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3416 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3419 case 0x310: /* movsd xmm, ea */
3421 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3422 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3424 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3425 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3427 rm
= (modrm
& 7) | REX_B(s
);
3428 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3429 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3432 case 0x012: /* movlps */
3433 case 0x112: /* movlpd */
3435 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3436 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3439 rm
= (modrm
& 7) | REX_B(s
);
3440 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3441 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3444 case 0x212: /* movsldup */
3446 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3447 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3449 rm
= (modrm
& 7) | REX_B(s
);
3450 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3451 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)));
3452 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3453 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(2)));
3455 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3456 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3457 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3458 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)));
3460 case 0x312: /* movddup */
3462 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3463 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3465 rm
= (modrm
& 7) | REX_B(s
);
3466 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3467 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3469 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3470 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3472 case 0x016: /* movhps */
3473 case 0x116: /* movhpd */
3475 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3476 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3479 rm
= (modrm
& 7) | REX_B(s
);
3480 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)),
3481 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3484 case 0x216: /* movshdup */
3486 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3487 gen_ldo_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3489 rm
= (modrm
& 7) | REX_B(s
);
3490 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)),
3491 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(1)));
3492 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)),
3493 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(3)));
3495 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)),
3496 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(1)));
3497 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(2)),
3498 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(3)));
3503 int bit_index
, field_length
;
3505 if (b1
== 1 && reg
!= 0)
3507 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3508 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3509 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3510 offsetof(CPUX86State
,xmm_regs
[reg
]));
3512 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3513 tcg_const_i32(bit_index
),
3514 tcg_const_i32(field_length
));
3516 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3517 tcg_const_i32(bit_index
),
3518 tcg_const_i32(field_length
));
3521 case 0x7e: /* movd ea, mm */
3522 #ifdef TARGET_X86_64
3523 if (s
->dflag
== 2) {
3524 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3525 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3526 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3530 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3531 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3532 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3535 case 0x17e: /* movd ea, xmm */
3536 #ifdef TARGET_X86_64
3537 if (s
->dflag
== 2) {
3538 tcg_gen_ld_i64(cpu_T
[0], cpu_env
,
3539 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3540 gen_ldst_modrm(env
, s
, modrm
, OT_QUAD
, OR_TMP0
, 1);
3544 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
,
3545 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3546 gen_ldst_modrm(env
, s
, modrm
, OT_LONG
, OR_TMP0
, 1);
3549 case 0x27e: /* movq xmm, ea */
3551 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3552 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3554 rm
= (modrm
& 7) | REX_B(s
);
3555 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3556 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3558 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3560 case 0x7f: /* movq ea, mm */
3562 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3563 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3566 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3567 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3570 case 0x011: /* movups */
3571 case 0x111: /* movupd */
3572 case 0x029: /* movaps */
3573 case 0x129: /* movapd */
3574 case 0x17f: /* movdqa ea, xmm */
3575 case 0x27f: /* movdqu ea, xmm */
3577 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3578 gen_sto_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
]));
3580 rm
= (modrm
& 7) | REX_B(s
);
3581 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3582 offsetof(CPUX86State
,xmm_regs
[reg
]));
3585 case 0x211: /* movss ea, xmm */
3587 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3588 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3589 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
3591 rm
= (modrm
& 7) | REX_B(s
);
3592 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_L(0)),
3593 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_L(0)));
3596 case 0x311: /* movsd ea, xmm */
3598 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3599 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3601 rm
= (modrm
& 7) | REX_B(s
);
3602 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3603 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3606 case 0x013: /* movlps */
3607 case 0x113: /* movlpd */
3609 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3610 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3615 case 0x017: /* movhps */
3616 case 0x117: /* movhpd */
3618 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3619 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3624 case 0x71: /* shift mm, im */
3627 case 0x171: /* shift xmm, im */
3633 val
= cpu_ldub_code(env
, s
->pc
++);
3635 gen_op_movl_T0_im(val
);
3636 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3638 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(1)));
3639 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3641 gen_op_movl_T0_im(val
);
3642 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3644 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3645 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3647 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3648 (((modrm
>> 3)) & 7)][b1
];
3653 rm
= (modrm
& 7) | REX_B(s
);
3654 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3657 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3659 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3660 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3661 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3663 case 0x050: /* movmskps */
3664 rm
= (modrm
& 7) | REX_B(s
);
3665 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3666 offsetof(CPUX86State
,xmm_regs
[rm
]));
3667 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3668 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3669 gen_op_mov_reg_T0(OT_LONG
, reg
);
3671 case 0x150: /* movmskpd */
3672 rm
= (modrm
& 7) | REX_B(s
);
3673 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3674 offsetof(CPUX86State
,xmm_regs
[rm
]));
3675 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3676 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3677 gen_op_mov_reg_T0(OT_LONG
, reg
);
3679 case 0x02a: /* cvtpi2ps */
3680 case 0x12a: /* cvtpi2pd */
3681 gen_helper_enter_mmx(cpu_env
);
3683 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3684 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3685 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3688 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3690 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3691 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3692 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3695 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3699 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3703 case 0x22a: /* cvtsi2ss */
3704 case 0x32a: /* cvtsi2sd */
3705 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3706 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3707 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3708 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3709 if (ot
== OT_LONG
) {
3710 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3711 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3712 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3714 #ifdef TARGET_X86_64
3715 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3716 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T
[0]);
3722 case 0x02c: /* cvttps2pi */
3723 case 0x12c: /* cvttpd2pi */
3724 case 0x02d: /* cvtps2pi */
3725 case 0x12d: /* cvtpd2pi */
3726 gen_helper_enter_mmx(cpu_env
);
3728 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3729 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3730 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3732 rm
= (modrm
& 7) | REX_B(s
);
3733 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3735 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3736 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3737 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3740 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3743 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3746 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3749 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3753 case 0x22c: /* cvttss2si */
3754 case 0x32c: /* cvttsd2si */
3755 case 0x22d: /* cvtss2si */
3756 case 0x32d: /* cvtsd2si */
3757 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3759 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3761 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_Q(0)));
3763 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
3764 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
3766 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3768 rm
= (modrm
& 7) | REX_B(s
);
3769 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3771 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3772 if (ot
== OT_LONG
) {
3773 SSEFunc_i_ep sse_fn_i_ep
=
3774 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3775 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3776 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3778 #ifdef TARGET_X86_64
3779 SSEFunc_l_ep sse_fn_l_ep
=
3780 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3781 sse_fn_l_ep(cpu_T
[0], cpu_env
, cpu_ptr0
);
3786 gen_op_mov_reg_T0(ot
, reg
);
3788 case 0xc4: /* pinsrw */
3791 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
3792 val
= cpu_ldub_code(env
, s
->pc
++);
3795 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3796 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_W(val
)));
3799 tcg_gen_st16_tl(cpu_T
[0], cpu_env
,
3800 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3803 case 0xc5: /* pextrw */
3807 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3808 val
= cpu_ldub_code(env
, s
->pc
++);
3811 rm
= (modrm
& 7) | REX_B(s
);
3812 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3813 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_W(val
)));
3817 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
,
3818 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3820 reg
= ((modrm
>> 3) & 7) | rex_r
;
3821 gen_op_mov_reg_T0(ot
, reg
);
3823 case 0x1d6: /* movq ea, xmm */
3825 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3826 gen_stq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3828 rm
= (modrm
& 7) | REX_B(s
);
3829 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)),
3830 offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)));
3831 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(1)));
3834 case 0x2d6: /* movq2dq */
3835 gen_helper_enter_mmx(cpu_env
);
3837 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(0)),
3838 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3839 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].XMM_Q(1)));
3841 case 0x3d6: /* movdq2q */
3842 gen_helper_enter_mmx(cpu_env
);
3843 rm
= (modrm
& 7) | REX_B(s
);
3844 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3845 offsetof(CPUX86State
,xmm_regs
[rm
].XMM_Q(0)));
3847 case 0xd7: /* pmovmskb */
3852 rm
= (modrm
& 7) | REX_B(s
);
3853 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3854 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3857 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3858 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3860 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
3861 reg
= ((modrm
>> 3) & 7) | rex_r
;
3862 gen_op_mov_reg_T0(OT_LONG
, reg
);
3868 if ((b
& 0xf0) == 0xf0) {
3871 modrm
= cpu_ldub_code(env
, s
->pc
++);
3873 reg
= ((modrm
>> 3) & 7) | rex_r
;
3874 mod
= (modrm
>> 6) & 3;
3879 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3883 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3887 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3889 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3891 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3892 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3894 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3895 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3896 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3897 gen_ldq_env_A0(s
->mem_index
, op2_offset
+
3898 offsetof(XMMReg
, XMM_Q(0)));
3900 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3901 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3902 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
3903 (s
->mem_index
>> 2) - 1);
3904 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
3905 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3906 offsetof(XMMReg
, XMM_L(0)));
3908 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3909 tcg_gen_qemu_ld16u(cpu_tmp0
, cpu_A0
,
3910 (s
->mem_index
>> 2) - 1);
3911 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3912 offsetof(XMMReg
, XMM_W(0)));
3914 case 0x2a: /* movntqda */
3915 gen_ldo_env_A0(s
->mem_index
, op1_offset
);
3918 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
3922 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3924 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3926 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3927 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
3928 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
3931 if (sse_fn_epp
== SSE_SPECIAL
) {
3935 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3936 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3937 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3940 set_cc_op(s
, CC_OP_EFLAGS
);
3947 /* Various integer extensions at 0f 38 f[0-f]. */
3948 b
= modrm
| (b1
<< 8);
3949 modrm
= cpu_ldub_code(env
, s
->pc
++);
3950 reg
= ((modrm
>> 3) & 7) | rex_r
;
3953 case 0x3f0: /* crc32 Gd,Eb */
3954 case 0x3f1: /* crc32 Gd,Ey */
3956 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3959 if ((b
& 0xff) == 0xf0) {
3961 } else if (s
->dflag
!= 2) {
3962 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3967 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
3968 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
3969 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3970 gen_helper_crc32(cpu_T
[0], cpu_tmp2_i32
,
3971 cpu_T
[0], tcg_const_i32(8 << ot
));
3973 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
3974 gen_op_mov_reg_T0(ot
, reg
);
3977 case 0x1f0: /* crc32 or movbe */
3979 /* For these insns, the f3 prefix is supposed to have priority
3980 over the 66 prefix, but that's not what we implement above
3982 if (s
->prefix
& PREFIX_REPNZ
) {
3986 case 0x0f0: /* movbe Gy,My */
3987 case 0x0f1: /* movbe My,Gy */
3988 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3991 if (s
->dflag
!= 2) {
3992 ot
= (s
->prefix
& PREFIX_DATA
? OT_WORD
: OT_LONG
);
3997 /* Load the data incoming to the bswap. Note that the TCG
3998 implementation of bswap requires the input be zero
3999 extended. In the case of the loads, we simply know that
4000 gen_op_ld_v via gen_ldst_modrm does that already. */
4002 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4006 tcg_gen_ext16u_tl(cpu_T
[0], cpu_regs
[reg
]);
4009 tcg_gen_ext32u_tl(cpu_T
[0], cpu_regs
[reg
]);
4012 tcg_gen_mov_tl(cpu_T
[0], cpu_regs
[reg
]);
4019 tcg_gen_bswap16_tl(cpu_T
[0], cpu_T
[0]);
4022 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
4024 #ifdef TARGET_X86_64
4026 tcg_gen_bswap64_tl(cpu_T
[0], cpu_T
[0]);
4032 gen_op_mov_reg_T0(ot
, reg
);
4034 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
4038 case 0x0f2: /* andn Gy, By, Ey */
4039 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4040 || !(s
->prefix
& PREFIX_VEX
)
4044 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4045 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4046 tcg_gen_andc_tl(cpu_T
[0], cpu_regs
[s
->vex_v
], cpu_T
[0]);
4047 gen_op_mov_reg_T0(ot
, reg
);
4048 gen_op_update1_cc();
4049 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4052 case 0x0f7: /* bextr Gy, Ey, By */
4053 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4054 || !(s
->prefix
& PREFIX_VEX
)
4058 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4062 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4063 /* Extract START, and shift the operand.
4064 Shifts larger than operand size get zeros. */
4065 tcg_gen_ext8u_tl(cpu_A0
, cpu_regs
[s
->vex_v
]);
4066 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
4068 bound
= tcg_const_tl(ot
== OT_QUAD
? 63 : 31);
4069 zero
= tcg_const_tl(0);
4070 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_T
[0], cpu_A0
, bound
,
4072 tcg_temp_free(zero
);
4074 /* Extract the LEN into a mask. Lengths larger than
4075 operand size get all ones. */
4076 tcg_gen_shri_tl(cpu_A0
, cpu_regs
[s
->vex_v
], 8);
4077 tcg_gen_ext8u_tl(cpu_A0
, cpu_A0
);
4078 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_A0
, cpu_A0
, bound
,
4080 tcg_temp_free(bound
);
4081 tcg_gen_movi_tl(cpu_T
[1], 1);
4082 tcg_gen_shl_tl(cpu_T
[1], cpu_T
[1], cpu_A0
);
4083 tcg_gen_subi_tl(cpu_T
[1], cpu_T
[1], 1);
4084 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4086 gen_op_mov_reg_T0(ot
, reg
);
4087 gen_op_update1_cc();
4088 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4092 case 0x0f5: /* bzhi Gy, Ey, By */
4093 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4094 || !(s
->prefix
& PREFIX_VEX
)
4098 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4099 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4100 tcg_gen_ext8u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4102 TCGv bound
= tcg_const_tl(ot
== OT_QUAD
? 63 : 31);
4103 /* Note that since we're using BMILG (in order to get O
4104 cleared) we need to store the inverse into C. */
4105 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
4107 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_T
[1], cpu_T
[1],
4108 bound
, bound
, cpu_T
[1]);
4109 tcg_temp_free(bound
);
4111 tcg_gen_movi_tl(cpu_A0
, -1);
4112 tcg_gen_shl_tl(cpu_A0
, cpu_A0
, cpu_T
[1]);
4113 tcg_gen_andc_tl(cpu_T
[0], cpu_T
[0], cpu_A0
);
4114 gen_op_mov_reg_T0(ot
, reg
);
4115 gen_op_update1_cc();
4116 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4119 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4120 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4121 || !(s
->prefix
& PREFIX_VEX
)
4125 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4126 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4129 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4130 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EDX
]);
4131 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
4132 cpu_tmp2_i32
, cpu_tmp3_i32
);
4133 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], cpu_tmp2_i32
);
4134 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp3_i32
);
4136 #ifdef TARGET_X86_64
4138 tcg_gen_mulu2_i64(cpu_regs
[s
->vex_v
], cpu_regs
[reg
],
4139 cpu_T
[0], cpu_regs
[R_EDX
]);
4145 case 0x3f5: /* pdep Gy, By, Ey */
4146 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4147 || !(s
->prefix
& PREFIX_VEX
)
4151 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4152 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4153 /* Note that by zero-extending the mask operand, we
4154 automatically handle zero-extending the result. */
4155 if (s
->dflag
== 2) {
4156 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4158 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4160 gen_helper_pdep(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
4163 case 0x2f5: /* pext Gy, By, Ey */
4164 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4165 || !(s
->prefix
& PREFIX_VEX
)
4169 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4170 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4171 /* Note that by zero-extending the mask operand, we
4172 automatically handle zero-extending the result. */
4173 if (s
->dflag
== 2) {
4174 tcg_gen_mov_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4176 tcg_gen_ext32u_tl(cpu_T
[1], cpu_regs
[s
->vex_v
]);
4178 gen_helper_pext(cpu_regs
[reg
], cpu_T
[0], cpu_T
[1]);
4181 case 0x1f6: /* adcx Gy, Ey */
4182 case 0x2f6: /* adox Gy, Ey */
4183 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
4186 TCGv carry_in
, carry_out
, zero
;
4189 ot
= (s
->dflag
== 2 ? OT_QUAD
: OT_LONG
);
4190 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4192 /* Re-use the carry-out from a previous round. */
4193 TCGV_UNUSED(carry_in
);
4194 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
4198 carry_in
= cpu_cc_dst
;
4199 end_op
= CC_OP_ADCX
;
4201 end_op
= CC_OP_ADCOX
;
4206 end_op
= CC_OP_ADCOX
;
4208 carry_in
= cpu_cc_src2
;
4209 end_op
= CC_OP_ADOX
;
4213 end_op
= CC_OP_ADCOX
;
4214 carry_in
= carry_out
;
4217 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
4220 /* If we can't reuse carry-out, get it out of EFLAGS. */
4221 if (TCGV_IS_UNUSED(carry_in
)) {
4222 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
4223 gen_compute_eflags(s
);
4225 carry_in
= cpu_tmp0
;
4226 tcg_gen_shri_tl(carry_in
, cpu_cc_src
,
4227 ctz32(b
== 0x1f6 ? CC_C
: CC_O
));
4228 tcg_gen_andi_tl(carry_in
, carry_in
, 1);
4232 #ifdef TARGET_X86_64
4234 /* If we know TL is 64-bit, and we want a 32-bit
4235 result, just do everything in 64-bit arithmetic. */
4236 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
4237 tcg_gen_ext32u_i64(cpu_T
[0], cpu_T
[0]);
4238 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], cpu_regs
[reg
]);
4239 tcg_gen_add_i64(cpu_T
[0], cpu_T
[0], carry_in
);
4240 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_T
[0]);
4241 tcg_gen_shri_i64(carry_out
, cpu_T
[0], 32);
4245 /* Otherwise compute the carry-out in two steps. */
4246 zero
= tcg_const_tl(0);
4247 tcg_gen_add2_tl(cpu_T
[0], carry_out
,
4250 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
4251 cpu_regs
[reg
], carry_out
,
4253 tcg_temp_free(zero
);
4256 set_cc_op(s
, end_op
);
4260 case 0x1f7: /* shlx Gy, Ey, By */
4261 case 0x2f7: /* sarx Gy, Ey, By */
4262 case 0x3f7: /* shrx Gy, Ey, By */
4263 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4264 || !(s
->prefix
& PREFIX_VEX
)
4268 ot
= (s
->dflag
== 2 ? OT_QUAD
: OT_LONG
);
4269 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4270 if (ot
== OT_QUAD
) {
4271 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 63);
4273 tcg_gen_andi_tl(cpu_T
[1], cpu_regs
[s
->vex_v
], 31);
4276 tcg_gen_shl_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4277 } else if (b
== 0x2f7) {
4278 if (ot
!= OT_QUAD
) {
4279 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
4281 tcg_gen_sar_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4283 if (ot
!= OT_QUAD
) {
4284 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
4286 tcg_gen_shr_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4288 gen_op_mov_reg_T0(ot
, reg
);
4294 case 0x3f3: /* Group 17 */
4295 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4296 || !(s
->prefix
& PREFIX_VEX
)
4300 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4301 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4304 case 1: /* blsr By,Ey */
4305 tcg_gen_neg_tl(cpu_T
[1], cpu_T
[0]);
4306 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
4307 gen_op_mov_reg_T0(ot
, s
->vex_v
);
4308 gen_op_update2_cc();
4309 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4312 case 2: /* blsmsk By,Ey */
4313 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4314 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4315 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4316 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4317 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4320 case 3: /* blsi By, Ey */
4321 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
4322 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], 1);
4323 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_cc_src
);
4324 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
4325 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4341 modrm
= cpu_ldub_code(env
, s
->pc
++);
4343 reg
= ((modrm
>> 3) & 7) | rex_r
;
4344 mod
= (modrm
>> 6) & 3;
4349 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4353 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4356 if (sse_fn_eppi
== SSE_SPECIAL
) {
4357 ot
= (s
->dflag
== 2) ? OT_QUAD
: OT_LONG
;
4358 rm
= (modrm
& 7) | REX_B(s
);
4360 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4361 reg
= ((modrm
>> 3) & 7) | rex_r
;
4362 val
= cpu_ldub_code(env
, s
->pc
++);
4364 case 0x14: /* pextrb */
4365 tcg_gen_ld8u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4366 xmm_regs
[reg
].XMM_B(val
& 15)));
4368 gen_op_mov_reg_T0(ot
, rm
);
4370 tcg_gen_qemu_st8(cpu_T
[0], cpu_A0
,
4371 (s
->mem_index
>> 2) - 1);
4373 case 0x15: /* pextrw */
4374 tcg_gen_ld16u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4375 xmm_regs
[reg
].XMM_W(val
& 7)));
4377 gen_op_mov_reg_T0(ot
, rm
);
4379 tcg_gen_qemu_st16(cpu_T
[0], cpu_A0
,
4380 (s
->mem_index
>> 2) - 1);
4383 if (ot
== OT_LONG
) { /* pextrd */
4384 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4385 offsetof(CPUX86State
,
4386 xmm_regs
[reg
].XMM_L(val
& 3)));
4387 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4389 gen_op_mov_reg_v(ot
, rm
, cpu_T
[0]);
4391 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4392 (s
->mem_index
>> 2) - 1);
4393 } else { /* pextrq */
4394 #ifdef TARGET_X86_64
4395 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4396 offsetof(CPUX86State
,
4397 xmm_regs
[reg
].XMM_Q(val
& 1)));
4399 gen_op_mov_reg_v(ot
, rm
, cpu_tmp1_i64
);
4401 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
4402 (s
->mem_index
>> 2) - 1);
4408 case 0x17: /* extractps */
4409 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4410 xmm_regs
[reg
].XMM_L(val
& 3)));
4412 gen_op_mov_reg_T0(ot
, rm
);
4414 tcg_gen_qemu_st32(cpu_T
[0], cpu_A0
,
4415 (s
->mem_index
>> 2) - 1);
4417 case 0x20: /* pinsrb */
4419 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
4421 tcg_gen_qemu_ld8u(cpu_T
[0], cpu_A0
,
4422 (s
->mem_index
>> 2) - 1);
4423 tcg_gen_st8_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,
4424 xmm_regs
[reg
].XMM_B(val
& 15)));
4426 case 0x21: /* insertps */
4428 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4429 offsetof(CPUX86State
,xmm_regs
[rm
]
4430 .XMM_L((val
>> 6) & 3)));
4432 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4433 (s
->mem_index
>> 2) - 1);
4434 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4436 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4437 offsetof(CPUX86State
,xmm_regs
[reg
]
4438 .XMM_L((val
>> 4) & 3)));
4440 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4441 cpu_env
, offsetof(CPUX86State
,
4442 xmm_regs
[reg
].XMM_L(0)));
4444 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4445 cpu_env
, offsetof(CPUX86State
,
4446 xmm_regs
[reg
].XMM_L(1)));
4448 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4449 cpu_env
, offsetof(CPUX86State
,
4450 xmm_regs
[reg
].XMM_L(2)));
4452 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4453 cpu_env
, offsetof(CPUX86State
,
4454 xmm_regs
[reg
].XMM_L(3)));
4457 if (ot
== OT_LONG
) { /* pinsrd */
4459 gen_op_mov_v_reg(ot
, cpu_tmp0
, rm
);
4461 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_A0
,
4462 (s
->mem_index
>> 2) - 1);
4463 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_tmp0
);
4464 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4465 offsetof(CPUX86State
,
4466 xmm_regs
[reg
].XMM_L(val
& 3)));
4467 } else { /* pinsrq */
4468 #ifdef TARGET_X86_64
4470 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4472 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
4473 (s
->mem_index
>> 2) - 1);
4474 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4475 offsetof(CPUX86State
,
4476 xmm_regs
[reg
].XMM_Q(val
& 1)));
4487 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4489 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4491 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4492 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4493 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4496 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4498 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4500 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4501 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4502 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4505 val
= cpu_ldub_code(env
, s
->pc
++);
4507 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4508 set_cc_op(s
, CC_OP_EFLAGS
);
4511 /* The helper must use entire 64-bit gp registers */
4515 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4516 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4517 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4521 /* Various integer extensions at 0f 3a f[0-f]. */
4522 b
= modrm
| (b1
<< 8);
4523 modrm
= cpu_ldub_code(env
, s
->pc
++);
4524 reg
= ((modrm
>> 3) & 7) | rex_r
;
4527 case 0x3f0: /* rorx Gy,Ey, Ib */
4528 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4529 || !(s
->prefix
& PREFIX_VEX
)
4533 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
4534 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4535 b
= cpu_ldub_code(env
, s
->pc
++);
4536 if (ot
== OT_QUAD
) {
4537 tcg_gen_rotri_tl(cpu_T
[0], cpu_T
[0], b
& 63);
4539 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
4540 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, b
& 31);
4541 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
4543 gen_op_mov_reg_T0(ot
, reg
);
4555 /* generic MMX or SSE operation */
4557 case 0x70: /* pshufx insn */
4558 case 0xc6: /* pshufx insn */
4559 case 0xc2: /* compare insns */
4566 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4568 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4569 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4570 if (b1
>= 2 && ((b
>= 0x50 && b
<= 0x5f && b
!= 0x5b) ||
4572 /* specific case for SSE single instructions */
4575 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
4576 tcg_gen_st32_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,xmm_t0
.XMM_L(0)));
4579 gen_ldq_env_A0(s
->mem_index
, offsetof(CPUX86State
,xmm_t0
.XMM_D(0)));
4582 gen_ldo_env_A0(s
->mem_index
, op2_offset
);
4585 rm
= (modrm
& 7) | REX_B(s
);
4586 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4589 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4591 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4592 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4593 gen_ldq_env_A0(s
->mem_index
, op2_offset
);
4596 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4600 case 0x0f: /* 3DNow! data insns */
4601 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
))
4603 val
= cpu_ldub_code(env
, s
->pc
++);
4604 sse_fn_epp
= sse_op_table5
[val
];
4608 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4609 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4610 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4612 case 0x70: /* pshufx insn */
4613 case 0xc6: /* pshufx insn */
4614 val
= cpu_ldub_code(env
, s
->pc
++);
4615 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4616 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4617 /* XXX: introduce a new table? */
4618 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4619 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4623 val
= cpu_ldub_code(env
, s
->pc
++);
4626 sse_fn_epp
= sse_op_table4
[val
][b1
];
4628 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4629 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4630 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4633 /* maskmov : we must prepare A0 */
4636 #ifdef TARGET_X86_64
4637 if (s
->aflag
== 2) {
4638 gen_op_movq_A0_reg(R_EDI
);
4642 gen_op_movl_A0_reg(R_EDI
);
4644 gen_op_andl_A0_ffff();
4646 gen_add_A0_ds_seg(s
);
4648 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4649 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4650 /* XXX: introduce a new table? */
4651 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4652 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4655 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4656 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4657 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4660 if (b
== 0x2e || b
== 0x2f) {
4661 set_cc_op(s
, CC_OP_EFLAGS
);
4666 /* convert one instruction. s->is_jmp is set if the translation must
4667 be stopped. Return the next pc value */
4668 static target_ulong
disas_insn(CPUX86State
*env
, DisasContext
*s
,
4669 target_ulong pc_start
)
4671 int b
, prefixes
, aflag
, dflag
;
4673 int modrm
, reg
, rm
, mod
, reg_addr
, op
, opreg
, offset_addr
, val
;
4674 target_ulong next_eip
, tval
;
4677 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4678 tcg_gen_debug_insn_start(pc_start
);
4685 #ifdef TARGET_X86_64
4690 s
->rip_offset
= 0; /* for relative ip address */
4694 b
= cpu_ldub_code(env
, s
->pc
);
4696 /* Collect prefixes. */
4699 prefixes
|= PREFIX_REPZ
;
4702 prefixes
|= PREFIX_REPNZ
;
4705 prefixes
|= PREFIX_LOCK
;
4726 prefixes
|= PREFIX_DATA
;
4729 prefixes
|= PREFIX_ADR
;
4731 #ifdef TARGET_X86_64
4735 rex_w
= (b
>> 3) & 1;
4736 rex_r
= (b
& 0x4) << 1;
4737 s
->rex_x
= (b
& 0x2) << 2;
4738 REX_B(s
) = (b
& 0x1) << 3;
4739 x86_64_hregs
= 1; /* select uniform byte register addressing */
4744 case 0xc5: /* 2-byte VEX */
4745 case 0xc4: /* 3-byte VEX */
4746 /* VEX prefixes cannot be used except in 32-bit mode.
4747 Otherwise the instruction is LES or LDS. */
4748 if (s
->code32
&& !s
->vm86
) {
4749 static const int pp_prefix
[4] = {
4750 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4752 int vex3
, vex2
= cpu_ldub_code(env
, s
->pc
);
4754 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4755 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4756 otherwise the instruction is LES or LDS. */
4761 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4762 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4763 | PREFIX_LOCK
| PREFIX_DATA
)) {
4766 #ifdef TARGET_X86_64
4771 rex_r
= (~vex2
>> 4) & 8;
4774 b
= cpu_ldub_code(env
, s
->pc
++);
4776 #ifdef TARGET_X86_64
4777 s
->rex_x
= (~vex2
>> 3) & 8;
4778 s
->rex_b
= (~vex2
>> 2) & 8;
4780 vex3
= cpu_ldub_code(env
, s
->pc
++);
4781 rex_w
= (vex3
>> 7) & 1;
4782 switch (vex2
& 0x1f) {
4783 case 0x01: /* Implied 0f leading opcode bytes. */
4784 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4786 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4789 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4792 default: /* Reserved for future use. */
4796 s
->vex_v
= (~vex3
>> 3) & 0xf;
4797 s
->vex_l
= (vex3
>> 2) & 1;
4798 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4803 /* Post-process prefixes. */
4805 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4806 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4807 over 0x66 if both are present. */
4808 dflag
= (rex_w
> 0 ? 2 : prefixes
& PREFIX_DATA
? 0 : 1);
4809 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4810 aflag
= (prefixes
& PREFIX_ADR
? 1 : 2);
4812 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4814 if (prefixes
& PREFIX_DATA
) {
4817 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4819 if (prefixes
& PREFIX_ADR
) {
4824 s
->prefix
= prefixes
;
4828 /* lock generation */
4829 if (prefixes
& PREFIX_LOCK
)
4832 /* now check op code */
4836 /**************************/
4837 /* extended op code */
4838 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4841 /**************************/
4859 ot
= dflag
+ OT_WORD
;
4862 case 0: /* OP Ev, Gv */
4863 modrm
= cpu_ldub_code(env
, s
->pc
++);
4864 reg
= ((modrm
>> 3) & 7) | rex_r
;
4865 mod
= (modrm
>> 6) & 3;
4866 rm
= (modrm
& 7) | REX_B(s
);
4868 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4870 } else if (op
== OP_XORL
&& rm
== reg
) {
4872 /* xor reg, reg optimisation */
4873 set_cc_op(s
, CC_OP_CLR
);
4875 gen_op_mov_reg_T0(ot
, reg
);
4880 gen_op_mov_TN_reg(ot
, 1, reg
);
4881 gen_op(s
, op
, ot
, opreg
);
4883 case 1: /* OP Gv, Ev */
4884 modrm
= cpu_ldub_code(env
, s
->pc
++);
4885 mod
= (modrm
>> 6) & 3;
4886 reg
= ((modrm
>> 3) & 7) | rex_r
;
4887 rm
= (modrm
& 7) | REX_B(s
);
4889 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4890 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
4891 } else if (op
== OP_XORL
&& rm
== reg
) {
4894 gen_op_mov_TN_reg(ot
, 1, rm
);
4896 gen_op(s
, op
, ot
, reg
);
4898 case 2: /* OP A, Iv */
4899 val
= insn_get(env
, s
, ot
);
4900 gen_op_movl_T1_im(val
);
4901 gen_op(s
, op
, ot
, OR_EAX
);
4910 case 0x80: /* GRP1 */
4919 ot
= dflag
+ OT_WORD
;
4921 modrm
= cpu_ldub_code(env
, s
->pc
++);
4922 mod
= (modrm
>> 6) & 3;
4923 rm
= (modrm
& 7) | REX_B(s
);
4924 op
= (modrm
>> 3) & 7;
4930 s
->rip_offset
= insn_const_size(ot
);
4931 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4942 val
= insn_get(env
, s
, ot
);
4945 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
4948 gen_op_movl_T1_im(val
);
4949 gen_op(s
, op
, ot
, opreg
);
4953 /**************************/
4954 /* inc, dec, and other misc arith */
4955 case 0x40 ... 0x47: /* inc Gv */
4956 ot
= dflag
? OT_LONG
: OT_WORD
;
4957 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4959 case 0x48 ... 0x4f: /* dec Gv */
4960 ot
= dflag
? OT_LONG
: OT_WORD
;
4961 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4963 case 0xf6: /* GRP3 */
4968 ot
= dflag
+ OT_WORD
;
4970 modrm
= cpu_ldub_code(env
, s
->pc
++);
4971 mod
= (modrm
>> 6) & 3;
4972 rm
= (modrm
& 7) | REX_B(s
);
4973 op
= (modrm
>> 3) & 7;
4976 s
->rip_offset
= insn_const_size(ot
);
4977 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
4978 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
4980 gen_op_mov_TN_reg(ot
, 0, rm
);
4985 val
= insn_get(env
, s
, ot
);
4986 gen_op_movl_T1_im(val
);
4987 gen_op_testl_T0_T1_cc();
4988 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4991 tcg_gen_not_tl(cpu_T
[0], cpu_T
[0]);
4993 gen_op_st_T0_A0(ot
+ s
->mem_index
);
4995 gen_op_mov_reg_T0(ot
, rm
);
4999 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
5001 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5003 gen_op_mov_reg_T0(ot
, rm
);
5005 gen_op_update_neg_cc();
5006 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5011 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
5012 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5013 tcg_gen_ext8u_tl(cpu_T
[1], cpu_T
[1]);
5014 /* XXX: use 32 bit mul which could be faster */
5015 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5016 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5017 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5018 tcg_gen_andi_tl(cpu_cc_src
, cpu_T
[0], 0xff00);
5019 set_cc_op(s
, CC_OP_MULB
);
5022 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
5023 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5024 tcg_gen_ext16u_tl(cpu_T
[1], cpu_T
[1]);
5025 /* XXX: use 32 bit mul which could be faster */
5026 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5027 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5028 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5029 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
5030 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5031 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
5032 set_cc_op(s
, CC_OP_MULW
);
5036 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5037 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EAX
]);
5038 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
5039 cpu_tmp2_i32
, cpu_tmp3_i32
);
5040 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], cpu_tmp2_i32
);
5041 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], cpu_tmp3_i32
);
5042 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5043 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
5044 set_cc_op(s
, CC_OP_MULL
);
5046 #ifdef TARGET_X86_64
5048 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
5049 cpu_T
[0], cpu_regs
[R_EAX
]);
5050 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5051 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
5052 set_cc_op(s
, CC_OP_MULQ
);
5060 gen_op_mov_TN_reg(OT_BYTE
, 1, R_EAX
);
5061 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5062 tcg_gen_ext8s_tl(cpu_T
[1], cpu_T
[1]);
5063 /* XXX: use 32 bit mul which could be faster */
5064 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5065 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5066 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5067 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T
[0]);
5068 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5069 set_cc_op(s
, CC_OP_MULB
);
5072 gen_op_mov_TN_reg(OT_WORD
, 1, R_EAX
);
5073 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5074 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5075 /* XXX: use 32 bit mul which could be faster */
5076 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5077 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5078 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5079 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5080 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5081 tcg_gen_shri_tl(cpu_T
[0], cpu_T
[0], 16);
5082 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5083 set_cc_op(s
, CC_OP_MULW
);
5087 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5088 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EAX
]);
5089 tcg_gen_muls2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
5090 cpu_tmp2_i32
, cpu_tmp3_i32
);
5091 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], cpu_tmp2_i32
);
5092 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], cpu_tmp3_i32
);
5093 tcg_gen_sari_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 31);
5094 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5095 tcg_gen_sub_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
5096 tcg_gen_extu_i32_tl(cpu_cc_src
, cpu_tmp2_i32
);
5097 set_cc_op(s
, CC_OP_MULL
);
5099 #ifdef TARGET_X86_64
5101 tcg_gen_muls2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
5102 cpu_T
[0], cpu_regs
[R_EAX
]);
5103 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5104 tcg_gen_sari_tl(cpu_cc_src
, cpu_regs
[R_EAX
], 63);
5105 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_regs
[R_EDX
]);
5106 set_cc_op(s
, CC_OP_MULQ
);
5114 gen_jmp_im(pc_start
- s
->cs_base
);
5115 gen_helper_divb_AL(cpu_env
, cpu_T
[0]);
5118 gen_jmp_im(pc_start
- s
->cs_base
);
5119 gen_helper_divw_AX(cpu_env
, cpu_T
[0]);
5123 gen_jmp_im(pc_start
- s
->cs_base
);
5124 gen_helper_divl_EAX(cpu_env
, cpu_T
[0]);
5126 #ifdef TARGET_X86_64
5128 gen_jmp_im(pc_start
- s
->cs_base
);
5129 gen_helper_divq_EAX(cpu_env
, cpu_T
[0]);
5137 gen_jmp_im(pc_start
- s
->cs_base
);
5138 gen_helper_idivb_AL(cpu_env
, cpu_T
[0]);
5141 gen_jmp_im(pc_start
- s
->cs_base
);
5142 gen_helper_idivw_AX(cpu_env
, cpu_T
[0]);
5146 gen_jmp_im(pc_start
- s
->cs_base
);
5147 gen_helper_idivl_EAX(cpu_env
, cpu_T
[0]);
5149 #ifdef TARGET_X86_64
5151 gen_jmp_im(pc_start
- s
->cs_base
);
5152 gen_helper_idivq_EAX(cpu_env
, cpu_T
[0]);
5162 case 0xfe: /* GRP4 */
5163 case 0xff: /* GRP5 */
5167 ot
= dflag
+ OT_WORD
;
5169 modrm
= cpu_ldub_code(env
, s
->pc
++);
5170 mod
= (modrm
>> 6) & 3;
5171 rm
= (modrm
& 7) | REX_B(s
);
5172 op
= (modrm
>> 3) & 7;
5173 if (op
>= 2 && b
== 0xfe) {
5177 if (op
== 2 || op
== 4) {
5178 /* operand size for jumps is 64 bit */
5180 } else if (op
== 3 || op
== 5) {
5181 ot
= dflag
? OT_LONG
+ (rex_w
== 1) : OT_WORD
;
5182 } else if (op
== 6) {
5183 /* default push size is 64 bit */
5184 ot
= dflag
? OT_QUAD
: OT_WORD
;
5188 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5189 if (op
>= 2 && op
!= 3 && op
!= 5)
5190 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5192 gen_op_mov_TN_reg(ot
, 0, rm
);
5196 case 0: /* inc Ev */
5201 gen_inc(s
, ot
, opreg
, 1);
5203 case 1: /* dec Ev */
5208 gen_inc(s
, ot
, opreg
, -1);
5210 case 2: /* call Ev */
5211 /* XXX: optimize if memory (no 'and' is necessary) */
5213 gen_op_andl_T0_ffff();
5214 next_eip
= s
->pc
- s
->cs_base
;
5215 gen_movtl_T1_im(next_eip
);
5220 case 3: /* lcall Ev */
5221 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5222 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5223 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5225 if (s
->pe
&& !s
->vm86
) {
5226 gen_update_cc_op(s
);
5227 gen_jmp_im(pc_start
- s
->cs_base
);
5228 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5229 gen_helper_lcall_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5230 tcg_const_i32(dflag
),
5231 tcg_const_i32(s
->pc
- pc_start
));
5233 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5234 gen_helper_lcall_real(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5235 tcg_const_i32(dflag
),
5236 tcg_const_i32(s
->pc
- s
->cs_base
));
5240 case 4: /* jmp Ev */
5242 gen_op_andl_T0_ffff();
5246 case 5: /* ljmp Ev */
5247 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5248 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5249 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5251 if (s
->pe
&& !s
->vm86
) {
5252 gen_update_cc_op(s
);
5253 gen_jmp_im(pc_start
- s
->cs_base
);
5254 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5255 gen_helper_ljmp_protected(cpu_env
, cpu_tmp2_i32
, cpu_T
[1],
5256 tcg_const_i32(s
->pc
- pc_start
));
5258 gen_op_movl_seg_T0_vm(R_CS
);
5259 gen_op_movl_T0_T1();
5264 case 6: /* push Ev */
5272 case 0x84: /* test Ev, Gv */
5277 ot
= dflag
+ OT_WORD
;
5279 modrm
= cpu_ldub_code(env
, s
->pc
++);
5280 reg
= ((modrm
>> 3) & 7) | rex_r
;
5282 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5283 gen_op_mov_TN_reg(ot
, 1, reg
);
5284 gen_op_testl_T0_T1_cc();
5285 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5288 case 0xa8: /* test eAX, Iv */
5293 ot
= dflag
+ OT_WORD
;
5294 val
= insn_get(env
, s
, ot
);
5296 gen_op_mov_TN_reg(ot
, 0, OR_EAX
);
5297 gen_op_movl_T1_im(val
);
5298 gen_op_testl_T0_T1_cc();
5299 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5302 case 0x98: /* CWDE/CBW */
5303 #ifdef TARGET_X86_64
5305 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5306 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5307 gen_op_mov_reg_T0(OT_QUAD
, R_EAX
);
5311 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5312 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5313 gen_op_mov_reg_T0(OT_LONG
, R_EAX
);
5315 gen_op_mov_TN_reg(OT_BYTE
, 0, R_EAX
);
5316 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5317 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
5320 case 0x99: /* CDQ/CWD */
5321 #ifdef TARGET_X86_64
5323 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5324 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 63);
5325 gen_op_mov_reg_T0(OT_QUAD
, R_EDX
);
5329 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5330 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
5331 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 31);
5332 gen_op_mov_reg_T0(OT_LONG
, R_EDX
);
5334 gen_op_mov_TN_reg(OT_WORD
, 0, R_EAX
);
5335 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5336 tcg_gen_sari_tl(cpu_T
[0], cpu_T
[0], 15);
5337 gen_op_mov_reg_T0(OT_WORD
, R_EDX
);
5340 case 0x1af: /* imul Gv, Ev */
5341 case 0x69: /* imul Gv, Ev, I */
5343 ot
= dflag
+ OT_WORD
;
5344 modrm
= cpu_ldub_code(env
, s
->pc
++);
5345 reg
= ((modrm
>> 3) & 7) | rex_r
;
5347 s
->rip_offset
= insn_const_size(ot
);
5350 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5352 val
= insn_get(env
, s
, ot
);
5353 gen_op_movl_T1_im(val
);
5354 } else if (b
== 0x6b) {
5355 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5356 gen_op_movl_T1_im(val
);
5358 gen_op_mov_TN_reg(ot
, 1, reg
);
5361 #ifdef TARGET_X86_64
5363 tcg_gen_muls2_i64(cpu_regs
[reg
], cpu_T
[1], cpu_T
[0], cpu_T
[1]);
5364 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5365 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
5366 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[1]);
5370 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
5371 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
5372 tcg_gen_muls2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
5373 cpu_tmp2_i32
, cpu_tmp3_i32
);
5374 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
5375 tcg_gen_sari_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 31);
5376 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5377 tcg_gen_sub_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
5378 tcg_gen_extu_i32_tl(cpu_cc_src
, cpu_tmp2_i32
);
5381 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5382 tcg_gen_ext16s_tl(cpu_T
[1], cpu_T
[1]);
5383 /* XXX: use 32 bit mul which could be faster */
5384 tcg_gen_mul_tl(cpu_T
[0], cpu_T
[0], cpu_T
[1]);
5385 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
5386 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T
[0]);
5387 tcg_gen_sub_tl(cpu_cc_src
, cpu_T
[0], cpu_tmp0
);
5388 gen_op_mov_reg_T0(ot
, reg
);
5391 set_cc_op(s
, CC_OP_MULB
+ ot
);
5394 case 0x1c1: /* xadd Ev, Gv */
5398 ot
= dflag
+ OT_WORD
;
5399 modrm
= cpu_ldub_code(env
, s
->pc
++);
5400 reg
= ((modrm
>> 3) & 7) | rex_r
;
5401 mod
= (modrm
>> 6) & 3;
5403 rm
= (modrm
& 7) | REX_B(s
);
5404 gen_op_mov_TN_reg(ot
, 0, reg
);
5405 gen_op_mov_TN_reg(ot
, 1, rm
);
5406 gen_op_addl_T0_T1();
5407 gen_op_mov_reg_T1(ot
, reg
);
5408 gen_op_mov_reg_T0(ot
, rm
);
5410 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5411 gen_op_mov_TN_reg(ot
, 0, reg
);
5412 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5413 gen_op_addl_T0_T1();
5414 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5415 gen_op_mov_reg_T1(ot
, reg
);
5417 gen_op_update2_cc();
5418 set_cc_op(s
, CC_OP_ADDB
+ ot
);
5421 case 0x1b1: /* cmpxchg Ev, Gv */
5424 TCGv t0
, t1
, t2
, a0
;
5429 ot
= dflag
+ OT_WORD
;
5430 modrm
= cpu_ldub_code(env
, s
->pc
++);
5431 reg
= ((modrm
>> 3) & 7) | rex_r
;
5432 mod
= (modrm
>> 6) & 3;
5433 t0
= tcg_temp_local_new();
5434 t1
= tcg_temp_local_new();
5435 t2
= tcg_temp_local_new();
5436 a0
= tcg_temp_local_new();
5437 gen_op_mov_v_reg(ot
, t1
, reg
);
5439 rm
= (modrm
& 7) | REX_B(s
);
5440 gen_op_mov_v_reg(ot
, t0
, rm
);
5442 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5443 tcg_gen_mov_tl(a0
, cpu_A0
);
5444 gen_op_ld_v(ot
+ s
->mem_index
, t0
, a0
);
5445 rm
= 0; /* avoid warning */
5447 label1
= gen_new_label();
5448 tcg_gen_mov_tl(t2
, cpu_regs
[R_EAX
]);
5451 tcg_gen_brcond_tl(TCG_COND_EQ
, t2
, t0
, label1
);
5452 label2
= gen_new_label();
5454 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5456 gen_set_label(label1
);
5457 gen_op_mov_reg_v(ot
, rm
, t1
);
5459 /* perform no-op store cycle like physical cpu; must be
5460 before changing accumulator to ensure idempotency if
5461 the store faults and the instruction is restarted */
5462 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
5463 gen_op_mov_reg_v(ot
, R_EAX
, t0
);
5465 gen_set_label(label1
);
5466 gen_op_st_v(ot
+ s
->mem_index
, t1
, a0
);
5468 gen_set_label(label2
);
5469 tcg_gen_mov_tl(cpu_cc_src
, t0
);
5470 tcg_gen_mov_tl(cpu_cc_srcT
, t2
);
5471 tcg_gen_sub_tl(cpu_cc_dst
, t2
, t0
);
5472 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5479 case 0x1c7: /* cmpxchg8b */
5480 modrm
= cpu_ldub_code(env
, s
->pc
++);
5481 mod
= (modrm
>> 6) & 3;
5482 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
5484 #ifdef TARGET_X86_64
5486 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
5488 gen_jmp_im(pc_start
- s
->cs_base
);
5489 gen_update_cc_op(s
);
5490 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5491 gen_helper_cmpxchg16b(cpu_env
, cpu_A0
);
5495 if (!(s
->cpuid_features
& CPUID_CX8
))
5497 gen_jmp_im(pc_start
- s
->cs_base
);
5498 gen_update_cc_op(s
);
5499 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5500 gen_helper_cmpxchg8b(cpu_env
, cpu_A0
);
5502 set_cc_op(s
, CC_OP_EFLAGS
);
5505 /**************************/
5507 case 0x50 ... 0x57: /* push */
5508 gen_op_mov_TN_reg(OT_LONG
, 0, (b
& 7) | REX_B(s
));
5511 case 0x58 ... 0x5f: /* pop */
5513 ot
= dflag
? OT_QUAD
: OT_WORD
;
5515 ot
= dflag
+ OT_WORD
;
5518 /* NOTE: order is important for pop %sp */
5520 gen_op_mov_reg_T0(ot
, (b
& 7) | REX_B(s
));
5522 case 0x60: /* pusha */
5527 case 0x61: /* popa */
5532 case 0x68: /* push Iv */
5535 ot
= dflag
? OT_QUAD
: OT_WORD
;
5537 ot
= dflag
+ OT_WORD
;
5540 val
= insn_get(env
, s
, ot
);
5542 val
= (int8_t)insn_get(env
, s
, OT_BYTE
);
5543 gen_op_movl_T0_im(val
);
5546 case 0x8f: /* pop Ev */
5548 ot
= dflag
? OT_QUAD
: OT_WORD
;
5550 ot
= dflag
+ OT_WORD
;
5552 modrm
= cpu_ldub_code(env
, s
->pc
++);
5553 mod
= (modrm
>> 6) & 3;
5556 /* NOTE: order is important for pop %sp */
5558 rm
= (modrm
& 7) | REX_B(s
);
5559 gen_op_mov_reg_T0(ot
, rm
);
5561 /* NOTE: order is important too for MMU exceptions */
5562 s
->popl_esp_hack
= 1 << ot
;
5563 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5564 s
->popl_esp_hack
= 0;
5568 case 0xc8: /* enter */
5571 val
= cpu_lduw_code(env
, s
->pc
);
5573 level
= cpu_ldub_code(env
, s
->pc
++);
5574 gen_enter(s
, val
, level
);
5577 case 0xc9: /* leave */
5578 /* XXX: exception not precise (ESP is updated before potential exception) */
5580 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EBP
);
5581 gen_op_mov_reg_T0(OT_QUAD
, R_ESP
);
5582 } else if (s
->ss32
) {
5583 gen_op_mov_TN_reg(OT_LONG
, 0, R_EBP
);
5584 gen_op_mov_reg_T0(OT_LONG
, R_ESP
);
5586 gen_op_mov_TN_reg(OT_WORD
, 0, R_EBP
);
5587 gen_op_mov_reg_T0(OT_WORD
, R_ESP
);
5591 ot
= dflag
? OT_QUAD
: OT_WORD
;
5593 ot
= dflag
+ OT_WORD
;
5595 gen_op_mov_reg_T0(ot
, R_EBP
);
5598 case 0x06: /* push es */
5599 case 0x0e: /* push cs */
5600 case 0x16: /* push ss */
5601 case 0x1e: /* push ds */
5604 gen_op_movl_T0_seg(b
>> 3);
5607 case 0x1a0: /* push fs */
5608 case 0x1a8: /* push gs */
5609 gen_op_movl_T0_seg((b
>> 3) & 7);
5612 case 0x07: /* pop es */
5613 case 0x17: /* pop ss */
5614 case 0x1f: /* pop ds */
5619 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5622 /* if reg == SS, inhibit interrupts/trace. */
5623 /* If several instructions disable interrupts, only the
5625 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5626 gen_helper_set_inhibit_irq(cpu_env
);
5630 gen_jmp_im(s
->pc
- s
->cs_base
);
5634 case 0x1a1: /* pop fs */
5635 case 0x1a9: /* pop gs */
5637 gen_movl_seg_T0(s
, (b
>> 3) & 7, pc_start
- s
->cs_base
);
5640 gen_jmp_im(s
->pc
- s
->cs_base
);
5645 /**************************/
5648 case 0x89: /* mov Gv, Ev */
5652 ot
= dflag
+ OT_WORD
;
5653 modrm
= cpu_ldub_code(env
, s
->pc
++);
5654 reg
= ((modrm
>> 3) & 7) | rex_r
;
5656 /* generate a generic store */
5657 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5660 case 0xc7: /* mov Ev, Iv */
5664 ot
= dflag
+ OT_WORD
;
5665 modrm
= cpu_ldub_code(env
, s
->pc
++);
5666 mod
= (modrm
>> 6) & 3;
5668 s
->rip_offset
= insn_const_size(ot
);
5669 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5671 val
= insn_get(env
, s
, ot
);
5672 gen_op_movl_T0_im(val
);
5674 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5676 gen_op_mov_reg_T0(ot
, (modrm
& 7) | REX_B(s
));
5679 case 0x8b: /* mov Ev, Gv */
5683 ot
= OT_WORD
+ dflag
;
5684 modrm
= cpu_ldub_code(env
, s
->pc
++);
5685 reg
= ((modrm
>> 3) & 7) | rex_r
;
5687 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5688 gen_op_mov_reg_T0(ot
, reg
);
5690 case 0x8e: /* mov seg, Gv */
5691 modrm
= cpu_ldub_code(env
, s
->pc
++);
5692 reg
= (modrm
>> 3) & 7;
5693 if (reg
>= 6 || reg
== R_CS
)
5695 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
5696 gen_movl_seg_T0(s
, reg
, pc_start
- s
->cs_base
);
5698 /* if reg == SS, inhibit interrupts/trace */
5699 /* If several instructions disable interrupts, only the
5701 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
5702 gen_helper_set_inhibit_irq(cpu_env
);
5706 gen_jmp_im(s
->pc
- s
->cs_base
);
5710 case 0x8c: /* mov Gv, seg */
5711 modrm
= cpu_ldub_code(env
, s
->pc
++);
5712 reg
= (modrm
>> 3) & 7;
5713 mod
= (modrm
>> 6) & 3;
5716 gen_op_movl_T0_seg(reg
);
5718 ot
= OT_WORD
+ dflag
;
5721 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5724 case 0x1b6: /* movzbS Gv, Eb */
5725 case 0x1b7: /* movzwS Gv, Eb */
5726 case 0x1be: /* movsbS Gv, Eb */
5727 case 0x1bf: /* movswS Gv, Eb */
5730 /* d_ot is the size of destination */
5731 d_ot
= dflag
+ OT_WORD
;
5732 /* ot is the size of source */
5733 ot
= (b
& 1) + OT_BYTE
;
5734 modrm
= cpu_ldub_code(env
, s
->pc
++);
5735 reg
= ((modrm
>> 3) & 7) | rex_r
;
5736 mod
= (modrm
>> 6) & 3;
5737 rm
= (modrm
& 7) | REX_B(s
);
5740 gen_op_mov_TN_reg(ot
, 0, rm
);
5741 switch(ot
| (b
& 8)) {
5743 tcg_gen_ext8u_tl(cpu_T
[0], cpu_T
[0]);
5746 tcg_gen_ext8s_tl(cpu_T
[0], cpu_T
[0]);
5749 tcg_gen_ext16u_tl(cpu_T
[0], cpu_T
[0]);
5753 tcg_gen_ext16s_tl(cpu_T
[0], cpu_T
[0]);
5756 gen_op_mov_reg_T0(d_ot
, reg
);
5758 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5760 gen_op_lds_T0_A0(ot
+ s
->mem_index
);
5762 gen_op_ldu_T0_A0(ot
+ s
->mem_index
);
5764 gen_op_mov_reg_T0(d_ot
, reg
);
5769 case 0x8d: /* lea */
5770 ot
= dflag
+ OT_WORD
;
5771 modrm
= cpu_ldub_code(env
, s
->pc
++);
5772 mod
= (modrm
>> 6) & 3;
5775 reg
= ((modrm
>> 3) & 7) | rex_r
;
5776 /* we must ensure that no segment is added */
5780 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5782 gen_op_mov_reg_A0(ot
- OT_WORD
, reg
);
5785 case 0xa0: /* mov EAX, Ov */
5787 case 0xa2: /* mov Ov, EAX */
5790 target_ulong offset_addr
;
5795 ot
= dflag
+ OT_WORD
;
5796 #ifdef TARGET_X86_64
5797 if (s
->aflag
== 2) {
5798 offset_addr
= cpu_ldq_code(env
, s
->pc
);
5800 gen_op_movq_A0_im(offset_addr
);
5805 offset_addr
= insn_get(env
, s
, OT_LONG
);
5807 offset_addr
= insn_get(env
, s
, OT_WORD
);
5809 gen_op_movl_A0_im(offset_addr
);
5811 gen_add_A0_ds_seg(s
);
5813 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
5814 gen_op_mov_reg_T0(ot
, R_EAX
);
5816 gen_op_mov_TN_reg(ot
, 0, R_EAX
);
5817 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5821 case 0xd7: /* xlat */
5822 #ifdef TARGET_X86_64
5823 if (s
->aflag
== 2) {
5824 gen_op_movq_A0_reg(R_EBX
);
5825 gen_op_mov_TN_reg(OT_QUAD
, 0, R_EAX
);
5826 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5827 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5831 gen_op_movl_A0_reg(R_EBX
);
5832 gen_op_mov_TN_reg(OT_LONG
, 0, R_EAX
);
5833 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], 0xff);
5834 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T
[0]);
5836 gen_op_andl_A0_ffff();
5838 tcg_gen_andi_tl(cpu_A0
, cpu_A0
, 0xffffffff);
5840 gen_add_A0_ds_seg(s
);
5841 gen_op_ldu_T0_A0(OT_BYTE
+ s
->mem_index
);
5842 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
5844 case 0xb0 ... 0xb7: /* mov R, Ib */
5845 val
= insn_get(env
, s
, OT_BYTE
);
5846 gen_op_movl_T0_im(val
);
5847 gen_op_mov_reg_T0(OT_BYTE
, (b
& 7) | REX_B(s
));
5849 case 0xb8 ... 0xbf: /* mov R, Iv */
5850 #ifdef TARGET_X86_64
5854 tmp
= cpu_ldq_code(env
, s
->pc
);
5856 reg
= (b
& 7) | REX_B(s
);
5857 gen_movtl_T0_im(tmp
);
5858 gen_op_mov_reg_T0(OT_QUAD
, reg
);
5862 ot
= dflag
? OT_LONG
: OT_WORD
;
5863 val
= insn_get(env
, s
, ot
);
5864 reg
= (b
& 7) | REX_B(s
);
5865 gen_op_movl_T0_im(val
);
5866 gen_op_mov_reg_T0(ot
, reg
);
5870 case 0x91 ... 0x97: /* xchg R, EAX */
5872 ot
= dflag
+ OT_WORD
;
5873 reg
= (b
& 7) | REX_B(s
);
5877 case 0x87: /* xchg Ev, Gv */
5881 ot
= dflag
+ OT_WORD
;
5882 modrm
= cpu_ldub_code(env
, s
->pc
++);
5883 reg
= ((modrm
>> 3) & 7) | rex_r
;
5884 mod
= (modrm
>> 6) & 3;
5886 rm
= (modrm
& 7) | REX_B(s
);
5888 gen_op_mov_TN_reg(ot
, 0, reg
);
5889 gen_op_mov_TN_reg(ot
, 1, rm
);
5890 gen_op_mov_reg_T0(ot
, rm
);
5891 gen_op_mov_reg_T1(ot
, reg
);
5893 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5894 gen_op_mov_TN_reg(ot
, 0, reg
);
5895 /* for xchg, lock is implicit */
5896 if (!(prefixes
& PREFIX_LOCK
))
5898 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5899 gen_op_st_T0_A0(ot
+ s
->mem_index
);
5900 if (!(prefixes
& PREFIX_LOCK
))
5901 gen_helper_unlock();
5902 gen_op_mov_reg_T1(ot
, reg
);
5905 case 0xc4: /* les Gv */
5906 /* In CODE64 this is VEX3; see above. */
5909 case 0xc5: /* lds Gv */
5910 /* In CODE64 this is VEX2; see above. */
5913 case 0x1b2: /* lss Gv */
5916 case 0x1b4: /* lfs Gv */
5919 case 0x1b5: /* lgs Gv */
5922 ot
= dflag
? OT_LONG
: OT_WORD
;
5923 modrm
= cpu_ldub_code(env
, s
->pc
++);
5924 reg
= ((modrm
>> 3) & 7) | rex_r
;
5925 mod
= (modrm
>> 6) & 3;
5928 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5929 gen_op_ld_T1_A0(ot
+ s
->mem_index
);
5930 gen_add_A0_im(s
, 1 << (ot
- OT_WORD
+ 1));
5931 /* load the segment first to handle exceptions properly */
5932 gen_op_ldu_T0_A0(OT_WORD
+ s
->mem_index
);
5933 gen_movl_seg_T0(s
, op
, pc_start
- s
->cs_base
);
5934 /* then put the data */
5935 gen_op_mov_reg_T1(ot
, reg
);
5937 gen_jmp_im(s
->pc
- s
->cs_base
);
5942 /************************/
5953 ot
= dflag
+ OT_WORD
;
5955 modrm
= cpu_ldub_code(env
, s
->pc
++);
5956 mod
= (modrm
>> 6) & 3;
5957 op
= (modrm
>> 3) & 7;
5963 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
5966 opreg
= (modrm
& 7) | REX_B(s
);
5971 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5974 shift
= cpu_ldub_code(env
, s
->pc
++);
5976 gen_shifti(s
, op
, ot
, opreg
, shift
);
5991 case 0x1a4: /* shld imm */
5995 case 0x1a5: /* shld cl */
5999 case 0x1ac: /* shrd imm */
6003 case 0x1ad: /* shrd cl */
6007 ot
= dflag
+ OT_WORD
;
6008 modrm
= cpu_ldub_code(env
, s
->pc
++);
6009 mod
= (modrm
>> 6) & 3;
6010 rm
= (modrm
& 7) | REX_B(s
);
6011 reg
= ((modrm
>> 3) & 7) | rex_r
;
6013 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6018 gen_op_mov_TN_reg(ot
, 1, reg
);
6021 TCGv imm
= tcg_const_tl(cpu_ldub_code(env
, s
->pc
++));
6022 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
6025 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
6029 /************************/
6032 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
6033 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
6034 /* XXX: what to do if illegal op ? */
6035 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6038 modrm
= cpu_ldub_code(env
, s
->pc
++);
6039 mod
= (modrm
>> 6) & 3;
6041 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
6044 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
6046 case 0x00 ... 0x07: /* fxxxs */
6047 case 0x10 ... 0x17: /* fixxxl */
6048 case 0x20 ... 0x27: /* fxxxl */
6049 case 0x30 ... 0x37: /* fixxx */
6056 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6057 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6058 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
6061 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6062 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6063 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
6066 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6067 (s
->mem_index
>> 2) - 1);
6068 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
6072 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
6073 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6074 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
6078 gen_helper_fp_arith_ST0_FT0(op1
);
6080 /* fcomp needs pop */
6081 gen_helper_fpop(cpu_env
);
6085 case 0x08: /* flds */
6086 case 0x0a: /* fsts */
6087 case 0x0b: /* fstps */
6088 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6089 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6090 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
6095 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6096 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6097 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
6100 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
6101 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6102 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
6105 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6106 (s
->mem_index
>> 2) - 1);
6107 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
6111 gen_op_lds_T0_A0(OT_WORD
+ s
->mem_index
);
6112 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6113 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
6118 /* XXX: the corresponding CPUID bit must be tested ! */
6121 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
6122 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6123 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6126 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
6127 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6128 (s
->mem_index
>> 2) - 1);
6132 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
6133 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6134 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6137 gen_helper_fpop(cpu_env
);
6142 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
6143 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6144 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6147 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
6148 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6149 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
6152 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
6153 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6154 (s
->mem_index
>> 2) - 1);
6158 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
6159 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6160 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6164 gen_helper_fpop(cpu_env
);
6168 case 0x0c: /* fldenv mem */
6169 gen_update_cc_op(s
);
6170 gen_jmp_im(pc_start
- s
->cs_base
);
6171 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6173 case 0x0d: /* fldcw mem */
6174 gen_op_ld_T0_A0(OT_WORD
+ s
->mem_index
);
6175 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6176 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
6178 case 0x0e: /* fnstenv mem */
6179 gen_update_cc_op(s
);
6180 gen_jmp_im(pc_start
- s
->cs_base
);
6181 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6183 case 0x0f: /* fnstcw mem */
6184 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
6185 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6186 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6188 case 0x1d: /* fldt mem */
6189 gen_update_cc_op(s
);
6190 gen_jmp_im(pc_start
- s
->cs_base
);
6191 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
6193 case 0x1f: /* fstpt mem */
6194 gen_update_cc_op(s
);
6195 gen_jmp_im(pc_start
- s
->cs_base
);
6196 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
6197 gen_helper_fpop(cpu_env
);
6199 case 0x2c: /* frstor mem */
6200 gen_update_cc_op(s
);
6201 gen_jmp_im(pc_start
- s
->cs_base
);
6202 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6204 case 0x2e: /* fnsave mem */
6205 gen_update_cc_op(s
);
6206 gen_jmp_im(pc_start
- s
->cs_base
);
6207 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(s
->dflag
));
6209 case 0x2f: /* fnstsw mem */
6210 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6211 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6212 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
6214 case 0x3c: /* fbld */
6215 gen_update_cc_op(s
);
6216 gen_jmp_im(pc_start
- s
->cs_base
);
6217 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
6219 case 0x3e: /* fbstp */
6220 gen_update_cc_op(s
);
6221 gen_jmp_im(pc_start
- s
->cs_base
);
6222 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
6223 gen_helper_fpop(cpu_env
);
6225 case 0x3d: /* fildll */
6226 tcg_gen_qemu_ld64(cpu_tmp1_i64
, cpu_A0
,
6227 (s
->mem_index
>> 2) - 1);
6228 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
6230 case 0x3f: /* fistpll */
6231 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
6232 tcg_gen_qemu_st64(cpu_tmp1_i64
, cpu_A0
,
6233 (s
->mem_index
>> 2) - 1);
6234 gen_helper_fpop(cpu_env
);
6240 /* register float ops */
6244 case 0x08: /* fld sti */
6245 gen_helper_fpush(cpu_env
);
6246 gen_helper_fmov_ST0_STN(cpu_env
,
6247 tcg_const_i32((opreg
+ 1) & 7));
6249 case 0x09: /* fxchg sti */
6250 case 0x29: /* fxchg4 sti, undocumented op */
6251 case 0x39: /* fxchg7 sti, undocumented op */
6252 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6254 case 0x0a: /* grp d9/2 */
6257 /* check exceptions (FreeBSD FPU probe) */
6258 gen_update_cc_op(s
);
6259 gen_jmp_im(pc_start
- s
->cs_base
);
6260 gen_helper_fwait(cpu_env
);
6266 case 0x0c: /* grp d9/4 */
6269 gen_helper_fchs_ST0(cpu_env
);
6272 gen_helper_fabs_ST0(cpu_env
);
6275 gen_helper_fldz_FT0(cpu_env
);
6276 gen_helper_fcom_ST0_FT0(cpu_env
);
6279 gen_helper_fxam_ST0(cpu_env
);
6285 case 0x0d: /* grp d9/5 */
6289 gen_helper_fpush(cpu_env
);
6290 gen_helper_fld1_ST0(cpu_env
);
6293 gen_helper_fpush(cpu_env
);
6294 gen_helper_fldl2t_ST0(cpu_env
);
6297 gen_helper_fpush(cpu_env
);
6298 gen_helper_fldl2e_ST0(cpu_env
);
6301 gen_helper_fpush(cpu_env
);
6302 gen_helper_fldpi_ST0(cpu_env
);
6305 gen_helper_fpush(cpu_env
);
6306 gen_helper_fldlg2_ST0(cpu_env
);
6309 gen_helper_fpush(cpu_env
);
6310 gen_helper_fldln2_ST0(cpu_env
);
6313 gen_helper_fpush(cpu_env
);
6314 gen_helper_fldz_ST0(cpu_env
);
6321 case 0x0e: /* grp d9/6 */
6324 gen_helper_f2xm1(cpu_env
);
6327 gen_helper_fyl2x(cpu_env
);
6330 gen_helper_fptan(cpu_env
);
6332 case 3: /* fpatan */
6333 gen_helper_fpatan(cpu_env
);
6335 case 4: /* fxtract */
6336 gen_helper_fxtract(cpu_env
);
6338 case 5: /* fprem1 */
6339 gen_helper_fprem1(cpu_env
);
6341 case 6: /* fdecstp */
6342 gen_helper_fdecstp(cpu_env
);
6345 case 7: /* fincstp */
6346 gen_helper_fincstp(cpu_env
);
6350 case 0x0f: /* grp d9/7 */
6353 gen_helper_fprem(cpu_env
);
6355 case 1: /* fyl2xp1 */
6356 gen_helper_fyl2xp1(cpu_env
);
6359 gen_helper_fsqrt(cpu_env
);
6361 case 3: /* fsincos */
6362 gen_helper_fsincos(cpu_env
);
6364 case 5: /* fscale */
6365 gen_helper_fscale(cpu_env
);
6367 case 4: /* frndint */
6368 gen_helper_frndint(cpu_env
);
6371 gen_helper_fsin(cpu_env
);
6375 gen_helper_fcos(cpu_env
);
6379 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6380 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6381 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6387 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6389 gen_helper_fpop(cpu_env
);
6391 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6392 gen_helper_fp_arith_ST0_FT0(op1
);
6396 case 0x02: /* fcom */
6397 case 0x22: /* fcom2, undocumented op */
6398 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6399 gen_helper_fcom_ST0_FT0(cpu_env
);
6401 case 0x03: /* fcomp */
6402 case 0x23: /* fcomp3, undocumented op */
6403 case 0x32: /* fcomp5, undocumented op */
6404 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6405 gen_helper_fcom_ST0_FT0(cpu_env
);
6406 gen_helper_fpop(cpu_env
);
6408 case 0x15: /* da/5 */
6410 case 1: /* fucompp */
6411 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6412 gen_helper_fucom_ST0_FT0(cpu_env
);
6413 gen_helper_fpop(cpu_env
);
6414 gen_helper_fpop(cpu_env
);
6422 case 0: /* feni (287 only, just do nop here) */
6424 case 1: /* fdisi (287 only, just do nop here) */
6427 gen_helper_fclex(cpu_env
);
6429 case 3: /* fninit */
6430 gen_helper_fninit(cpu_env
);
6432 case 4: /* fsetpm (287 only, just do nop here) */
6438 case 0x1d: /* fucomi */
6439 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6442 gen_update_cc_op(s
);
6443 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6444 gen_helper_fucomi_ST0_FT0(cpu_env
);
6445 set_cc_op(s
, CC_OP_EFLAGS
);
6447 case 0x1e: /* fcomi */
6448 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6451 gen_update_cc_op(s
);
6452 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6453 gen_helper_fcomi_ST0_FT0(cpu_env
);
6454 set_cc_op(s
, CC_OP_EFLAGS
);
6456 case 0x28: /* ffree sti */
6457 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6459 case 0x2a: /* fst sti */
6460 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6462 case 0x2b: /* fstp sti */
6463 case 0x0b: /* fstp1 sti, undocumented op */
6464 case 0x3a: /* fstp8 sti, undocumented op */
6465 case 0x3b: /* fstp9 sti, undocumented op */
6466 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6467 gen_helper_fpop(cpu_env
);
6469 case 0x2c: /* fucom st(i) */
6470 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6471 gen_helper_fucom_ST0_FT0(cpu_env
);
6473 case 0x2d: /* fucomp st(i) */
6474 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6475 gen_helper_fucom_ST0_FT0(cpu_env
);
6476 gen_helper_fpop(cpu_env
);
6478 case 0x33: /* de/3 */
6480 case 1: /* fcompp */
6481 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6482 gen_helper_fcom_ST0_FT0(cpu_env
);
6483 gen_helper_fpop(cpu_env
);
6484 gen_helper_fpop(cpu_env
);
6490 case 0x38: /* ffreep sti, undocumented op */
6491 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6492 gen_helper_fpop(cpu_env
);
6494 case 0x3c: /* df/4 */
6497 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6498 tcg_gen_extu_i32_tl(cpu_T
[0], cpu_tmp2_i32
);
6499 gen_op_mov_reg_T0(OT_WORD
, R_EAX
);
6505 case 0x3d: /* fucomip */
6506 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6509 gen_update_cc_op(s
);
6510 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6511 gen_helper_fucomi_ST0_FT0(cpu_env
);
6512 gen_helper_fpop(cpu_env
);
6513 set_cc_op(s
, CC_OP_EFLAGS
);
6515 case 0x3e: /* fcomip */
6516 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6519 gen_update_cc_op(s
);
6520 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6521 gen_helper_fcomi_ST0_FT0(cpu_env
);
6522 gen_helper_fpop(cpu_env
);
6523 set_cc_op(s
, CC_OP_EFLAGS
);
6525 case 0x10 ... 0x13: /* fcmovxx */
6529 static const uint8_t fcmov_cc
[8] = {
6536 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6539 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6540 l1
= gen_new_label();
6541 gen_jcc1_noeob(s
, op1
, l1
);
6542 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6551 /************************/
6554 case 0xa4: /* movsS */
6559 ot
= dflag
+ OT_WORD
;
6561 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6562 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6568 case 0xaa: /* stosS */
6573 ot
= dflag
+ OT_WORD
;
6575 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6576 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6581 case 0xac: /* lodsS */
6586 ot
= dflag
+ OT_WORD
;
6587 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6588 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6593 case 0xae: /* scasS */
6598 ot
= dflag
+ OT_WORD
;
6599 if (prefixes
& PREFIX_REPNZ
) {
6600 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6601 } else if (prefixes
& PREFIX_REPZ
) {
6602 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6608 case 0xa6: /* cmpsS */
6613 ot
= dflag
+ OT_WORD
;
6614 if (prefixes
& PREFIX_REPNZ
) {
6615 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6616 } else if (prefixes
& PREFIX_REPZ
) {
6617 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6622 case 0x6c: /* insS */
6627 ot
= dflag
? OT_LONG
: OT_WORD
;
6628 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6629 gen_op_andl_T0_ffff();
6630 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6631 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6632 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6633 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6637 gen_jmp(s
, s
->pc
- s
->cs_base
);
6641 case 0x6e: /* outsS */
6646 ot
= dflag
? OT_LONG
: OT_WORD
;
6647 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6648 gen_op_andl_T0_ffff();
6649 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6650 svm_is_rep(prefixes
) | 4);
6651 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6652 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6656 gen_jmp(s
, s
->pc
- s
->cs_base
);
6661 /************************/
6669 ot
= dflag
? OT_LONG
: OT_WORD
;
6670 val
= cpu_ldub_code(env
, s
->pc
++);
6671 gen_op_movl_T0_im(val
);
6672 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6673 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6676 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6677 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6678 gen_op_mov_reg_T1(ot
, R_EAX
);
6681 gen_jmp(s
, s
->pc
- s
->cs_base
);
6689 ot
= dflag
? OT_LONG
: OT_WORD
;
6690 val
= cpu_ldub_code(env
, s
->pc
++);
6691 gen_op_movl_T0_im(val
);
6692 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6693 svm_is_rep(prefixes
));
6694 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6698 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6699 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6700 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6703 gen_jmp(s
, s
->pc
- s
->cs_base
);
6711 ot
= dflag
? OT_LONG
: OT_WORD
;
6712 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6713 gen_op_andl_T0_ffff();
6714 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6715 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6718 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6719 gen_helper_in_func(ot
, cpu_T
[1], cpu_tmp2_i32
);
6720 gen_op_mov_reg_T1(ot
, R_EAX
);
6723 gen_jmp(s
, s
->pc
- s
->cs_base
);
6731 ot
= dflag
? OT_LONG
: OT_WORD
;
6732 gen_op_mov_TN_reg(OT_WORD
, 0, R_EDX
);
6733 gen_op_andl_T0_ffff();
6734 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6735 svm_is_rep(prefixes
));
6736 gen_op_mov_TN_reg(ot
, 1, R_EAX
);
6740 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
6741 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T
[1]);
6742 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6745 gen_jmp(s
, s
->pc
- s
->cs_base
);
6749 /************************/
6751 case 0xc2: /* ret im */
6752 val
= cpu_ldsw_code(env
, s
->pc
);
6755 if (CODE64(s
) && s
->dflag
)
6757 gen_stack_update(s
, val
+ (2 << s
->dflag
));
6759 gen_op_andl_T0_ffff();
6763 case 0xc3: /* ret */
6767 gen_op_andl_T0_ffff();
6771 case 0xca: /* lret im */
6772 val
= cpu_ldsw_code(env
, s
->pc
);
6775 if (s
->pe
&& !s
->vm86
) {
6776 gen_update_cc_op(s
);
6777 gen_jmp_im(pc_start
- s
->cs_base
);
6778 gen_helper_lret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6779 tcg_const_i32(val
));
6783 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6785 gen_op_andl_T0_ffff();
6786 /* NOTE: keeping EIP updated is not a problem in case of
6790 gen_op_addl_A0_im(2 << s
->dflag
);
6791 gen_op_ld_T0_A0(1 + s
->dflag
+ s
->mem_index
);
6792 gen_op_movl_seg_T0_vm(R_CS
);
6793 /* add stack offset */
6794 gen_stack_update(s
, val
+ (4 << s
->dflag
));
6798 case 0xcb: /* lret */
6801 case 0xcf: /* iret */
6802 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6805 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6806 set_cc_op(s
, CC_OP_EFLAGS
);
6807 } else if (s
->vm86
) {
6809 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6811 gen_helper_iret_real(cpu_env
, tcg_const_i32(s
->dflag
));
6812 set_cc_op(s
, CC_OP_EFLAGS
);
6815 gen_update_cc_op(s
);
6816 gen_jmp_im(pc_start
- s
->cs_base
);
6817 gen_helper_iret_protected(cpu_env
, tcg_const_i32(s
->dflag
),
6818 tcg_const_i32(s
->pc
- s
->cs_base
));
6819 set_cc_op(s
, CC_OP_EFLAGS
);
6823 case 0xe8: /* call im */
6826 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6828 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6829 next_eip
= s
->pc
- s
->cs_base
;
6835 gen_movtl_T0_im(next_eip
);
6840 case 0x9a: /* lcall im */
6842 unsigned int selector
, offset
;
6846 ot
= dflag
? OT_LONG
: OT_WORD
;
6847 offset
= insn_get(env
, s
, ot
);
6848 selector
= insn_get(env
, s
, OT_WORD
);
6850 gen_op_movl_T0_im(selector
);
6851 gen_op_movl_T1_imu(offset
);
6854 case 0xe9: /* jmp im */
6856 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6858 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6859 tval
+= s
->pc
- s
->cs_base
;
6866 case 0xea: /* ljmp im */
6868 unsigned int selector
, offset
;
6872 ot
= dflag
? OT_LONG
: OT_WORD
;
6873 offset
= insn_get(env
, s
, ot
);
6874 selector
= insn_get(env
, s
, OT_WORD
);
6876 gen_op_movl_T0_im(selector
);
6877 gen_op_movl_T1_imu(offset
);
6880 case 0xeb: /* jmp Jb */
6881 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6882 tval
+= s
->pc
- s
->cs_base
;
6887 case 0x70 ... 0x7f: /* jcc Jb */
6888 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
6890 case 0x180 ... 0x18f: /* jcc Jv */
6892 tval
= (int32_t)insn_get(env
, s
, OT_LONG
);
6894 tval
= (int16_t)insn_get(env
, s
, OT_WORD
);
6897 next_eip
= s
->pc
- s
->cs_base
;
6901 gen_jcc(s
, b
, tval
, next_eip
);
6904 case 0x190 ... 0x19f: /* setcc Gv */
6905 modrm
= cpu_ldub_code(env
, s
->pc
++);
6906 gen_setcc1(s
, b
, cpu_T
[0]);
6907 gen_ldst_modrm(env
, s
, modrm
, OT_BYTE
, OR_TMP0
, 1);
6909 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6910 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6913 ot
= dflag
+ OT_WORD
;
6914 modrm
= cpu_ldub_code(env
, s
->pc
++);
6915 reg
= ((modrm
>> 3) & 7) | rex_r
;
6916 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6919 /************************/
6921 case 0x9c: /* pushf */
6922 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6923 if (s
->vm86
&& s
->iopl
!= 3) {
6924 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6926 gen_update_cc_op(s
);
6927 gen_helper_read_eflags(cpu_T
[0], cpu_env
);
6931 case 0x9d: /* popf */
6932 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6933 if (s
->vm86
&& s
->iopl
!= 3) {
6934 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6939 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6940 tcg_const_i32((TF_MASK
| AC_MASK
|
6945 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6946 tcg_const_i32((TF_MASK
| AC_MASK
|
6948 IF_MASK
| IOPL_MASK
)
6952 if (s
->cpl
<= s
->iopl
) {
6954 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6955 tcg_const_i32((TF_MASK
|
6961 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6962 tcg_const_i32((TF_MASK
|
6971 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6972 tcg_const_i32((TF_MASK
| AC_MASK
|
6973 ID_MASK
| NT_MASK
)));
6975 gen_helper_write_eflags(cpu_env
, cpu_T
[0],
6976 tcg_const_i32((TF_MASK
| AC_MASK
|
6983 set_cc_op(s
, CC_OP_EFLAGS
);
6984 /* abort translation because TF/AC flag may change */
6985 gen_jmp_im(s
->pc
- s
->cs_base
);
6989 case 0x9e: /* sahf */
6990 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6992 gen_op_mov_TN_reg(OT_BYTE
, 0, R_AH
);
6993 gen_compute_eflags(s
);
6994 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6995 tcg_gen_andi_tl(cpu_T
[0], cpu_T
[0], CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6996 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_T
[0]);
6998 case 0x9f: /* lahf */
6999 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
7001 gen_compute_eflags(s
);
7002 /* Note: gen_compute_eflags() only gives the condition codes */
7003 tcg_gen_ori_tl(cpu_T
[0], cpu_cc_src
, 0x02);
7004 gen_op_mov_reg_T0(OT_BYTE
, R_AH
);
7006 case 0xf5: /* cmc */
7007 gen_compute_eflags(s
);
7008 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
7010 case 0xf8: /* clc */
7011 gen_compute_eflags(s
);
7012 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
7014 case 0xf9: /* stc */
7015 gen_compute_eflags(s
);
7016 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
7018 case 0xfc: /* cld */
7019 tcg_gen_movi_i32(cpu_tmp2_i32
, 1);
7020 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
7022 case 0xfd: /* std */
7023 tcg_gen_movi_i32(cpu_tmp2_i32
, -1);
7024 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
7027 /************************/
7028 /* bit operations */
7029 case 0x1ba: /* bt/bts/btr/btc Gv, im */
7030 ot
= dflag
+ OT_WORD
;
7031 modrm
= cpu_ldub_code(env
, s
->pc
++);
7032 op
= (modrm
>> 3) & 7;
7033 mod
= (modrm
>> 6) & 3;
7034 rm
= (modrm
& 7) | REX_B(s
);
7037 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7038 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
7040 gen_op_mov_TN_reg(ot
, 0, rm
);
7043 val
= cpu_ldub_code(env
, s
->pc
++);
7044 gen_op_movl_T1_im(val
);
7049 case 0x1a3: /* bt Gv, Ev */
7052 case 0x1ab: /* bts */
7055 case 0x1b3: /* btr */
7058 case 0x1bb: /* btc */
7061 ot
= dflag
+ OT_WORD
;
7062 modrm
= cpu_ldub_code(env
, s
->pc
++);
7063 reg
= ((modrm
>> 3) & 7) | rex_r
;
7064 mod
= (modrm
>> 6) & 3;
7065 rm
= (modrm
& 7) | REX_B(s
);
7066 gen_op_mov_TN_reg(OT_LONG
, 1, reg
);
7068 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7069 /* specific case: we need to add a displacement */
7070 gen_exts(ot
, cpu_T
[1]);
7071 tcg_gen_sari_tl(cpu_tmp0
, cpu_T
[1], 3 + ot
);
7072 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
7073 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
7074 gen_op_ld_T0_A0(ot
+ s
->mem_index
);
7076 gen_op_mov_TN_reg(ot
, 0, rm
);
7079 tcg_gen_andi_tl(cpu_T
[1], cpu_T
[1], (1 << (3 + ot
)) - 1);
7082 tcg_gen_shr_tl(cpu_cc_src
, cpu_T
[0], cpu_T
[1]);
7083 tcg_gen_movi_tl(cpu_cc_dst
, 0);
7086 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7087 tcg_gen_movi_tl(cpu_tmp0
, 1);
7088 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7089 tcg_gen_or_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7092 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7093 tcg_gen_movi_tl(cpu_tmp0
, 1);
7094 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7095 tcg_gen_not_tl(cpu_tmp0
, cpu_tmp0
);
7096 tcg_gen_and_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7100 tcg_gen_shr_tl(cpu_tmp4
, cpu_T
[0], cpu_T
[1]);
7101 tcg_gen_movi_tl(cpu_tmp0
, 1);
7102 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T
[1]);
7103 tcg_gen_xor_tl(cpu_T
[0], cpu_T
[0], cpu_tmp0
);
7106 set_cc_op(s
, CC_OP_SARB
+ ot
);
7109 gen_op_st_T0_A0(ot
+ s
->mem_index
);
7111 gen_op_mov_reg_T0(ot
, rm
);
7112 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
7113 tcg_gen_movi_tl(cpu_cc_dst
, 0);
7116 case 0x1bc: /* bsf / tzcnt */
7117 case 0x1bd: /* bsr / lzcnt */
7118 ot
= dflag
+ OT_WORD
;
7119 modrm
= cpu_ldub_code(env
, s
->pc
++);
7120 reg
= ((modrm
>> 3) & 7) | rex_r
;
7121 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7122 gen_extu(ot
, cpu_T
[0]);
7124 /* Note that lzcnt and tzcnt are in different extensions. */
7125 if ((prefixes
& PREFIX_REPZ
)
7127 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
7128 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
7130 tcg_gen_mov_tl(cpu_cc_src
, cpu_T
[0]);
7132 /* For lzcnt, reduce the target_ulong result by the
7133 number of zeros that we expect to find at the top. */
7134 gen_helper_clz(cpu_T
[0], cpu_T
[0]);
7135 tcg_gen_subi_tl(cpu_T
[0], cpu_T
[0], TARGET_LONG_BITS
- size
);
7137 /* For tzcnt, a zero input must return the operand size:
7138 force all bits outside the operand size to 1. */
7139 target_ulong mask
= (target_ulong
)-2 << (size
- 1);
7140 tcg_gen_ori_tl(cpu_T
[0], cpu_T
[0], mask
);
7141 gen_helper_ctz(cpu_T
[0], cpu_T
[0]);
7143 /* For lzcnt/tzcnt, C and Z bits are defined and are
7144 related to the result. */
7145 gen_op_update1_cc();
7146 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
7148 /* For bsr/bsf, only the Z bit is defined and it is related
7149 to the input and not the result. */
7150 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T
[0]);
7151 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
7153 /* For bsr, return the bit index of the first 1 bit,
7154 not the count of leading zeros. */
7155 gen_helper_clz(cpu_T
[0], cpu_T
[0]);
7156 tcg_gen_xori_tl(cpu_T
[0], cpu_T
[0], TARGET_LONG_BITS
- 1);
7158 gen_helper_ctz(cpu_T
[0], cpu_T
[0]);
7160 /* ??? The manual says that the output is undefined when the
7161 input is zero, but real hardware leaves it unchanged, and
7162 real programs appear to depend on that. */
7163 tcg_gen_movi_tl(cpu_tmp0
, 0);
7164 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T
[0], cpu_cc_dst
, cpu_tmp0
,
7165 cpu_regs
[reg
], cpu_T
[0]);
7167 gen_op_mov_reg_T0(ot
, reg
);
7169 /************************/
7171 case 0x27: /* daa */
7174 gen_update_cc_op(s
);
7175 gen_helper_daa(cpu_env
);
7176 set_cc_op(s
, CC_OP_EFLAGS
);
7178 case 0x2f: /* das */
7181 gen_update_cc_op(s
);
7182 gen_helper_das(cpu_env
);
7183 set_cc_op(s
, CC_OP_EFLAGS
);
7185 case 0x37: /* aaa */
7188 gen_update_cc_op(s
);
7189 gen_helper_aaa(cpu_env
);
7190 set_cc_op(s
, CC_OP_EFLAGS
);
7192 case 0x3f: /* aas */
7195 gen_update_cc_op(s
);
7196 gen_helper_aas(cpu_env
);
7197 set_cc_op(s
, CC_OP_EFLAGS
);
7199 case 0xd4: /* aam */
7202 val
= cpu_ldub_code(env
, s
->pc
++);
7204 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
7206 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
7207 set_cc_op(s
, CC_OP_LOGICB
);
7210 case 0xd5: /* aad */
7213 val
= cpu_ldub_code(env
, s
->pc
++);
7214 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
7215 set_cc_op(s
, CC_OP_LOGICB
);
7217 /************************/
7219 case 0x90: /* nop */
7220 /* XXX: correct lock test for all insn */
7221 if (prefixes
& PREFIX_LOCK
) {
7224 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7226 goto do_xchg_reg_eax
;
7228 if (prefixes
& PREFIX_REPZ
) {
7229 gen_update_cc_op(s
);
7230 gen_jmp_im(pc_start
- s
->cs_base
);
7231 gen_helper_pause(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7232 s
->is_jmp
= DISAS_TB_JUMP
;
7235 case 0x9b: /* fwait */
7236 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
7237 (HF_MP_MASK
| HF_TS_MASK
)) {
7238 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7240 gen_update_cc_op(s
);
7241 gen_jmp_im(pc_start
- s
->cs_base
);
7242 gen_helper_fwait(cpu_env
);
7245 case 0xcc: /* int3 */
7246 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7248 case 0xcd: /* int N */
7249 val
= cpu_ldub_code(env
, s
->pc
++);
7250 if (s
->vm86
&& s
->iopl
!= 3) {
7251 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7253 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7256 case 0xce: /* into */
7259 gen_update_cc_op(s
);
7260 gen_jmp_im(pc_start
- s
->cs_base
);
7261 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7264 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7265 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
7267 gen_debug(s
, pc_start
- s
->cs_base
);
7271 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
7275 case 0xfa: /* cli */
7277 if (s
->cpl
<= s
->iopl
) {
7278 gen_helper_cli(cpu_env
);
7280 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7284 gen_helper_cli(cpu_env
);
7286 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7290 case 0xfb: /* sti */
7292 if (s
->cpl
<= s
->iopl
) {
7294 gen_helper_sti(cpu_env
);
7295 /* interruptions are enabled only the first insn after sti */
7296 /* If several instructions disable interrupts, only the
7298 if (!(s
->tb
->flags
& HF_INHIBIT_IRQ_MASK
))
7299 gen_helper_set_inhibit_irq(cpu_env
);
7300 /* give a chance to handle pending irqs */
7301 gen_jmp_im(s
->pc
- s
->cs_base
);
7304 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7310 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7314 case 0x62: /* bound */
7317 ot
= dflag
? OT_LONG
: OT_WORD
;
7318 modrm
= cpu_ldub_code(env
, s
->pc
++);
7319 reg
= (modrm
>> 3) & 7;
7320 mod
= (modrm
>> 6) & 3;
7323 gen_op_mov_TN_reg(ot
, 0, reg
);
7324 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7325 gen_jmp_im(pc_start
- s
->cs_base
);
7326 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7327 if (ot
== OT_WORD
) {
7328 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7330 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
7333 case 0x1c8 ... 0x1cf: /* bswap reg */
7334 reg
= (b
& 7) | REX_B(s
);
7335 #ifdef TARGET_X86_64
7337 gen_op_mov_TN_reg(OT_QUAD
, 0, reg
);
7338 tcg_gen_bswap64_i64(cpu_T
[0], cpu_T
[0]);
7339 gen_op_mov_reg_T0(OT_QUAD
, reg
);
7343 gen_op_mov_TN_reg(OT_LONG
, 0, reg
);
7344 tcg_gen_ext32u_tl(cpu_T
[0], cpu_T
[0]);
7345 tcg_gen_bswap32_tl(cpu_T
[0], cpu_T
[0]);
7346 gen_op_mov_reg_T0(OT_LONG
, reg
);
7349 case 0xd6: /* salc */
7352 gen_compute_eflags_c(s
, cpu_T
[0]);
7353 tcg_gen_neg_tl(cpu_T
[0], cpu_T
[0]);
7354 gen_op_mov_reg_T0(OT_BYTE
, R_EAX
);
7356 case 0xe0: /* loopnz */
7357 case 0xe1: /* loopz */
7358 case 0xe2: /* loop */
7359 case 0xe3: /* jecxz */
7363 tval
= (int8_t)insn_get(env
, s
, OT_BYTE
);
7364 next_eip
= s
->pc
- s
->cs_base
;
7369 l1
= gen_new_label();
7370 l2
= gen_new_label();
7371 l3
= gen_new_label();
7374 case 0: /* loopnz */
7376 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7377 gen_op_jz_ecx(s
->aflag
, l3
);
7378 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7381 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
7382 gen_op_jnz_ecx(s
->aflag
, l1
);
7386 gen_op_jz_ecx(s
->aflag
, l1
);
7391 gen_jmp_im(next_eip
);
7400 case 0x130: /* wrmsr */
7401 case 0x132: /* rdmsr */
7403 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7405 gen_update_cc_op(s
);
7406 gen_jmp_im(pc_start
- s
->cs_base
);
7408 gen_helper_rdmsr(cpu_env
);
7410 gen_helper_wrmsr(cpu_env
);
7414 case 0x131: /* rdtsc */
7415 gen_update_cc_op(s
);
7416 gen_jmp_im(pc_start
- s
->cs_base
);
7419 gen_helper_rdtsc(cpu_env
);
7422 gen_jmp(s
, s
->pc
- s
->cs_base
);
7425 case 0x133: /* rdpmc */
7426 gen_update_cc_op(s
);
7427 gen_jmp_im(pc_start
- s
->cs_base
);
7428 gen_helper_rdpmc(cpu_env
);
7430 case 0x134: /* sysenter */
7431 /* For Intel SYSENTER is valid on 64-bit */
7432 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7435 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7437 gen_update_cc_op(s
);
7438 gen_jmp_im(pc_start
- s
->cs_base
);
7439 gen_helper_sysenter(cpu_env
);
7443 case 0x135: /* sysexit */
7444 /* For Intel SYSEXIT is valid on 64-bit */
7445 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7448 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7450 gen_update_cc_op(s
);
7451 gen_jmp_im(pc_start
- s
->cs_base
);
7452 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
));
7456 #ifdef TARGET_X86_64
7457 case 0x105: /* syscall */
7458 /* XXX: is it usable in real mode ? */
7459 gen_update_cc_op(s
);
7460 gen_jmp_im(pc_start
- s
->cs_base
);
7461 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7464 case 0x107: /* sysret */
7466 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7468 gen_update_cc_op(s
);
7469 gen_jmp_im(pc_start
- s
->cs_base
);
7470 gen_helper_sysret(cpu_env
, tcg_const_i32(s
->dflag
));
7471 /* condition codes are modified only in long mode */
7473 set_cc_op(s
, CC_OP_EFLAGS
);
7479 case 0x1a2: /* cpuid */
7480 gen_update_cc_op(s
);
7481 gen_jmp_im(pc_start
- s
->cs_base
);
7482 gen_helper_cpuid(cpu_env
);
7484 case 0xf4: /* hlt */
7486 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7488 gen_update_cc_op(s
);
7489 gen_jmp_im(pc_start
- s
->cs_base
);
7490 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7491 s
->is_jmp
= DISAS_TB_JUMP
;
7495 modrm
= cpu_ldub_code(env
, s
->pc
++);
7496 mod
= (modrm
>> 6) & 3;
7497 op
= (modrm
>> 3) & 7;
7500 if (!s
->pe
|| s
->vm86
)
7502 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7503 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,ldt
.selector
));
7507 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7510 if (!s
->pe
|| s
->vm86
)
7513 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7515 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7516 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7517 gen_jmp_im(pc_start
- s
->cs_base
);
7518 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7519 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7523 if (!s
->pe
|| s
->vm86
)
7525 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7526 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,tr
.selector
));
7530 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7533 if (!s
->pe
|| s
->vm86
)
7536 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7538 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7539 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7540 gen_jmp_im(pc_start
- s
->cs_base
);
7541 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
7542 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7547 if (!s
->pe
|| s
->vm86
)
7549 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7550 gen_update_cc_op(s
);
7552 gen_helper_verr(cpu_env
, cpu_T
[0]);
7554 gen_helper_verw(cpu_env
, cpu_T
[0]);
7556 set_cc_op(s
, CC_OP_EFLAGS
);
7563 modrm
= cpu_ldub_code(env
, s
->pc
++);
7564 mod
= (modrm
>> 6) & 3;
7565 op
= (modrm
>> 3) & 7;
7571 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7572 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7573 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7574 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7575 gen_add_A0_im(s
, 2);
7576 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7578 gen_op_andl_T0_im(0xffffff);
7579 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7584 case 0: /* monitor */
7585 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7588 gen_update_cc_op(s
);
7589 gen_jmp_im(pc_start
- s
->cs_base
);
7590 #ifdef TARGET_X86_64
7591 if (s
->aflag
== 2) {
7592 gen_op_movq_A0_reg(R_EAX
);
7596 gen_op_movl_A0_reg(R_EAX
);
7598 gen_op_andl_A0_ffff();
7600 gen_add_A0_ds_seg(s
);
7601 gen_helper_monitor(cpu_env
, cpu_A0
);
7604 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) ||
7607 gen_update_cc_op(s
);
7608 gen_jmp_im(pc_start
- s
->cs_base
);
7609 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7613 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7617 gen_helper_clac(cpu_env
);
7618 gen_jmp_im(s
->pc
- s
->cs_base
);
7622 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
) ||
7626 gen_helper_stac(cpu_env
);
7627 gen_jmp_im(s
->pc
- s
->cs_base
);
7634 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7635 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7636 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7637 gen_op_st_T0_A0(OT_WORD
+ s
->mem_index
);
7638 gen_add_A0_im(s
, 2);
7639 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, idt
.base
));
7641 gen_op_andl_T0_im(0xffffff);
7642 gen_op_st_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7648 gen_update_cc_op(s
);
7649 gen_jmp_im(pc_start
- s
->cs_base
);
7652 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7655 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7658 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
),
7659 tcg_const_i32(s
->pc
- pc_start
));
7661 s
->is_jmp
= DISAS_TB_JUMP
;
7664 case 1: /* VMMCALL */
7665 if (!(s
->flags
& HF_SVME_MASK
))
7667 gen_helper_vmmcall(cpu_env
);
7669 case 2: /* VMLOAD */
7670 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7673 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7676 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
));
7679 case 3: /* VMSAVE */
7680 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7683 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7686 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
));
7690 if ((!(s
->flags
& HF_SVME_MASK
) &&
7691 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7695 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7698 gen_helper_stgi(cpu_env
);
7702 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7705 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7708 gen_helper_clgi(cpu_env
);
7711 case 6: /* SKINIT */
7712 if ((!(s
->flags
& HF_SVME_MASK
) &&
7713 !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
)) ||
7716 gen_helper_skinit(cpu_env
);
7718 case 7: /* INVLPGA */
7719 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
)
7722 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7725 gen_helper_invlpga(cpu_env
, tcg_const_i32(s
->aflag
));
7731 } else if (s
->cpl
!= 0) {
7732 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7734 gen_svm_check_intercept(s
, pc_start
,
7735 op
==2 ? SVM_EXIT_GDTR_WRITE
: SVM_EXIT_IDTR_WRITE
);
7736 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7737 gen_op_ld_T1_A0(OT_WORD
+ s
->mem_index
);
7738 gen_add_A0_im(s
, 2);
7739 gen_op_ld_T0_A0(CODE64(s
) + OT_LONG
+ s
->mem_index
);
7741 gen_op_andl_T0_im(0xffffff);
7743 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,gdt
.base
));
7744 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,gdt
.limit
));
7746 tcg_gen_st_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,idt
.base
));
7747 tcg_gen_st32_tl(cpu_T
[1], cpu_env
, offsetof(CPUX86State
,idt
.limit
));
7752 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7753 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7754 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]) + 4);
7756 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,cr
[0]));
7758 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 1);
7762 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7764 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7765 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7766 gen_helper_lmsw(cpu_env
, cpu_T
[0]);
7767 gen_jmp_im(s
->pc
- s
->cs_base
);
7772 if (mod
!= 3) { /* invlpg */
7774 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7776 gen_update_cc_op(s
);
7777 gen_jmp_im(pc_start
- s
->cs_base
);
7778 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7779 gen_helper_invlpg(cpu_env
, cpu_A0
);
7780 gen_jmp_im(s
->pc
- s
->cs_base
);
7785 case 0: /* swapgs */
7786 #ifdef TARGET_X86_64
7789 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7791 tcg_gen_ld_tl(cpu_T
[0], cpu_env
,
7792 offsetof(CPUX86State
,segs
[R_GS
].base
));
7793 tcg_gen_ld_tl(cpu_T
[1], cpu_env
,
7794 offsetof(CPUX86State
,kernelgsbase
));
7795 tcg_gen_st_tl(cpu_T
[1], cpu_env
,
7796 offsetof(CPUX86State
,segs
[R_GS
].base
));
7797 tcg_gen_st_tl(cpu_T
[0], cpu_env
,
7798 offsetof(CPUX86State
,kernelgsbase
));
7806 case 1: /* rdtscp */
7807 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
))
7809 gen_update_cc_op(s
);
7810 gen_jmp_im(pc_start
- s
->cs_base
);
7813 gen_helper_rdtscp(cpu_env
);
7816 gen_jmp(s
, s
->pc
- s
->cs_base
);
7828 case 0x108: /* invd */
7829 case 0x109: /* wbinvd */
7831 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7833 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7837 case 0x63: /* arpl or movslS (x86_64) */
7838 #ifdef TARGET_X86_64
7841 /* d_ot is the size of destination */
7842 d_ot
= dflag
+ OT_WORD
;
7844 modrm
= cpu_ldub_code(env
, s
->pc
++);
7845 reg
= ((modrm
>> 3) & 7) | rex_r
;
7846 mod
= (modrm
>> 6) & 3;
7847 rm
= (modrm
& 7) | REX_B(s
);
7850 gen_op_mov_TN_reg(OT_LONG
, 0, rm
);
7852 if (d_ot
== OT_QUAD
)
7853 tcg_gen_ext32s_tl(cpu_T
[0], cpu_T
[0]);
7854 gen_op_mov_reg_T0(d_ot
, reg
);
7856 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7857 if (d_ot
== OT_QUAD
) {
7858 gen_op_lds_T0_A0(OT_LONG
+ s
->mem_index
);
7860 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
7862 gen_op_mov_reg_T0(d_ot
, reg
);
7868 TCGv t0
, t1
, t2
, a0
;
7870 if (!s
->pe
|| s
->vm86
)
7872 t0
= tcg_temp_local_new();
7873 t1
= tcg_temp_local_new();
7874 t2
= tcg_temp_local_new();
7876 modrm
= cpu_ldub_code(env
, s
->pc
++);
7877 reg
= (modrm
>> 3) & 7;
7878 mod
= (modrm
>> 6) & 3;
7881 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7882 gen_op_ld_v(ot
+ s
->mem_index
, t0
, cpu_A0
);
7883 a0
= tcg_temp_local_new();
7884 tcg_gen_mov_tl(a0
, cpu_A0
);
7886 gen_op_mov_v_reg(ot
, t0
, rm
);
7889 gen_op_mov_v_reg(ot
, t1
, reg
);
7890 tcg_gen_andi_tl(cpu_tmp0
, t0
, 3);
7891 tcg_gen_andi_tl(t1
, t1
, 3);
7892 tcg_gen_movi_tl(t2
, 0);
7893 label1
= gen_new_label();
7894 tcg_gen_brcond_tl(TCG_COND_GE
, cpu_tmp0
, t1
, label1
);
7895 tcg_gen_andi_tl(t0
, t0
, ~3);
7896 tcg_gen_or_tl(t0
, t0
, t1
);
7897 tcg_gen_movi_tl(t2
, CC_Z
);
7898 gen_set_label(label1
);
7900 gen_op_st_v(ot
+ s
->mem_index
, t0
, a0
);
7903 gen_op_mov_reg_v(ot
, rm
, t0
);
7905 gen_compute_eflags(s
);
7906 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7907 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7913 case 0x102: /* lar */
7914 case 0x103: /* lsl */
7918 if (!s
->pe
|| s
->vm86
)
7920 ot
= dflag
? OT_LONG
: OT_WORD
;
7921 modrm
= cpu_ldub_code(env
, s
->pc
++);
7922 reg
= ((modrm
>> 3) & 7) | rex_r
;
7923 gen_ldst_modrm(env
, s
, modrm
, OT_WORD
, OR_TMP0
, 0);
7924 t0
= tcg_temp_local_new();
7925 gen_update_cc_op(s
);
7927 gen_helper_lar(t0
, cpu_env
, cpu_T
[0]);
7929 gen_helper_lsl(t0
, cpu_env
, cpu_T
[0]);
7931 tcg_gen_andi_tl(cpu_tmp0
, cpu_cc_src
, CC_Z
);
7932 label1
= gen_new_label();
7933 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_tmp0
, 0, label1
);
7934 gen_op_mov_reg_v(ot
, reg
, t0
);
7935 gen_set_label(label1
);
7936 set_cc_op(s
, CC_OP_EFLAGS
);
7941 modrm
= cpu_ldub_code(env
, s
->pc
++);
7942 mod
= (modrm
>> 6) & 3;
7943 op
= (modrm
>> 3) & 7;
7945 case 0: /* prefetchnta */
7946 case 1: /* prefetchnt0 */
7947 case 2: /* prefetchnt0 */
7948 case 3: /* prefetchnt0 */
7951 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
7952 /* nothing more to do */
7954 default: /* nop (multi byte) */
7955 gen_nop_modrm(env
, s
, modrm
);
7959 case 0x119 ... 0x11f: /* nop (multi byte) */
7960 modrm
= cpu_ldub_code(env
, s
->pc
++);
7961 gen_nop_modrm(env
, s
, modrm
);
7963 case 0x120: /* mov reg, crN */
7964 case 0x122: /* mov crN, reg */
7966 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7968 modrm
= cpu_ldub_code(env
, s
->pc
++);
7969 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7970 * AMD documentation (24594.pdf) and testing of
7971 * intel 386 and 486 processors all show that the mod bits
7972 * are assumed to be 1's, regardless of actual values.
7974 rm
= (modrm
& 7) | REX_B(s
);
7975 reg
= ((modrm
>> 3) & 7) | rex_r
;
7980 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
7981 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
7990 gen_update_cc_op(s
);
7991 gen_jmp_im(pc_start
- s
->cs_base
);
7993 gen_op_mov_TN_reg(ot
, 0, rm
);
7994 gen_helper_write_crN(cpu_env
, tcg_const_i32(reg
),
7996 gen_jmp_im(s
->pc
- s
->cs_base
);
7999 gen_helper_read_crN(cpu_T
[0], cpu_env
, tcg_const_i32(reg
));
8000 gen_op_mov_reg_T0(ot
, rm
);
8008 case 0x121: /* mov reg, drN */
8009 case 0x123: /* mov drN, reg */
8011 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
8013 modrm
= cpu_ldub_code(env
, s
->pc
++);
8014 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
8015 * AMD documentation (24594.pdf) and testing of
8016 * intel 386 and 486 processors all show that the mod bits
8017 * are assumed to be 1's, regardless of actual values.
8019 rm
= (modrm
& 7) | REX_B(s
);
8020 reg
= ((modrm
>> 3) & 7) | rex_r
;
8025 /* XXX: do it dynamically with CR4.DE bit */
8026 if (reg
== 4 || reg
== 5 || reg
>= 8)
8029 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
8030 gen_op_mov_TN_reg(ot
, 0, rm
);
8031 gen_helper_movl_drN_T0(cpu_env
, tcg_const_i32(reg
), cpu_T
[0]);
8032 gen_jmp_im(s
->pc
- s
->cs_base
);
8035 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
8036 tcg_gen_ld_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
,dr
[reg
]));
8037 gen_op_mov_reg_T0(ot
, rm
);
8041 case 0x106: /* clts */
8043 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
8045 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
8046 gen_helper_clts(cpu_env
);
8047 /* abort block because static cpu state changed */
8048 gen_jmp_im(s
->pc
- s
->cs_base
);
8052 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
8053 case 0x1c3: /* MOVNTI reg, mem */
8054 if (!(s
->cpuid_features
& CPUID_SSE2
))
8056 ot
= s
->dflag
== 2 ? OT_QUAD
: OT_LONG
;
8057 modrm
= cpu_ldub_code(env
, s
->pc
++);
8058 mod
= (modrm
>> 6) & 3;
8061 reg
= ((modrm
>> 3) & 7) | rex_r
;
8062 /* generate a generic store */
8063 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
8066 modrm
= cpu_ldub_code(env
, s
->pc
++);
8067 mod
= (modrm
>> 6) & 3;
8068 op
= (modrm
>> 3) & 7;
8070 case 0: /* fxsave */
8071 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
8072 (s
->prefix
& PREFIX_LOCK
))
8074 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8075 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8078 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8079 gen_update_cc_op(s
);
8080 gen_jmp_im(pc_start
- s
->cs_base
);
8081 gen_helper_fxsave(cpu_env
, cpu_A0
, tcg_const_i32((s
->dflag
== 2)));
8083 case 1: /* fxrstor */
8084 if (mod
== 3 || !(s
->cpuid_features
& CPUID_FXSR
) ||
8085 (s
->prefix
& PREFIX_LOCK
))
8087 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8088 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8091 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8092 gen_update_cc_op(s
);
8093 gen_jmp_im(pc_start
- s
->cs_base
);
8094 gen_helper_fxrstor(cpu_env
, cpu_A0
,
8095 tcg_const_i32((s
->dflag
== 2)));
8097 case 2: /* ldmxcsr */
8098 case 3: /* stmxcsr */
8099 if (s
->flags
& HF_TS_MASK
) {
8100 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8103 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
) ||
8106 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8108 gen_op_ld_T0_A0(OT_LONG
+ s
->mem_index
);
8109 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T
[0]);
8110 gen_helper_ldmxcsr(cpu_env
, cpu_tmp2_i32
);
8112 tcg_gen_ld32u_tl(cpu_T
[0], cpu_env
, offsetof(CPUX86State
, mxcsr
));
8113 gen_op_st_T0_A0(OT_LONG
+ s
->mem_index
);
8116 case 5: /* lfence */
8117 case 6: /* mfence */
8118 if ((modrm
& 0xc7) != 0xc0 || !(s
->cpuid_features
& CPUID_SSE2
))
8121 case 7: /* sfence / clflush */
8122 if ((modrm
& 0xc7) == 0xc0) {
8124 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8125 if (!(s
->cpuid_features
& CPUID_SSE
))
8129 if (!(s
->cpuid_features
& CPUID_CLFLUSH
))
8131 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8138 case 0x10d: /* 3DNow! prefetch(w) */
8139 modrm
= cpu_ldub_code(env
, s
->pc
++);
8140 mod
= (modrm
>> 6) & 3;
8143 gen_lea_modrm(env
, s
, modrm
, ®_addr
, &offset_addr
);
8144 /* ignore for now */
8146 case 0x1aa: /* rsm */
8147 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
8148 if (!(s
->flags
& HF_SMM_MASK
))
8150 gen_update_cc_op(s
);
8151 gen_jmp_im(s
->pc
- s
->cs_base
);
8152 gen_helper_rsm(cpu_env
);
8155 case 0x1b8: /* SSE4.2 popcnt */
8156 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
8159 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
8162 modrm
= cpu_ldub_code(env
, s
->pc
++);
8163 reg
= ((modrm
>> 3) & 7) | rex_r
;
8165 if (s
->prefix
& PREFIX_DATA
)
8167 else if (s
->dflag
!= 2)
8172 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
8173 gen_helper_popcnt(cpu_T
[0], cpu_env
, cpu_T
[0], tcg_const_i32(ot
));
8174 gen_op_mov_reg_T0(ot
, reg
);
8176 set_cc_op(s
, CC_OP_EFLAGS
);
8178 case 0x10e ... 0x10f:
8179 /* 3DNow! instructions, ignore prefixes */
8180 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
8181 case 0x110 ... 0x117:
8182 case 0x128 ... 0x12f:
8183 case 0x138 ... 0x13a:
8184 case 0x150 ... 0x179:
8185 case 0x17c ... 0x17f:
8187 case 0x1c4 ... 0x1c6:
8188 case 0x1d0 ... 0x1fe:
8189 gen_sse(env
, s
, b
, pc_start
, rex_r
);
8194 /* lock generation */
8195 if (s
->prefix
& PREFIX_LOCK
)
8196 gen_helper_unlock();
8199 if (s
->prefix
& PREFIX_LOCK
)
8200 gen_helper_unlock();
8201 /* XXX: ensure that no lock was generated */
8202 gen_exception(s
, EXCP06_ILLOP
, pc_start
- s
->cs_base
);
8206 void optimize_flags_init(void)
8208 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
8209 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
,
8210 offsetof(CPUX86State
, cc_op
), "cc_op");
8211 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_dst
),
8213 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src
),
8215 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUX86State
, cc_src2
),
8218 #ifdef TARGET_X86_64
8219 cpu_regs
[R_EAX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8220 offsetof(CPUX86State
, regs
[R_EAX
]), "rax");
8221 cpu_regs
[R_ECX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8222 offsetof(CPUX86State
, regs
[R_ECX
]), "rcx");
8223 cpu_regs
[R_EDX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8224 offsetof(CPUX86State
, regs
[R_EDX
]), "rdx");
8225 cpu_regs
[R_EBX
] = tcg_global_mem_new_i64(TCG_AREG0
,
8226 offsetof(CPUX86State
, regs
[R_EBX
]), "rbx");
8227 cpu_regs
[R_ESP
] = tcg_global_mem_new_i64(TCG_AREG0
,
8228 offsetof(CPUX86State
, regs
[R_ESP
]), "rsp");
8229 cpu_regs
[R_EBP
] = tcg_global_mem_new_i64(TCG_AREG0
,
8230 offsetof(CPUX86State
, regs
[R_EBP
]), "rbp");
8231 cpu_regs
[R_ESI
] = tcg_global_mem_new_i64(TCG_AREG0
,
8232 offsetof(CPUX86State
, regs
[R_ESI
]), "rsi");
8233 cpu_regs
[R_EDI
] = tcg_global_mem_new_i64(TCG_AREG0
,
8234 offsetof(CPUX86State
, regs
[R_EDI
]), "rdi");
8235 cpu_regs
[8] = tcg_global_mem_new_i64(TCG_AREG0
,
8236 offsetof(CPUX86State
, regs
[8]), "r8");
8237 cpu_regs
[9] = tcg_global_mem_new_i64(TCG_AREG0
,
8238 offsetof(CPUX86State
, regs
[9]), "r9");
8239 cpu_regs
[10] = tcg_global_mem_new_i64(TCG_AREG0
,
8240 offsetof(CPUX86State
, regs
[10]), "r10");
8241 cpu_regs
[11] = tcg_global_mem_new_i64(TCG_AREG0
,
8242 offsetof(CPUX86State
, regs
[11]), "r11");
8243 cpu_regs
[12] = tcg_global_mem_new_i64(TCG_AREG0
,
8244 offsetof(CPUX86State
, regs
[12]), "r12");
8245 cpu_regs
[13] = tcg_global_mem_new_i64(TCG_AREG0
,
8246 offsetof(CPUX86State
, regs
[13]), "r13");
8247 cpu_regs
[14] = tcg_global_mem_new_i64(TCG_AREG0
,
8248 offsetof(CPUX86State
, regs
[14]), "r14");
8249 cpu_regs
[15] = tcg_global_mem_new_i64(TCG_AREG0
,
8250 offsetof(CPUX86State
, regs
[15]), "r15");
8252 cpu_regs
[R_EAX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8253 offsetof(CPUX86State
, regs
[R_EAX
]), "eax");
8254 cpu_regs
[R_ECX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8255 offsetof(CPUX86State
, regs
[R_ECX
]), "ecx");
8256 cpu_regs
[R_EDX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8257 offsetof(CPUX86State
, regs
[R_EDX
]), "edx");
8258 cpu_regs
[R_EBX
] = tcg_global_mem_new_i32(TCG_AREG0
,
8259 offsetof(CPUX86State
, regs
[R_EBX
]), "ebx");
8260 cpu_regs
[R_ESP
] = tcg_global_mem_new_i32(TCG_AREG0
,
8261 offsetof(CPUX86State
, regs
[R_ESP
]), "esp");
8262 cpu_regs
[R_EBP
] = tcg_global_mem_new_i32(TCG_AREG0
,
8263 offsetof(CPUX86State
, regs
[R_EBP
]), "ebp");
8264 cpu_regs
[R_ESI
] = tcg_global_mem_new_i32(TCG_AREG0
,
8265 offsetof(CPUX86State
, regs
[R_ESI
]), "esi");
8266 cpu_regs
[R_EDI
] = tcg_global_mem_new_i32(TCG_AREG0
,
8267 offsetof(CPUX86State
, regs
[R_EDI
]), "edi");
8271 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8272 basic block 'tb'. If search_pc is TRUE, also generate PC
8273 information for each intermediate instruction. */
8274 static inline void gen_intermediate_code_internal(X86CPU
*cpu
,
8275 TranslationBlock
*tb
,
8278 CPUState
*cs
= CPU(cpu
);
8279 CPUX86State
*env
= &cpu
->env
;
8280 DisasContext dc1
, *dc
= &dc1
;
8281 target_ulong pc_ptr
;
8282 uint16_t *gen_opc_end
;
8286 target_ulong pc_start
;
8287 target_ulong cs_base
;
8291 /* generate intermediate code */
8293 cs_base
= tb
->cs_base
;
8296 dc
->pe
= (flags
>> HF_PE_SHIFT
) & 1;
8297 dc
->code32
= (flags
>> HF_CS32_SHIFT
) & 1;
8298 dc
->ss32
= (flags
>> HF_SS32_SHIFT
) & 1;
8299 dc
->addseg
= (flags
>> HF_ADDSEG_SHIFT
) & 1;
8301 dc
->vm86
= (flags
>> VM_SHIFT
) & 1;
8302 dc
->cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
8303 dc
->iopl
= (flags
>> IOPL_SHIFT
) & 3;
8304 dc
->tf
= (flags
>> TF_SHIFT
) & 1;
8305 dc
->singlestep_enabled
= cs
->singlestep_enabled
;
8306 dc
->cc_op
= CC_OP_DYNAMIC
;
8307 dc
->cc_op_dirty
= false;
8308 dc
->cs_base
= cs_base
;
8310 dc
->popl_esp_hack
= 0;
8311 /* select memory access functions */
8313 if (flags
& HF_SOFTMMU_MASK
) {
8314 dc
->mem_index
= (cpu_mmu_index(env
) + 1) << 2;
8316 dc
->cpuid_features
= env
->features
[FEAT_1_EDX
];
8317 dc
->cpuid_ext_features
= env
->features
[FEAT_1_ECX
];
8318 dc
->cpuid_ext2_features
= env
->features
[FEAT_8000_0001_EDX
];
8319 dc
->cpuid_ext3_features
= env
->features
[FEAT_8000_0001_ECX
];
8320 dc
->cpuid_7_0_ebx_features
= env
->features
[FEAT_7_0_EBX
];
8321 #ifdef TARGET_X86_64
8322 dc
->lma
= (flags
>> HF_LMA_SHIFT
) & 1;
8323 dc
->code64
= (flags
>> HF_CS64_SHIFT
) & 1;
8326 dc
->jmp_opt
= !(dc
->tf
|| cs
->singlestep_enabled
||
8327 (flags
& HF_INHIBIT_IRQ_MASK
)
8328 #ifndef CONFIG_SOFTMMU
8329 || (flags
& HF_SOFTMMU_MASK
)
8333 /* check addseg logic */
8334 if (!dc
->addseg
&& (dc
->vm86
|| !dc
->pe
|| !dc
->code32
))
8335 printf("ERROR addseg\n");
8338 cpu_T
[0] = tcg_temp_new();
8339 cpu_T
[1] = tcg_temp_new();
8340 cpu_A0
= tcg_temp_new();
8342 cpu_tmp0
= tcg_temp_new();
8343 cpu_tmp1_i64
= tcg_temp_new_i64();
8344 cpu_tmp2_i32
= tcg_temp_new_i32();
8345 cpu_tmp3_i32
= tcg_temp_new_i32();
8346 cpu_tmp4
= tcg_temp_new();
8347 cpu_ptr0
= tcg_temp_new_ptr();
8348 cpu_ptr1
= tcg_temp_new_ptr();
8349 cpu_cc_srcT
= tcg_temp_local_new();
8351 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
8353 dc
->is_jmp
= DISAS_NEXT
;
8357 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
8359 max_insns
= CF_COUNT_MASK
;
8363 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
8364 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
8365 if (bp
->pc
== pc_ptr
&&
8366 !((bp
->flags
& BP_CPU
) && (tb
->flags
& HF_RF_MASK
))) {
8367 gen_debug(dc
, pc_ptr
- dc
->cs_base
);
8373 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8377 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8379 tcg_ctx
.gen_opc_pc
[lj
] = pc_ptr
;
8380 gen_opc_cc_op
[lj
] = dc
->cc_op
;
8381 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
8382 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
8384 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
8387 pc_ptr
= disas_insn(env
, dc
, pc_ptr
);
8389 /* stop translation if indicated */
8392 /* if single step mode, we generate only one instruction and
8393 generate an exception */
8394 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8395 the flag and abort the translation to give the irqs a
8396 change to be happen */
8397 if (dc
->tf
|| dc
->singlestep_enabled
||
8398 (flags
& HF_INHIBIT_IRQ_MASK
)) {
8399 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8403 /* if too long translation, stop generation too */
8404 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
||
8405 (pc_ptr
- pc_start
) >= (TARGET_PAGE_SIZE
- 32) ||
8406 num_insns
>= max_insns
) {
8407 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8412 gen_jmp_im(pc_ptr
- dc
->cs_base
);
8417 if (tb
->cflags
& CF_LAST_IO
)
8419 gen_tb_end(tb
, num_insns
);
8420 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
8421 /* we don't forget to fill the last values */
8423 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
8426 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
8430 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
8432 qemu_log("----------------\n");
8433 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
8434 #ifdef TARGET_X86_64
8439 disas_flags
= !dc
->code32
;
8440 log_target_disas(env
, pc_start
, pc_ptr
- pc_start
, disas_flags
);
8446 tb
->size
= pc_ptr
- pc_start
;
8447 tb
->icount
= num_insns
;
8451 void gen_intermediate_code(CPUX86State
*env
, TranslationBlock
*tb
)
8453 gen_intermediate_code_internal(x86_env_get_cpu(env
), tb
, false);
8456 void gen_intermediate_code_pc(CPUX86State
*env
, TranslationBlock
*tb
)
8458 gen_intermediate_code_internal(x86_env_get_cpu(env
), tb
, true);
8461 void restore_state_to_opc(CPUX86State
*env
, TranslationBlock
*tb
, int pc_pos
)
8465 if (qemu_loglevel_mask(CPU_LOG_TB_OP
)) {
8467 qemu_log("RESTORE:\n");
8468 for(i
= 0;i
<= pc_pos
; i
++) {
8469 if (tcg_ctx
.gen_opc_instr_start
[i
]) {
8470 qemu_log("0x%04x: " TARGET_FMT_lx
"\n", i
,
8471 tcg_ctx
.gen_opc_pc
[i
]);
8474 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx
" cs_base=%x\n",
8475 pc_pos
, tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
,
8476 (uint32_t)tb
->cs_base
);
8479 env
->eip
= tcg_ctx
.gen_opc_pc
[pc_pos
] - tb
->cs_base
;
8480 cc_op
= gen_opc_cc_op
[pc_pos
];
8481 if (cc_op
!= CC_OP_DYNAMIC
)