 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.

#include "qemu/host-utils.h"
#include "disas/disas.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

static int x86_64_hregs;

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, int ot, int d);

/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX, /* MUST be even numbered */
    OR_TMP0 = 16, /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
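
/* Example derived from the table above (editor's illustration): a CMP sets
   CC_OP_SUBL, whose row uses DST|SRC|SRCT; a following AND sets CC_OP_LOGICL,
   which uses only DST.  set_cc_op() below therefore computes
       dead = cc_op_live[CC_OP_SUBL] & ~cc_op_live[CC_OP_LOGICL];
   which is USES_CC_SRC | USES_CC_SRCT, and discards those two globals. */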
static void set_cc_op(DisasContext *s, CCOp op)
    if (s->cc_op == op) {

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean. */
        s->cc_op_dirty = false;
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        s->cc_op_dirty = true;

static void gen_update_cc_op(DisasContext *s)
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
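
/* Typical call pattern (a sketch, not a verbatim call site): an insn
   translator records the flag state lazily and only spills it on demand:

       set_cc_op(s, CC_OP_ADDB + ot);   // describe flags, compute nothing
       ...
       gen_update_cc_op(s);             // materialize cpu_cc_op if dirty
*/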
static inline void gen_op_movl_T0_0(void)
    tcg_gen_movi_tl(cpu_T[0], 0);

static inline void gen_op_movl_T0_im(int32_t val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_op_movl_T0_imu(uint32_t val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_op_movl_T1_im(int32_t val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_movl_T1_imu(uint32_t val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_movl_A0_im(uint32_t val)
    tcg_gen_movi_tl(cpu_A0, val);

static inline void gen_op_movq_A0_im(int64_t val)
    tcg_gen_movi_tl(cpu_A0, val);

static inline void gen_movtl_T0_im(target_ulong val)
    tcg_gen_movi_tl(cpu_T[0], val);

static inline void gen_movtl_T1_im(target_ulong val)
    tcg_gen_movi_tl(cpu_T[1], val);

static inline void gen_op_andl_T0_ffff(void)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);

static inline void gen_op_andl_T0_im(uint32_t val)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);

static inline void gen_op_movl_T0_T1(void)
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);

static inline void gen_op_andl_A0_ffff(void)
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);

#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
    if (reg >= 8 || x86_64_hregs) {
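
/* Example: in a byte op without REX, register number 4 encodes AH
   (bits 15..8 of EAX), so byte_reg_is_xH(4) is true; once any REX prefix
   sets x86_64_hregs, the same number encodes SPL and this returns false. */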
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
    if (!byte_reg_is_xH(reg)) {
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        tcg_gen_mov_tl(cpu_regs[reg], t0);

static inline void gen_op_mov_reg_T0(int ot, int reg)
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);

static inline void gen_op_mov_reg_T1(int ot, int reg)
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);

static inline void gen_op_mov_reg_A0(int size, int reg)
    tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
    default: /* XXX this shouldn't be reached; abort? */
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
        tcg_gen_mov_tl(t0, cpu_regs[reg]);

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);

static inline void gen_op_movl_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addl_A0_im(int32_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);

static inline void gen_op_addq_A0_im(int64_t val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);

static void gen_add_A0_im(DisasContext *s, int val)
    gen_op_addq_A0_im(val);
    gen_op_addl_A0_im(val);

static inline void gen_op_addl_T0_T1(void)
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

static inline void gen_op_jmp_T0(void)
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
    tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
    tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);

static inline void gen_op_add_reg_T0(int size, int reg)
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
    tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
    tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);

static inline void gen_op_movl_A0_seg(int reg)
    tcg_gen_ld32u_tl(cpu_A0, cpu_env,
                     offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));

static inline void gen_op_addq_A0_seg(int reg)
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_movq_A0_reg(int reg)
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);

static inline void gen_op_lds_T0_A0(int idx)
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
    tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
    tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld8u(t0, a0, mem_index);
    tcg_gen_qemu_ld16u(t0, a0, mem_index);
    tcg_gen_qemu_ld32u(t0, a0, mem_index);
    /* Should never happen on 32-bit targets.  */
    tcg_gen_qemu_ld64(t0, a0, mem_index);
/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_ldu_T0_A0(int idx)
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_ld_T1_A0(int idx)
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_st8(t0, a0, mem_index);
    tcg_gen_qemu_st16(t0, a0, mem_index);
    tcg_gen_qemu_st32(t0, a0, mem_index);
    /* Should never happen on 32-bit targets.  */
    tcg_gen_qemu_st64(t0, a0, mem_index);

static inline void gen_op_st_T0_A0(int idx)
    gen_op_st_v(idx, cpu_T[0], cpu_A0);

static inline void gen_op_st_T1_A0(int idx)
    gen_op_st_v(idx, cpu_T[1], cpu_A0);

static inline void gen_jmp_im(target_ulong pc)
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));

static inline void gen_string_movl_A0_ESI(DisasContext *s)
    override = s->override;
    gen_op_movq_A0_seg(override);
    gen_op_addq_A0_reg_sN(0, R_ESI);
    gen_op_movq_A0_reg(R_ESI);
    if (s->addseg && override < 0)
    gen_op_movl_A0_seg(override);
    gen_op_addl_A0_reg_sN(0, R_ESI);
    gen_op_movl_A0_reg(R_ESI);
    /* 16 bit address, always override */
    gen_op_movl_A0_reg(R_ESI);
    gen_op_andl_A0_ffff();
    gen_op_addl_A0_seg(s, override);

static inline void gen_string_movl_A0_EDI(DisasContext *s)
    gen_op_movq_A0_reg(R_EDI);
    gen_op_movl_A0_seg(R_ES);
    gen_op_addl_A0_reg_sN(0, R_EDI);
    gen_op_movl_A0_reg(R_EDI);
    gen_op_movl_A0_reg(R_EDI);
    gen_op_andl_A0_ffff();
    gen_op_addl_A0_seg(s, R_ES);

static inline void gen_op_movl_T0_Dshift(int ot)
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
    tcg_gen_ext8s_tl(dst, src);
    tcg_gen_ext8u_tl(dst, src);
    tcg_gen_ext16s_tl(dst, src);
    tcg_gen_ext16u_tl(dst, src);
    tcg_gen_ext32s_tl(dst, src);
    tcg_gen_ext32u_tl(dst, src);

static void gen_extu(int ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, false);

static void gen_exts(int ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, true);
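
/* Example: gen_extu(OT_WORD, reg) zero-extends the low 16 bits of reg in
   place via tcg_gen_ext16u_tl(), while gen_exts(OT_WORD, reg) sign-extends
   the same bits via tcg_gen_ext16s_tl(). */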
static inline void gen_op_jnz_ecx(int size, int label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);

static inline void gen_op_jz_ecx(int size, int label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
    gen_helper_inb(v, n);
    gen_helper_inw(v, n);
    gen_helper_inl(v, n);

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
    gen_helper_outb(v, n);
    gen_helper_outw(v, n);
    gen_helper_outl(v, n);

static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
        gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
        gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
    if (s->flags & HF_SVMI_MASK) {
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
static inline void gen_movs(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static void gen_op_update1_cc(void)
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static void gen_op_update2_cc(void)
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static void gen_op_update3_cc(TCGv reg)
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);

static inline void gen_op_testl_T0_T1_cc(void)
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);

static void gen_op_update_neg_cc(void)
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
    TCGv zero, dst, src1, src2;

    if (s->cc_op == CC_OP_EFLAGS) {
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z);
        set_cc_op(s, CC_OP_EFLAGS);

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    zero = tcg_const_tl(0);
    if (dead & USES_CC_DST) {
    if (dead & USES_CC_SRC) {
    if (dead & USES_CC_SRC2) {

    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
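
/* Calling convention sketch (derived from the code above): after
   gen_compute_eflags(s), the complete EFLAGS value lives in cpu_cc_src and
   s->cc_op is CC_OP_EFLAGS, so a caller can test an individual bit with a
   plain mask, e.g.

       gen_compute_eflags(s);
       tcg_gen_andi_tl(reg, cpu_cc_src, CC_Z);
*/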
typedef struct CCPrepare {

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SARB ... CC_OP_SARQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
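
/* Example: after a 32-bit SUB, the carry prepared above reduces to the
   unsigned comparison (uint32_t)CC_SRCT < (uint32_t)CC_SRC -- a single
   setcond instead of a full EFLAGS recomputation. */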
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_S };
    return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    int size = (s->cc_op - CC_OP_ADDB) & 3;
    TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
    return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                         .mask = -1, .no_setcond = true };
    return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_O };

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_Z };
    return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    int size = (s->cc_op - CC_OP_ADDB) & 3;
    TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
    int inv, jcc_op, size, cond;

    jcc_op = (b >> 1) & 7;

    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
        gen_extu(size, cpu_tmp4);
        t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                           .reg2 = t0, .mask = -1, .use_reg2 = true };

        tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
        gen_exts(size, cpu_tmp4);
        t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
        cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                           .reg2 = t0, .mask = -1, .use_reg2 = true };

        /* This actually generates good code for JC, JZ and JS.  */
        cc = gen_prepare_eflags_o(s, reg);
        cc = gen_prepare_eflags_c(s, reg);
        cc = gen_prepare_eflags_z(s, reg);
        gen_compute_eflags(s);
        cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                           .mask = CC_Z | CC_C };
        cc = gen_prepare_eflags_s(s, reg);
        cc = gen_prepare_eflags_p(s, reg);
        gen_compute_eflags(s);
        if (TCGV_EQUAL(reg, cpu_cc_src)) {
        tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
        tcg_gen_xor_tl(reg, reg, cpu_cc_src);
        cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                           .mask = CC_S };
        gen_compute_eflags(s);
        if (TCGV_EQUAL(reg, cpu_cc_src)) {
        tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
        tcg_gen_xor_tl(reg, reg, cpu_cc_src);
        cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                           .mask = CC_S | CC_Z };

    cc.cond = tcg_invert_cond(cc.cond);
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
            tcg_gen_mov_tl(reg, cc.reg);

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
    tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
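
/* Illustrative lowering: SETZ after a logic op prepares
   { .cond = TCG_COND_EQ, .reg = <ext of cpu_cc_dst>, .mask = -1 },
   so gen_setcc1() falls through to the final
   tcg_gen_setcondi_tl(TCG_COND_EQ, reg, cc.reg, 0). */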
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
    gen_setcc1(s, JCC_B << 1, reg);

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
    tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, int l1)
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
    set_cc_op(s, CC_OP_DYNAMIC);
    tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_jmp_tb(s, next_eip, 1);
static inline void gen_stos(DisasContext *s, int ot)
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_lods(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);

static inline void gen_scas(DisasContext *s, int ot)
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_cmps(DisasContext *s, int ot)
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_ins(DisasContext *s, int ot)
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_outs(DisasContext *s, int ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                   \
static inline void gen_repz_ ## op(DisasContext *s, int ot,            \
                                   target_ulong cur_eip, target_ulong next_eip) \
    gen_update_cc_op(s);                                               \
    l2 = gen_jz_ecx_string(s, next_eip);                               \
    gen_ ## op(s, ot);                                                 \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                            \
    /* a loop would cause two single step exceptions if ECX = 1        \
       before rep string_insn */                                       \
    gen_op_jz_ecx(s->aflag, l2);                                       \
    gen_jmp(s, cur_eip);                                               \

#define GEN_REPZ2(op)                                                  \
static inline void gen_repz_ ## op(DisasContext *s, int ot,            \
                                   target_ulong cur_eip,               \
                                   target_ulong next_eip,              \
    gen_update_cc_op(s);                                               \
    l2 = gen_jz_ecx_string(s, next_eip);                               \
    gen_ ## op(s, ot);                                                 \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                            \
    gen_update_cc_op(s);                                               \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                          \
    gen_op_jz_ecx(s->aflag, l2);                                       \
    gen_jmp(s, cur_eip);                                               \
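
/* These macros are instantiated once per string instruction further down in
   this file; GEN_REPZ(movs), for instance, defines gen_repz_movs(), which
   wraps gen_movs() in the ECX test/decrement loop spelled out above. */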
static void gen_helper_fp_arith_ST0_FT0(int op)
    gen_helper_fadd_ST0_FT0(cpu_env);
    gen_helper_fmul_ST0_FT0(cpu_env);
    gen_helper_fcom_ST0_FT0(cpu_env);
    gen_helper_fcom_ST0_FT0(cpu_env);
    gen_helper_fsub_ST0_FT0(cpu_env);
    gen_helper_fsubr_ST0_FT0(cpu_env);
    gen_helper_fdiv_ST0_FT0(cpu_env);
    gen_helper_fdivr_ST0_FT0(cpu_env);

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
    TCGv_i32 tmp = tcg_const_i32(opreg);
    gen_helper_fadd_STN_ST0(cpu_env, tmp);
    gen_helper_fmul_STN_ST0(cpu_env, tmp);
    gen_helper_fsubr_STN_ST0(cpu_env, tmp);
    gen_helper_fsub_STN_ST0(cpu_env, tmp);
    gen_helper_fdivr_STN_ST0(cpu_env, tmp);
    gen_helper_fdiv_STN_ST0(cpu_env, tmp);
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
    gen_op_mov_TN_reg(ot, 0, d);
    gen_op_ld_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(s1, cpu_tmp4);
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    gen_op_update3_cc(cpu_tmp4);
    set_cc_op(s1, CC_OP_ADCB + ot);

    gen_compute_eflags_c(s1, cpu_tmp4);
    tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    gen_op_update3_cc(cpu_tmp4);
    set_cc_op(s1, CC_OP_SBBB + ot);

    gen_op_addl_T0_T1();
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    gen_op_update2_cc();
    set_cc_op(s1, CC_OP_ADDB + ot);

    tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
    tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    gen_op_update2_cc();
    set_cc_op(s1, CC_OP_SUBB + ot);

    tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    gen_op_update1_cc();
    set_cc_op(s1, CC_OP_LOGICB + ot);

    tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    gen_op_update1_cc();
    set_cc_op(s1, CC_OP_LOGICB + ot);

    tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    gen_op_update1_cc();
    set_cc_op(s1, CC_OP_LOGICB + ot);

    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
    set_cc_op(s1, CC_OP_SUBB + ot);
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
    gen_op_mov_TN_reg(ot, 0, d);
    gen_op_ld_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
    set_cc_op(s1, CC_OP_INCB + ot);
    tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
    set_cc_op(s1, CC_OP_DECB + ot);
    gen_op_mov_reg_T0(ot, d);
    gen_op_st_T0_A0(ot + s1->mem_index);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
                            TCGv count, bool is_right)
    TCGv_i32 z32, s32, oldop;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents. */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_dst, result);
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries. */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;

    /* Conditionally store the CC_OP value. */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable. */
    set_cc_op(s, CC_OP_DYNAMIC);
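
/* The movcond sequence above implements the x86 rule that a shift by zero
   leaves EFLAGS (and hence CC_OP) untouched: each CC variable and cpu_cc_op
   itself is only replaced when count != 0, i.e. conceptually
       cc_dst = (count != 0) ? result : cc_dst; */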
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
    target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    gen_exts(ot, cpu_T[0]);
    tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
    tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    gen_extu(ot, cpu_T[0]);
    tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
    tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
    tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
    int mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(ot, 0, op1);

    gen_exts(ot, cpu_T[0]);
    tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
    tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
    gen_extu(ot, cpu_T[0]);
    tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
    tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
    tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);

    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);

static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
    tcg_gen_shli_tl(ret, arg1, arg2);
    tcg_gen_shri_tl(ret, arg1, -arg2);
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
    target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    /* Replicate the 8-bit input so that a 32-bit rotate works. */
    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
    tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
    /* Replicate the 16-bit input so that a 32-bit rotate works. */
    tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
#ifdef TARGET_X86_64
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
    tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
    tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, op1);

    /* We'll need the flags computed into CC_SRC. */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead. */
    tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
    tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
    tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
    tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
       exactly as we computed above. */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable. */
    set_cc_op(s, CC_OP_DYNAMIC);
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
    int mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, op1);

#ifdef TARGET_X86_64
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
    tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
    tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
    tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
    tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
    shift = mask + 1 - shift;
    gen_extu(ot, cpu_T[0]);
    tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
    tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
    tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);

    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, op1);

    /* Compute the flags into CC_SRC. */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead. */
    tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
    tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
    tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
    tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
    set_cc_op(s, CC_OP_ADCOX);
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(ot, 0, op1);

    gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
    gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
    gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
#ifdef TARGET_X86_64
    gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
    gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
    gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
    gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
#ifdef TARGET_X86_64
    gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);

    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, op1);
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
                             bool is_right, TCGv count_in)
    target_ulong mask = (ot == OT_QUAD ? 63 : 31);

    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
        gen_op_mov_TN_reg(ot, 0, op1);

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    /* Note: we implement the Intel behaviour for shift count > 16.
       This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
       portion by constructing it as a 32-bit value. */
    tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
    tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
    tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
    tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);

#ifdef TARGET_X86_64
    /* Concatenate the two 32-bit values and use a 64-bit shift. */
    tcg_gen_subi_tl(cpu_tmp0, count, 1);
    tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
    tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
    tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
    tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
    tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
    tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
    tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
    tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);

    tcg_gen_subi_tl(cpu_tmp0, count, 1);
    tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
    tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
    tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
    tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
    tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
    if (ot == OT_WORD) {
        /* Only needed if count > 16, for Intel behaviour. */
        tcg_gen_subfi_tl(cpu_tmp4, 33, count);
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
        tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
    tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
    tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
    tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
    tcg_gen_movi_tl(cpu_tmp4, 0);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                       cpu_tmp4, cpu_T[1]);
    tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
        gen_op_mov_reg_T0(ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
    gen_op_mov_TN_reg(ot, 1, s);
    gen_rot_rm_T1(s1, ot, d, 0);
    gen_rot_rm_T1(s1, ot, d, 1);
    gen_shift_rm_T1(s1, ot, d, 0, 0);
    gen_shift_rm_T1(s1, ot, d, 1, 0);
    gen_shift_rm_T1(s1, ot, d, 1, 1);
    gen_rotc_rm_T1(s1, ot, d, 0);
    gen_rotc_rm_T1(s1, ot, d, 1);

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
    gen_rot_rm_im(s1, ot, d, c, 0);
    gen_rot_rm_im(s1, ot, d, c, 1);
    gen_shift_rm_im(s1, ot, d, c, 0, 0);
    gen_shift_rm_im(s1, ot, d, c, 1, 0);
    gen_shift_rm_im(s1, ot, d, c, 1, 1);
    /* currently not optimized */
    gen_op_movl_T1_im(c);
    gen_shift(s1, op, ot, d, OR_TMP1);
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
                          int *reg_ptr, int *offset_ptr)
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    mod = (modrm >> 6) & 3;

    code = cpu_ldub_code(env, s->pc++);
    scale = (code >> 6) & 3;
    index = ((code >> 3) & 7) | REX_X(s);
    if ((base & 7) == 5) {
        disp = (int32_t)cpu_ldl_code(env, s->pc);
        if (CODE64(s) && !havesib) {
            disp += s->pc + s->rip_offset;
    disp = (int8_t)cpu_ldub_code(env, s->pc++);
    disp = (int32_t)cpu_ldl_code(env, s->pc);

    /* for correct popl handling with esp */
    if (base == 4 && s->popl_esp_hack)
        disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(base);
        gen_op_addq_A0_im(disp);
    gen_op_movl_A0_reg(base);
    gen_op_addl_A0_im(disp);
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_im(disp);
    gen_op_movl_A0_im(disp);

    /* index == 4 means no index */
    if (havesib && (index != 4)) {
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_addq_A0_reg_sN(scale, index);
        gen_op_addl_A0_reg_sN(scale, index);

    if (base == R_EBP || base == R_ESP)
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_addq_A0_seg(override);
    gen_op_addl_A0_seg(s, override);

    disp = cpu_lduw_code(env, s->pc);
    gen_op_movl_A0_im(disp);
    rm = 0; /* avoid SS override */
    disp = (int8_t)cpu_ldub_code(env, s->pc++);
    disp = cpu_lduw_code(env, s->pc);

    gen_op_movl_A0_reg(R_EBX);
    gen_op_addl_A0_reg_sN(0, R_ESI);
    gen_op_movl_A0_reg(R_EBX);
    gen_op_addl_A0_reg_sN(0, R_EDI);
    gen_op_movl_A0_reg(R_EBP);
    gen_op_addl_A0_reg_sN(0, R_ESI);
    gen_op_movl_A0_reg(R_EBP);
    gen_op_addl_A0_reg_sN(0, R_EDI);
    gen_op_movl_A0_reg(R_ESI);
    gen_op_movl_A0_reg(R_EDI);
    gen_op_movl_A0_reg(R_EBP);
    gen_op_movl_A0_reg(R_EBX);
    gen_op_addl_A0_im(disp);
    gen_op_andl_A0_ffff();
    if (rm == 2 || rm == 3 || rm == 6)
        gen_op_addl_A0_seg(s, override);
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    code = cpu_ldub_code(env, s->pc++);

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
    int override, must_add_seg;
    must_add_seg = s->addseg;
    if (s->override >= 0) {
        override = s->override;
#ifdef TARGET_X86_64
    gen_op_addq_A0_seg(override);
    gen_op_addl_A0_seg(s, override);
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           int ot, int reg, int is_store)
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    gen_op_mov_TN_reg(ot, 0, reg);
    gen_op_mov_reg_T0(ot, rm);
    gen_op_mov_TN_reg(ot, 0, rm);
    gen_op_mov_reg_T0(ot, reg);
    gen_lea_modrm(env, s, modrm, &opreg, &disp);
    gen_op_mov_TN_reg(ot, 0, reg);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, reg);
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
    ret = cpu_ldub_code(env, s->pc);
    ret = cpu_lduw_code(env, s->pc);
    ret = cpu_ldl_code(env, s->pc);

static inline int insn_const_size(unsigned int ot)

static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
    TranslationBlock *tb;

    pc = s->cs_base + eip;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
        /* jump to another page: currently not optimized */
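
/* Direct block chaining: the value passed to tcg_gen_exit_tb() encodes the
   TranslationBlock pointer plus the jump slot number in its low bits, letting
   the execution loop patch the goto_tb branch once the destination TB exists.
   The page check above avoids chaining across a guest page boundary, where
   the mapping may change. */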
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
    l1 = gen_new_label();
    gen_goto_tb(s, 0, next_eip);
    gen_goto_tb(s, 1, val);
    s->is_jmp = DISAS_TB_JUMP;
    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_jmp_im(next_eip);
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, cpu_T[1]);
    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg2 = tcg_const_tl(cc.imm);

    tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
                       cpu_T[0], cpu_regs[reg]);
    gen_op_mov_reg_T0(ot, reg);

    if (cc.mask != -1) {
        tcg_temp_free(cc.reg);
        tcg_temp_free(cc.reg2);
static inline void gen_op_movl_T0_seg(int seg_reg)
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State, segs[seg_reg].selector));

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State, segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State, segs[seg_reg].base));

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
static inline int svm_is_rep(int prefixes)
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);

gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
    gen_update_cc_op(s);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
                                         tcg_const_i64(param));

gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
    gen_svm_check_intercept_param(s, pc_start, type, 0);

static inline void gen_stack_update(DisasContext *s, int addend)
#ifdef TARGET_X86_64
    gen_op_add_reg_im(2, R_ESP, addend);
    gen_op_add_reg_im(1, R_ESP, addend);
    gen_op_add_reg_im(0, R_ESP, addend);
/* generate a push. It depends on ss32, addseg and dflag */
static void gen_push_T0(DisasContext *s)
#ifdef TARGET_X86_64
    gen_op_movq_A0_reg(R_ESP);
    gen_op_addq_A0_im(-8);
    gen_op_st_T0_A0(OT_QUAD + s->mem_index);
    gen_op_addq_A0_im(-2);
    gen_op_st_T0_A0(OT_WORD + s->mem_index);
    gen_op_mov_reg_A0(2, R_ESP);
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-2);
    gen_op_addl_A0_im(-4);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    gen_op_addl_A0_seg(s, R_SS);
    gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    gen_op_addl_A0_seg(s, R_SS);
    gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
    if (s->ss32 && !s->addseg)
        gen_op_mov_reg_A0(1, R_ESP);
    gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);

/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
#ifdef TARGET_X86_64
    gen_op_movq_A0_reg(R_ESP);
    gen_op_addq_A0_im(-8);
    gen_op_st_T1_A0(OT_QUAD + s->mem_index);
    gen_op_addq_A0_im(-2);
    gen_op_st_T0_A0(OT_WORD + s->mem_index);
    gen_op_mov_reg_A0(2, R_ESP);
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-2);
    gen_op_addl_A0_im(-4);
    gen_op_addl_A0_seg(s, R_SS);
    gen_op_andl_A0_ffff();
    gen_op_addl_A0_seg(s, R_SS);
    gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
    if (s->ss32 && !s->addseg)
        gen_op_mov_reg_A0(1, R_ESP);
    gen_stack_update(s, (-2) << s->dflag);
/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
#ifdef TARGET_X86_64
    gen_op_movq_A0_reg(R_ESP);
    gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_seg(s, R_SS);
    gen_op_andl_A0_ffff();
    gen_op_addl_A0_seg(s, R_SS);
    gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);

static void gen_pop_update(DisasContext *s)
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
    gen_stack_update(s, 2 << s->dflag);

static void gen_stack_A0(DisasContext *s)
    gen_op_movl_A0_reg(R_ESP);
    gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    gen_op_addl_A0_seg(s, R_SS);
/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-16 << s->dflag);
    gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
        gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
        gen_op_addl_A0_im(2 << s->dflag);
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);

/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
    gen_op_movl_A0_reg(R_ESP);
    gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
    gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
        gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
        gen_op_addl_A0_im(2 << s->dflag);
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
static void gen_enter(DisasContext *s, int esp_addend, int level)
#ifdef TARGET_X86_64
    ot = s->dflag ? OT_QUAD : OT_WORD;

    gen_op_movl_A0_reg(R_ESP);
    gen_op_addq_A0_im(-opsize);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);

    gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
    gen_op_st_T0_A0(ot + s->mem_index);
    /* XXX: must save state */
    gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
                             tcg_const_i32((ot == OT_QUAD)),
    gen_op_mov_reg_T1(ot, R_EBP);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
    gen_op_mov_reg_T1(OT_QUAD, R_ESP);

    ot = s->dflag + OT_WORD;
    opsize = 2 << s->dflag;

    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-opsize);
    gen_op_andl_A0_ffff();
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    gen_op_addl_A0_seg(s, R_SS);
    gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
    gen_op_st_T0_A0(ot + s->mem_index);
    /* XXX: must save state */
    gen_helper_enter_level(cpu_env, tcg_const_i32(level),
                           tcg_const_i32(s->dflag),
    gen_op_mov_reg_T1(ot, R_EBP);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
    gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = DISAS_TB_JUMP;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_debug(cpu_env);
    s->is_jmp = DISAS_TB_JUMP;
}
/* generate a generic end of block. Trace exception is also generated
   if needed */
static void gen_eob(DisasContext *s)
{
    gen_update_cc_op(s);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq(cpu_env);
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (s->tf) {
        gen_helper_single_step(cpu_env);
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
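/* NOTE: "hardware" single step (s->singlestep_enabled, i.e. the gdb
   stub) is checked before the guest's TF flag; in either case the block
   must end in a helper call rather than a chained exit so the debug
   trap is actually raised. */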
/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (s->jmp_opt) {
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
static inline void gen_ldq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
}

static inline void gen_ldo_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(int idx, int offset)
{
    int mem_index = (idx >> 2) - 1;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
}
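/* NOTE: the 128-bit XMM transfers above are split into two 64-bit
   memory ops, low quadword first, so no 128-bit TCG memory op is
   required. */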
static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
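/* NOTE: the second index of sse_op_table1 encodes the mandatory prefix:
   0 = none, 1 = 0x66, 2 = 0xf3, 3 = 0xf2 (see the b1 computation at the
   top of gen_sse() below). */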
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntq, movntq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
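/* NOTE: sse_op_table2 is indexed by 8 * (operand width: w/d/q) plus the
   /r field of the modrm byte, matching the immediate shift forms of
   opcodes 0x71-0x73 dispatched from gen_sse() below. */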
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif

static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
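/* NOTE: unlike sse_op_table6, every entry in sse_op_table7 also consumes
   an immediate byte, which gen_sse() passes as the trailing TCGv_i32
   argument of the SSEFunc_0_eppi callback. */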
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val, ot;
    int modrm, mod, rm, reg, reg_addr, offset_addr;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto illegal_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    }
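    /* NOTE: opcodes 0x10-0x5f plus 0xc2/0xc6 are always XMM operations;
       for the remaining rows the register file depends on the mandatory
       prefix selected above. */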
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return;
    }
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
            goto illegal_op;
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = cpu_ldub_code(env, s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
        case 0x0e7: /* movntq */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            break;
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntps */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x3f0: /* lddqu */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            break;
        case 0x22b: /* movntss */
        case 0x32b: /* movntsd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (b1 & 1) {
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
                                                      xmm_regs[reg]));
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                                    xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
            } else
#endif
            {
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
            } else
#endif
            {
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x6f: /* movq mm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[rm].mmx));
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x010: /* movups */
        case 0x110: /* movupd */
        case 0x028: /* movaps */
        case 0x128: /* movapd */
        case 0x16f: /* movdqa xmm, ea */
        case 0x26f: /* movdqu xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
                            offsetof(CPUX86State,xmm_regs[rm]));
            }
            break;
        case 0x210: /* movss xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
            }
            break;
        case 0x310: /* movsd xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x012: /* movlps */
        case 0x112: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                /* movhlps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x212: /* movsldup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
            break;
        case 0x312: /* movddup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            break;
        case 0x016: /* movhps */
        case 0x116: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                /* movlhps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            break;
        case 0x216: /* movshdup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
            break;
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
                bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                    offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_env, cpu_ptr0,
                                       tcg_const_i32(bit_index),
                                       tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_env, cpu_ptr0,
                                         tcg_const_i32(bit_index),
                                         tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == 2) {
                tcg_gen_ld_i64(cpu_T[0], cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            if (b1 >= 2) {
                goto illegal_op;
            }
            val = cpu_ldub_code(env, s->pc++);
            if (is_xmm) {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                gen_op_movl_T0_im(val);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                gen_op_movl_T0_0();
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
                                       (((modrm >> 3)) & 7)][b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            if (ot == OT_LONG) {
                SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
                sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
#else
                goto illegal_op;
#endif
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                    tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == OT_LONG) {
                SSEFunc_i_ep sse_fn_i_ep =
                    sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
                sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_l_ep sse_fn_l_ep =
                    sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
                sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
#else
                goto illegal_op;
#endif
            }
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            val = cpu_ldub_code(env, s->pc++);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
            val = cpu_ldub_code(env, s->pc++);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(ot, reg);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_T0(OT_LONG, reg);
            break;
        case 0x138:
        case 0x038:
            b = modrm;
            if ((b & 0xf0) == 0xf0) {
                goto do_0f_38_fx;
            }
            modrm = cpu_ldub_code(env, s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_epp = sse_op_table6[b].op[b1];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s->mem_index, op2_offset +
                                        offsetof(XMMReg, XMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
                                           (s->mem_index >> 2) - 1);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(XMMReg, XMM_W(0)));
                        break;
                    case 0x2a:            /* movntdqa */
                        gen_ldo_env_A0(s->mem_index, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s->mem_index, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            if (sse_fn_epp == SSE_SPECIAL) {
                goto illegal_op;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);

            if (b == 0x17) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            break;
        case 0x238:
        case 0x338:
        do_0f_38_fx:
            /* Various integer extensions at 0f 38 f[0-f].  */
            b = modrm | (b1 << 8);
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            switch (b) {
            case 0x3f0: /* crc32 Gd,Eb */
            case 0x3f1: /* crc32 Gd,Ey */
            do_crc32:
                if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
                    goto illegal_op;
                }
                if ((b & 0xff) == 0xf0) {
                    ot = OT_BYTE;
                } else if (s->dflag != 2) {
                    ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
                } else {
                    ot = OT_QUAD;
                }

                gen_op_mov_TN_reg(OT_LONG, 0, reg);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                                 cpu_T[0], tcg_const_i32(8 << ot));

                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                gen_op_mov_reg_T0(ot, reg);
                break;
            case 0x1f0: /* crc32 or movbe */
            case 0x1f1:
                /* For these insns, the f3 prefix is supposed to have priority
                   over the 66 prefix, but that's not what we implement above
                   setting b1.  */
                if (s->prefix & PREFIX_REPNZ) {
                    goto do_crc32;
                }
                /* FALLTHRU */
            case 0x0f0: /* movbe Gy,My */
            case 0x0f1: /* movbe My,Gy */
                if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
                    goto illegal_op;
                }
                if (s->dflag != 2) {
                    ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
                } else {
                    ot = OT_QUAD;
                }

                /* Load the data incoming to the bswap.  Note that the TCG
                   implementation of bswap requires the input be zero
                   extended.  In the case of the loads, we simply know that
                   gen_op_ld_v via gen_ldst_modrm does that already.  */
                if ((b & 1) == 0) {
                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                } else {
                    switch (ot) {
                    case OT_WORD:
                        tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[reg]);
                        break;
                    default:
                        tcg_gen_ext32u_tl(cpu_T[0], cpu_regs[reg]);
                        break;
                    case OT_QUAD:
                        tcg_gen_mov_tl(cpu_T[0], cpu_regs[reg]);
                        break;
                    }
                }

                switch (ot) {
                case OT_WORD:
                    tcg_gen_bswap16_tl(cpu_T[0], cpu_T[0]);
                    break;
                default:
                    tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
                    break;
#ifdef TARGET_X86_64
                case OT_QUAD:
                    tcg_gen_bswap64_tl(cpu_T[0], cpu_T[0]);
                    break;
#endif
                }

                if ((b & 1) == 0) {
                    gen_op_mov_reg_T0(ot, reg);
                } else {
                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
                }
                break;
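                /* NOTE: movbe is implemented as a plain load/store wrapped
                   around the TCG bswap ops; only the low opcode bit
                   distinguishes the Gy,My and My,Gy directions. */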
            case 0x0f2: /* andn Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
                gen_op_mov_reg_T0(ot, reg);
                gen_op_update1_cc();
                set_cc_op(s, CC_OP_LOGICB + ot);
                break;

            case 0x0f7: /* bextr Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                {
                    TCGv bound, zero;

                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                    /* Extract START, and shift the operand.
                       Shifts larger than operand size get zeros.  */
                    tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
                    tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);

                    bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
                    zero = tcg_const_tl(0);
                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
                                       cpu_T[0], zero);
                    tcg_temp_free(zero);

                    /* Extract the LEN into a mask.  Lengths larger than
                       operand size get all ones.  */
                    tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
                    tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
                                       cpu_A0, bound);
                    tcg_temp_free(bound);
                    tcg_gen_movi_tl(cpu_T[1], 1);
                    tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
                    tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
                    tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

                    gen_op_mov_reg_T0(ot, reg);
                    gen_op_update1_cc();
                    set_cc_op(s, CC_OP_LOGICB + ot);
                }
                break;
            case 0x0f5: /* bzhi Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
                {
                    TCGv bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
                    /* Note that since we're using BMILG (in order to get O
                       cleared) we need to store the inverse into C.  */
                    tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
                                       cpu_T[1], bound);
                    tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
                                       bound, bound, cpu_T[1]);
                    tcg_temp_free(bound);
                }
                tcg_gen_movi_tl(cpu_A0, -1);
                tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
                tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
                gen_op_mov_reg_T0(ot, reg);
                gen_op_update1_cc();
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

            case 0x3f6: /* mulx By, Gy, rdx, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                switch (ot) {
                default:
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
                    tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                      cpu_tmp2_i32, cpu_tmp3_i32);
                    tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
                    tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
                    break;
#ifdef TARGET_X86_64
                case OT_QUAD:
                    tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
                                      cpu_T[0], cpu_regs[R_EDX]);
                    break;
#endif
                }
                break;
            case 0x3f5: /* pdep Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                /* Note that by zero-extending the mask operand, we
                   automatically handle zero-extending the result.  */
                if (s->dflag == 2) {
                    tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
                } else {
                    tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
                }
                gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
                break;

            case 0x2f5: /* pext Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                /* Note that by zero-extending the mask operand, we
                   automatically handle zero-extending the result.  */
                if (s->dflag == 2) {
                    tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
                } else {
                    tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
                }
                gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
                break;
            case 0x1f6: /* adcx Gy, Ey */
            case 0x2f6: /* adox Gy, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
                    goto illegal_op;
                } else {
                    TCGv carry_in, carry_out, zero;
                    int end_op;

                    ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

                    /* Re-use the carry-out from a previous round.  */
                    TCGV_UNUSED(carry_in);
                    carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
                    switch (s->cc_op) {
                    case CC_OP_ADCX:
                        if (b == 0x1f6) {
                            carry_in = cpu_cc_dst;
                            end_op = CC_OP_ADCX;
                        } else {
                            end_op = CC_OP_ADCOX;
                        }
                        break;
                    case CC_OP_ADOX:
                        if (b == 0x1f6) {
                            end_op = CC_OP_ADCOX;
                        } else {
                            carry_in = cpu_cc_src2;
                            end_op = CC_OP_ADOX;
                        }
                        break;
                    case CC_OP_ADCOX:
                        end_op = CC_OP_ADCOX;
                        carry_in = carry_out;
                        break;
                    default:
                        end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
                        break;
                    }
                    /* If we can't reuse carry-out, get it out of EFLAGS.  */
                    if (TCGV_IS_UNUSED(carry_in)) {
                        if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
                            gen_compute_eflags(s);
                        }
                        carry_in = cpu_tmp0;
                        tcg_gen_shri_tl(carry_in, cpu_cc_src,
                                        ctz32(b == 0x1f6 ? CC_C : CC_O));
                        tcg_gen_andi_tl(carry_in, carry_in, 1);
                    }

                    switch (ot) {
#ifdef TARGET_X86_64
                    case OT_LONG:
                        /* If we know TL is 64-bit, and we want a 32-bit
                           result, just do everything in 64-bit arithmetic.  */
                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
                        tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
                        tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
                        tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
                        tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
                        break;
#endif
                    default:
                        /* Otherwise compute the carry-out in two steps.  */
                        zero = tcg_const_tl(0);
                        tcg_gen_add2_tl(cpu_T[0], carry_out,
                                        cpu_T[0], zero,
                                        carry_in, zero);
                        tcg_gen_add2_tl(cpu_regs[reg], carry_out,
                                        cpu_regs[reg], carry_out,
                                        cpu_T[0], zero);
                        tcg_temp_free(zero);
                        break;
                    }
                    set_cc_op(s, end_op);
                }
                break;
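                /* NOTE: CC_OP_ADCX/ADOX/ADCOX record which of C and O
                   currently live in cc_dst/cc_src2, so back-to-back
                   adcx/adox chains can reuse the carry-out without
                   materializing EFLAGS in between. */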
            case 0x1f7: /* shlx Gy, Ey, By */
            case 0x2f7: /* sarx Gy, Ey, By */
            case 0x3f7: /* shrx Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                if (ot == OT_QUAD) {
                    tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
                } else {
                    tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
                }
                if (b == 0x1f7) {
                    tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                } else if (b == 0x2f7) {
                    if (ot != OT_QUAD) {
                        tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                    }
                    tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                } else {
                    if (ot != OT_QUAD) {
                        tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                    }
                    tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                }
                gen_op_mov_reg_T0(ot, reg);
                break;

            case 0x0f3:
            case 0x1f3:
            case 0x2f3:
            case 0x3f3: /* Group 17 */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

                switch (reg & 7) {
                case 1: /* blsr By,Ey */
                    tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
                    tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                    gen_op_mov_reg_T0(ot, s->vex_v);
                    gen_op_update2_cc();
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                case 2: /* blsmsk By,Ey */
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                    tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                    tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                case 3: /* blsi By, Ey */
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                    tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                    tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;
                default:
                    goto illegal_op;
                }
                break;

            default:
                goto illegal_op;
            }
            break;

        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = cpu_ldub_code(env, s->pc++);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto illegal_op;
            }

            sse_fn_eppi = sse_op_table7[b].op[b1];
            if (!sse_fn_eppi) {
                goto illegal_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            if (sse_fn_eppi == SSE_SPECIAL) {
                ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = cpu_ldub_code(env, s->pc++);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_W(val & 7)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x16:
                    if (ot == OT_LONG) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                        else
                            tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                            (s->mem_index >> 2) - 1);
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
                        if (mod == 3)
                            gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
                        else
                            tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3)
                        gen_op_mov_reg_T0(ot, rm);
                    else
                        tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3)
                        gen_op_mov_TN_reg(OT_LONG, 0, rm);
                    else
                        tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0,
                                        (s->mem_index >> 2) - 1);
                    tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,xmm_regs[rm]
                                                .XMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                        (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                    offsetof(CPUX86State,xmm_regs[reg]
                                            .XMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                    break;
                case 0x22:
                    if (ot == OT_LONG) { /* pinsrd */
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp0, rm);
                        else
                            tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3)
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        else
                            tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                            (s->mem_index >> 2) - 1);
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }
            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_ldq_env_A0(s->mem_index, op2_offset);
                }
            }
            val = cpu_ldub_code(env, s->pc++);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                set_cc_op(s, CC_OP_EFLAGS);

                if (s->dflag == 2)
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0x33a:
            /* Various integer extensions at 0f 3a f[0-f].  */
            b = modrm | (b1 << 8);
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;

            switch (b) {
            case 0x3f0: /* rorx Gy,Ey, Ib */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                b = cpu_ldub_code(env, s->pc++);
                if (ot == OT_QUAD) {
                    tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
                } else {
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                    tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                }
                gen_op_mov_reg_T0(ot, reg);
                break;

            default:
                goto illegal_op;
            }
            break;

        default:
            goto illegal_op;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
                                b == 0xc2)) {
                    /* specific case for SSE single instructions */
                    if (b1 == 2) {
                        /* 32 bit access */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                    } else {
                        /* 64 bit access */
                        gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
                    }
                } else {
                    gen_ldo_env_A0(s->mem_index, op2_offset);
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s->mem_index, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
                goto illegal_op;
            val = cpu_ldub_code(env, s->pc++);
            sse_fn_epp = sse_op_table5[val];
            if (!sse_fn_epp) {
                goto illegal_op;
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = cpu_ldub_code(env, s->pc++);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
            sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = cpu_ldub_code(env, s->pc++);
            if (val >= 8)
                goto illegal_op;
            sse_fn_epp = sse_op_table4[val][b1];

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(R_EDI);
            } else
#endif
            {
                gen_op_movl_A0_reg(R_EDI);
                if (s->aflag == 0)
                    gen_op_andl_A0_ffff();
            }
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
            sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            set_cc_op(s, CC_OP_EFLAGS);
        }
    }
}
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                               target_ulong pc_start)
{
    int b, prefixes, aflag, dflag;
    int shift, ot;
    int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(pc_start);
    }
    s->pc = pc_start;
    prefixes = 0;
    aflag = s->code32;
    dflag = s->code32;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
 next_byte:
    b = cpu_ldub_code(env, s->pc);
    s->pc++;
    /* Collect prefixes.  */
    switch (b) {
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        /* VEX prefixes cannot be used except in 32-bit mode.
           Otherwise the instruction is LES or LDS.  */
        if (s->code32 && !s->vm86) {
            static const int pp_prefix[4] = {
                0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
            };
            int vex3, vex2 = cpu_ldub_code(env, s->pc);

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS.  */
                break;
            }
            s->pc++;

            /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
                            | PREFIX_LOCK | PREFIX_DATA)) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (x86_64_hregs) {
                goto illegal_op;
            }
#endif
            rex_r = (~vex2 >> 4) & 8;
            if (b == 0xc5) {
                vex3 = vex2;
                b = cpu_ldub_code(env, s->pc++);
            } else {
#ifdef TARGET_X86_64
                s->rex_x = (~vex2 >> 3) & 8;
                s->rex_b = (~vex2 >> 2) & 8;
#endif
                vex3 = cpu_ldub_code(env, s->pc++);
                rex_w = (vex3 >> 7) & 1;
                switch (vex2 & 0x1f) {
                case 0x01: /* Implied 0f leading opcode bytes.  */
                    b = cpu_ldub_code(env, s->pc++) | 0x100;
                    break;
                case 0x02: /* Implied 0f 38 leading opcode bytes.  */
                    b = 0x138;
                    break;
                case 0x03: /* Implied 0f 3a leading opcode bytes.  */
                    b = 0x13a;
                    break;
                default:   /* Reserved for future use.  */
                    goto illegal_op;
                }
            }
            s->vex_v = (~vex3 >> 3) & 0xf;
            s->vex_l = (vex3 >> 2) & 1;
            prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
        }
        break;
    }
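    /* NOTE: the VEX register fields are stored inverted in the encoding,
       hence the ~vex2/~vex3 above; vex_v holds the extra source register
       index and pp_prefix[] folds the implied 66/f3/f2 prefix back into
       'prefixes'. */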
    /* Post-process prefixes.  */
    if (prefixes & PREFIX_DATA) {
        dflag ^= 1;
    }
    if (prefixes & PREFIX_ADR) {
        aflag ^= 1;
    }
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        if (rex_w == 1) {
            /* 0x66 is ignored if rex.w is set */
            dflag = 2;
        }
        if (!(prefixes & PREFIX_ADR)) {
            aflag = 2;
        }
    }
#endif

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = cpu_ldub_code(env, s->pc++) | 0x100;
        goto reswitch;
    /**************************/
            ot = dflag + OT_WORD;
        case 0: /* OP Ev, Gv */
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            } else if (op == OP_XORL && rm == reg) {
                /* xor reg, reg optimisation */
                set_cc_op(s, CC_OP_CLR);
                gen_op_mov_reg_T0(ot, reg);
            gen_op_mov_TN_reg(ot, 1, reg);
            gen_op(s, op, ot, opreg);
        case 1: /* OP Gv, Ev */
            modrm = cpu_ldub_code(env, s->pc++);
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            rm = (modrm & 7) | REX_B(s);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T1_A0(ot + s->mem_index);
            } else if (op == OP_XORL && rm == reg) {
                gen_op_mov_TN_reg(ot, 1, rm);
            gen_op(s, op, ot, reg);
        case 2: /* OP A, Iv */
            val = insn_get(env, s, ot);
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, OR_EAX);
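        /* The three cases above cover the standard ALU encodings: op Ev,Gv
           (register into r/m), op Gv,Ev (r/m into register) and op A,Iv
           (immediate into the accumulator); gen_op() emits the actual
           arithmetic and the condition-code bookkeeping. */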
    case 0x80: /* GRP1 */
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;

                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            val = insn_get(env, s, ot);
            val = (int8_t)insn_get(env, s, OT_BYTE);
        gen_op_movl_T1_im(val);
        gen_op(s, op, ot, opreg);

    /**************************/
    /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
    case 0xf6: /* GRP3 */
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
            gen_op_mov_TN_reg(ot, 0, rm);

            val = insn_get(env, s, ot);
            gen_op_movl_T1_im(val);
            gen_op_testl_T0_T1_cc();
            set_cc_op(s, CC_OP_LOGICB + ot);
            tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
                gen_op_st_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, rm);
            tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
                gen_op_st_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, rm);
            gen_op_update_neg_cc();
            set_cc_op(s, CC_OP_SUBB + ot);
            gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
                set_cc_op(s, CC_OP_MULB);
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                set_cc_op(s, CC_OP_MULW);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
#ifdef TARGET_X86_64
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T[0], cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
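            /* For every MUL width the product's high half is left in
               cpu_cc_src; the CC_OP_MUL* modes later derive CF and OF
               from cc_src being non-zero, i.e. from the result having
               overflowed the low half. */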
            gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                set_cc_op(s, CC_OP_MULB);
                gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_T0(OT_WORD, R_EAX);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
                gen_op_mov_reg_T0(OT_WORD, R_EDX);
                set_cc_op(s, CC_OP_MULW);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
#ifdef TARGET_X86_64
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T[0], cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
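            /* For IMUL, cpu_cc_src is set to the high part minus the sign
               extension of the low part: it is zero exactly when the
               signed product fits in the destination, which is what
               CF/OF report. */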
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divb_AL(cpu_env, cpu_T[0]);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divw_AX(cpu_env, cpu_T[0]);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divl_EAX(cpu_env, cpu_T[0]);
#ifdef TARGET_X86_64
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_divq_EAX(cpu_env, cpu_T[0]);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivb_AL(cpu_env, cpu_T[0]);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivw_AX(cpu_env, cpu_T[0]);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
#ifdef TARGET_X86_64
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
            } else if (op == 3 || op == 5) {
                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = dflag ? OT_QUAD : OT_WORD;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_T0_A0(ot + s->mem_index);
            gen_op_mov_TN_reg(ot, 0, rm);

        case 0: /* inc Ev */
            gen_inc(s, ot, opreg, 1);
        case 1: /* dec Ev */
            gen_inc(s, ot, opreg, -1);
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
                gen_op_andl_T0_ffff();
            next_eip = s->pc - s->cs_base;
            gen_movtl_T1_im(next_eip);
        case 3: /* lcall Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
            if (s->pe && !s->vm86) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                           tcg_const_i32(dflag),
                                           tcg_const_i32(s->pc - pc_start));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                      tcg_const_i32(dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
        case 4: /* jmp Ev */
                gen_op_andl_T0_ffff();
        case 5: /* ljmp Ev */
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
            gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
            if (s->pe && !s->vm86) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
                                          tcg_const_i32(s->pc - pc_start));
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_movl_T0_T1();
        case 6: /* push Ev */
    case 0x84: /* test Ev, Gv */
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_TN_reg(ot, 1, reg);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);

    case 0xa8: /* test eAX, Iv */
            ot = dflag + OT_WORD;
        val = insn_get(env, s, ot);

        gen_op_mov_TN_reg(ot, 0, OR_EAX);
        gen_op_movl_T1_im(val);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, R_EAX);
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, R_EAX);
            gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
            gen_op_mov_reg_T0(OT_QUAD, R_EDX);
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
            gen_op_mov_reg_T0(OT_LONG, R_EDX);
            gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
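        /* CBW/CWDE/CDQE sign-extend the accumulator in place, while
           CWD/CDQ/CQO replicate its sign bit into DX/EDX/RDX via an
           arithmetic shift by 15/31/63. */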
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
            s->rip_offset = insn_const_size(ot);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            val = insn_get(env, s, ot);
            gen_op_movl_T1_im(val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, OT_BYTE);
            gen_op_movl_T1_im(val);
            gen_op_mov_TN_reg(ot, 1, reg);
#ifdef TARGET_X86_64
            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
            tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                              cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
            tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
            gen_op_mov_reg_T0(ot, reg);
        set_cc_op(s, CC_OP_MULB + ot);
    case 0x1c1: /* xadd Ev, Gv */
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_addl_T0_T1();
            gen_op_mov_reg_T1(ot, reg);
            gen_op_mov_reg_T0(ot, rm);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_addl_T0_T1();
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        gen_op_update2_cc();
        set_cc_op(s, CC_OP_ADDB + ot);
    case 0x1b1: /* cmpxchg Ev, Gv */
            TCGv t0, t1, t2, a0;

            ot = dflag + OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            label1 = gen_new_label();
            tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
            tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
                label2 = gen_new_label();
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(ot + s->mem_index, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                gen_op_st_v(ot + s->mem_index, t1, a0);
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_srcT, t2);
            tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
            set_cc_op(s, CC_OP_SUBB + ot);
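        /* The brcond over label1 separates the failure path (write back
           the old value and load it into EAX) from the success path
           (store the new value t1); both meet at label2, where SUB-style
           flags are computed for the accumulator/memory comparison. */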
    case 0x1c7: /* cmpxchg8b */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
#ifdef TARGET_X86_64
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
            gen_jmp_im(pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg16b(cpu_env, cpu_A0);
            if (!(s->cpuid_features & CPUID_CX8))
            gen_jmp_im(pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_env, cpu_A0);
        set_cc_op(s, CC_OP_EFLAGS);
    /**************************/
    case 0x50 ... 0x57: /* push */
        gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
    case 0x58 ... 0x5f: /* pop */
            ot = dflag ? OT_QUAD : OT_WORD;
            ot = dflag + OT_WORD;
        /* NOTE: order is important for pop %sp */
        gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
    case 0x60: /* pusha */
    case 0x61: /* popa */
    case 0x68: /* push Iv */
            ot = dflag ? OT_QUAD : OT_WORD;
            ot = dflag + OT_WORD;
            val = insn_get(env, s, ot);
            val = (int8_t)insn_get(env, s, OT_BYTE);
        gen_op_movl_T0_im(val);
    case 0x8f: /* pop Ev */
            ot = dflag ? OT_QUAD : OT_WORD;
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
            /* NOTE: order is important for pop %sp */
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_T0(ot, rm);
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
    case 0xc8: /* enter */
        val = cpu_lduw_code(env, s->pc);
        level = cpu_ldub_code(env, s->pc++);
        gen_enter(s, val, level);
    case 0xc9: /* leave */
        /* XXX: exception not precise (ESP is updated before potential exception) */
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_QUAD, R_ESP);
        } else if (s->ss32) {
            gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
            gen_op_mov_reg_T0(OT_LONG, R_ESP);
            gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_WORD, R_ESP);
            ot = dflag ? OT_QUAD : OT_WORD;
            ot = dflag + OT_WORD;
        gen_op_mov_reg_T0(ot, R_EBP);
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        gen_op_movl_T0_seg(b >> 3);
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
            /* if reg == SS, inhibit interrupts/trace. */
            /* If several instructions disable interrupts, only the
               first does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            gen_jmp_im(s->pc - s->cs_base);
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
            gen_jmp_im(s->pc - s->cs_base);
    /**************************/
    case 0x89: /* mov Gv, Ev */
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
    case 0xc7: /* mov Ev, Iv */
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        val = insn_get(env, s, ot);
        gen_op_movl_T0_im(val);
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
    case 0x8b: /* mov Ev, Gv */
            ot = OT_WORD + dflag;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_T0(ot, reg);
    case 0x8e: /* mov seg, Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
        gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
            /* if reg == SS, inhibit interrupts/trace */
            /* If several instructions disable interrupts, only the
               first does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                gen_helper_set_inhibit_irq(cpu_env);
            gen_jmp_im(s->pc - s->cs_base);
    case 0x8c: /* mov Gv, seg */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_op_movl_T0_seg(reg);
            ot = OT_WORD + dflag;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;
            /* ot is the size of source */
            ot = (b & 1) + OT_BYTE;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
                gen_op_mov_TN_reg(ot, 0, rm);
                switch (ot | (b & 8)) {
                    tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
                    tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
                    tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
                    tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(d_ot, reg);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_op_lds_T0_A0(ot + s->mem_index);
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(d_ot, reg);
    case 0x8d: /* lea */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* we must ensure that no segment is added */
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_op_mov_reg_A0(ot - OT_WORD, reg);
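        /* LEA reuses the normal ModRM effective-address computation but
           stores the raw offset into the destination register; per the
           comment above, no segment base may be folded into A0 here. */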
    case 0xa0: /* mov EAX, Ov */
    case 0xa2: /* mov Ov, EAX */
            target_ulong offset_addr;

                ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                offset_addr = cpu_ldq_code(env, s->pc);
                gen_op_movq_A0_im(offset_addr);
                    offset_addr = insn_get(env, s, OT_LONG);
                    offset_addr = insn_get(env, s, OT_WORD);
                gen_op_movl_A0_im(offset_addr);
            gen_add_A0_ds_seg(s);
                gen_op_ld_T0_A0(ot + s->mem_index);
                gen_op_mov_reg_T0(ot, R_EAX);
                gen_op_mov_TN_reg(ot, 0, R_EAX);
                gen_op_st_T0_A0(ot + s->mem_index);
    case 0xd7: /* xlat */
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_movq_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
            gen_op_movl_A0_reg(R_EBX);
            gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
                gen_op_andl_A0_ffff();
                tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        gen_add_A0_ds_seg(s);
        gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, OT_BYTE);
        gen_op_movl_T0_im(val);
        gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
            tmp = cpu_ldq_code(env, s->pc);
            reg = (b & 7) | REX_B(s);
            gen_movtl_T0_im(tmp);
            gen_op_mov_reg_T0(OT_QUAD, reg);
            ot = dflag ? OT_LONG : OT_WORD;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            gen_op_movl_T0_im(val);
            gen_op_mov_reg_T0(ot, reg);
    case 0x91 ... 0x97: /* xchg R, EAX */
        ot = dflag + OT_WORD;
        reg = (b & 7) | REX_B(s);
    case 0x87: /* xchg Ev, Gv */
            ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_TN_reg(ot, 1, rm);
            gen_op_mov_reg_T0(ot, rm);
            gen_op_mov_reg_T1(ot, reg);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_mov_TN_reg(ot, 0, reg);
            /* for xchg, lock is implicit */
            if (!(prefixes & PREFIX_LOCK))
            gen_op_ld_T1_A0(ot + s->mem_index);
            gen_op_st_T0_A0(ot + s->mem_index);
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_unlock();
            gen_op_mov_reg_T1(ot, reg);
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above. */
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above. */
    case 0x1b2: /* lss Gv */
    case 0x1b4: /* lfs Gv */
    case 0x1b5: /* lgs Gv */
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
        /* load the segment first to handle exceptions properly */
        gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_T1(ot, reg);
            gen_jmp_im(s->pc - s->cs_base);
    /************************/
            ot = dflag + OT_WORD;

        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            opreg = (modrm & 7) | REX_B(s);
            gen_shift(s, op, ot, opreg, OR_ECX);
                shift = cpu_ldub_code(env, s->pc++);
            gen_shifti(s, op, ot, opreg, shift);
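    /* GRP2 shifts/rotates: op (bits 5:3 of the ModRM byte) selects the
       operation; the count comes either from CL (gen_shift with OR_ECX)
       or from an immediate byte (gen_shifti). */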
    case 0x1a4: /* shld imm */
    case 0x1a5: /* shld cl */
    case 0x1ac: /* shrd imm */
    case 0x1ad: /* shrd cl */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_op_mov_TN_reg(ot, 1, reg);
            TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
    /************************/
        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
            /* XXX: what to do if illegal op ? */
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            case 0x00 ... 0x07: /* fxxxs */
            case 0x10 ... 0x17: /* fixxxl */
            case 0x20 ... 0x27: /* fxxxl */
            case 0x30 ... 0x37: /* fixxx */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                    gen_helper_fp_arith_ST0_FT0(op1);
                        /* fcomp needs pop */
                        gen_helper_fpop(cpu_env);
            case 0x08: /* flds */
            case 0x0a: /* fsts */
            case 0x0b: /* fstps */
            case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
            case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
            case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
                        gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
                        gen_op_lds_T0_A0(OT_WORD + s->mem_index);
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                    /* XXX: the corresponding CPUID bit must be tested ! */
                        gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                    gen_helper_fpop(cpu_env);
                        gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_LONG + s->mem_index);
                        gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                          (s->mem_index >> 2) - 1);
                        gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                        gen_op_st_T0_A0(OT_WORD + s->mem_index);
                    gen_helper_fpop(cpu_env);
            case 0x0c: /* fldenv mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
            case 0x0d: /* fldcw mem */
                gen_op_ld_T0_A0(OT_WORD + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
            case 0x0e: /* fnstenv mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
            case 0x0f: /* fnstcw mem */
                gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
            case 0x1d: /* fldt mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fldt_ST0(cpu_env, cpu_A0);
            case 0x1f: /* fstpt mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fstt_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
            case 0x2c: /* frstor mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
            case 0x2e: /* fnsave mem */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
            case 0x2f: /* fnstsw mem */
                gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
            case 0x3c: /* fbld */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbld_ST0(cpu_env, cpu_A0);
            case 0x3e: /* fbstp */
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                gen_helper_fbst_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
            case 0x3d: /* fildll */
                tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
            case 0x3f: /* fistpll */
                gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
                tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
                                  (s->mem_index >> 2) - 1);
                gen_helper_fpop(cpu_env);
            /* register float ops */
            case 0x08: /* fld sti */
                gen_helper_fpush(cpu_env);
                gen_helper_fmov_ST0_STN(cpu_env,
                                        tcg_const_i32((opreg + 1) & 7));
            case 0x09: /* fxchg sti */
            case 0x29: /* fxchg4 sti, undocumented op */
            case 0x39: /* fxchg7 sti, undocumented op */
                gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
            case 0x0a: /* grp d9/2 */
                    /* check exceptions (FreeBSD FPU probe) */
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_fwait(cpu_env);
            case 0x0c: /* grp d9/4 */
                    gen_helper_fchs_ST0(cpu_env);
                    gen_helper_fabs_ST0(cpu_env);
                    gen_helper_fldz_FT0(cpu_env);
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fxam_ST0(cpu_env);
            case 0x0d: /* grp d9/5 */
                        gen_helper_fpush(cpu_env);
                        gen_helper_fld1_ST0(cpu_env);
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2t_ST0(cpu_env);
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2e_ST0(cpu_env);
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldpi_ST0(cpu_env);
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldlg2_ST0(cpu_env);
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldln2_ST0(cpu_env);
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldz_ST0(cpu_env);
            case 0x0e: /* grp d9/6 */
                    gen_helper_f2xm1(cpu_env);
                    gen_helper_fyl2x(cpu_env);
                    gen_helper_fptan(cpu_env);
                case 3: /* fpatan */
                    gen_helper_fpatan(cpu_env);
                case 4: /* fxtract */
                    gen_helper_fxtract(cpu_env);
                case 5: /* fprem1 */
                    gen_helper_fprem1(cpu_env);
                case 6: /* fdecstp */
                    gen_helper_fdecstp(cpu_env);
                case 7: /* fincstp */
                    gen_helper_fincstp(cpu_env);
            case 0x0f: /* grp d9/7 */
                    gen_helper_fprem(cpu_env);
                case 1: /* fyl2xp1 */
                    gen_helper_fyl2xp1(cpu_env);
                    gen_helper_fsqrt(cpu_env);
                case 3: /* fsincos */
                    gen_helper_fsincos(cpu_env);
                case 5: /* fscale */
                    gen_helper_fscale(cpu_env);
                case 4: /* frndint */
                    gen_helper_frndint(cpu_env);
                    gen_helper_fsin(cpu_env);
                    gen_helper_fcos(cpu_env);
            case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
            case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
            case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    gen_helper_fp_arith_STN_ST0(op1, opreg);
                        gen_helper_fpop(cpu_env);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fp_arith_ST0_FT0(op1);
            case 0x02: /* fcom */
            case 0x22: /* fcom2, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
            case 0x03: /* fcomp */
            case 0x23: /* fcomp3, undocumented op */
            case 0x32: /* fcomp5, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
            case 0x15: /* da/5 */
                case 1: /* fucompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                case 0: /* feni (287 only, just do nop here) */
                case 1: /* fdisi (287 only, just do nop here) */
                    gen_helper_fclex(cpu_env);
                case 3: /* fninit */
                    gen_helper_fninit(cpu_env);
                case 4: /* fsetpm (287 only, just do nop here) */
            case 0x1d: /* fucomi */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
            case 0x1e: /* fcomi */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
            case 0x28: /* ffree sti */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
            case 0x2a: /* fst sti */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
            case 0x2b: /* fstp sti */
            case 0x0b: /* fstp1 sti, undocumented op */
            case 0x3a: /* fstp8 sti, undocumented op */
            case 0x3b: /* fstp9 sti, undocumented op */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
            case 0x2c: /* fucom st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
            case 0x2d: /* fucomp st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
            case 0x33: /* de/3 */
                case 1: /* fcompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
            case 0x38: /* ffreep sti, undocumented op */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
            case 0x3c: /* df/4 */
                    gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                    tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                    gen_op_mov_reg_T0(OT_WORD, R_EAX);
            case 0x3d: /* fucomip */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
            case 0x3e: /* fcomip */
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
            case 0x10 ... 0x13: /* fcmovxx */
                    static const uint8_t fcmov_cc[8] = {
                    op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                    l1 = gen_new_label();
                    gen_jcc1_noeob(s, op1, l1);
                    gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
    /************************/
    case 0xa4: /* movsS */
            ot = dflag + OT_WORD;

        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xaa: /* stosS */
            ot = dflag + OT_WORD;

        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xac: /* lodsS */
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xae: /* scasS */
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
    case 0xa6: /* cmpsS */
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
    case 0x6c: /* insS */
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
                gen_jmp(s, s->pc - s->cs_base);
    case 0x6e: /* outsS */
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
                gen_jmp(s, s->pc - s->cs_base);
    /************************/
            ot = dflag ? OT_LONG : OT_WORD;
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
            gen_jmp(s, s->pc - s->cs_base);
            ot = dflag ? OT_LONG : OT_WORD;
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T0_im(val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
            gen_jmp(s, s->pc - s->cs_base);
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
        gen_op_mov_reg_T1(ot, R_EAX);
            gen_jmp(s, s->pc - s->cs_base);
            ot = dflag ? OT_LONG : OT_WORD;
        gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
        gen_op_andl_T0_ffff();
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_TN_reg(ot, 1, R_EAX);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
            gen_jmp(s, s->pc - s->cs_base);
    /************************/
    case 0xc2: /* ret im */
        val = cpu_ldsw_code(env, s->pc);
        if (CODE64(s) && s->dflag)
        gen_stack_update(s, val + (2 << s->dflag));
            gen_op_andl_T0_ffff();
    case 0xc3: /* ret */
            gen_op_andl_T0_ffff();
    case 0xca: /* lret im */
        val = cpu_ldsw_code(env, s->pc);
        if (s->pe && !s->vm86) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
                                      tcg_const_i32(val));
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
                gen_op_andl_T0_ffff();
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_addl_A0_im(2 << s->dflag);
            gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
            gen_op_movl_seg_T0_vm(R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (4 << s->dflag));
    case 0xcb: /* lret */
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
            gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
            set_cc_op(s, CC_OP_EFLAGS);
        } else if (s->vm86) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
                set_cc_op(s, CC_OP_EFLAGS);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
                                      tcg_const_i32(s->pc - s->cs_base));
            set_cc_op(s, CC_OP_EFLAGS);
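        /* IRET takes three paths here: real mode uses the simple
           iret_real helper, vm86 mode either does the same or raises
           #GP depending on IOPL, and protected mode goes through the
           full iret_protected helper. */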
    case 0xe8: /* call im */
            tval = (int32_t)insn_get(env, s, OT_LONG);
            tval = (int16_t)insn_get(env, s, OT_WORD);
        next_eip = s->pc - s->cs_base;
        gen_movtl_T0_im(next_eip);
    case 0x9a: /* lcall im */
            unsigned int selector, offset;

            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
    case 0xe9: /* jmp im */
            tval = (int32_t)insn_get(env, s, OT_LONG);
            tval = (int16_t)insn_get(env, s, OT_WORD);
        tval += s->pc - s->cs_base;
    case 0xea: /* ljmp im */
            unsigned int selector, offset;

            ot = dflag ? OT_LONG : OT_WORD;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, OT_WORD);

            gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
    case 0xeb: /* jmp Jb */
        tval = (int8_t)insn_get(env, s, OT_BYTE);
        tval += s->pc - s->cs_base;
    case 0x70 ... 0x7f: /* jcc Jb */
        tval = (int8_t)insn_get(env, s, OT_BYTE);
    case 0x180 ... 0x18f: /* jcc Jv */
            tval = (int32_t)insn_get(env, s, OT_LONG);
            tval = (int16_t)insn_get(env, s, OT_WORD);
        next_eip = s->pc - s->cs_base;
        gen_jcc(s, b, tval, next_eip);
    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_setcc1(s, b, cpu_T[0]);
        gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_cmovcc1(env, s, ot, b, modrm, reg);
    /************************/
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_helper_read_eflags(cpu_T[0], cpu_env);
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           IF_MASK | IOPL_MASK)
                if (s->cpl <= s->iopl) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK)));
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            gen_jmp_im(s->pc - s->cs_base);
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
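        /* The direction flag is kept in env->df as +1/-1 rather than as
           an EFLAGS bit, so the string operations can scale it by the
           operand size and add it to the index registers directly. */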
    /************************/
    /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
            gen_op_mov_TN_reg(ot, 0, rm);
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T1_im(val);
    case 0x1a3: /* bt Gv, Ev */
    case 0x1ab: /* bts */
    case 0x1b3: /* btr */
    case 0x1bb: /* btc */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_TN_reg(OT_LONG, 1, reg);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            /* specific case: we need to add a displacement */
            gen_exts(ot, cpu_T[1]);
            tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
            tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
            gen_op_ld_T0_A0(ot + s->mem_index);
            gen_op_mov_TN_reg(ot, 0, rm);
        tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
            tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
            tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        set_cc_op(s, CC_OP_SARB + ot);
            gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T0(ot, rm);
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_movi_tl(cpu_cc_dst, 0);
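        /* For bt/bts/btr/btc the bit index is first reduced modulo the
           operand width; the value shifted right by that index lands in
           cpu_tmp4/cc_src, and the CC_OP_SARB + ot mode then exposes
           its low bit as CF. */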
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag + OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T[0]);

        /* Note that lzcnt and tzcnt are in different extensions. */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                gen_helper_clz(cpu_T[0], cpu_T[0]);
                tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
                /* For tzcnt, a zero input must return the operand size:
                   force all bits outside the operand size to 1. */
                target_ulong mask = (target_ulong)-2 << (size - 1);
                tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            /* For lzcnt/tzcnt, C and Z bits are defined and are
               related to the result. */
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_BMILGB + ot);
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
            set_cc_op(s, CC_OP_LOGICB + ot);
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros. */
                gen_helper_clz(cpu_T[0], cpu_T[0]);
                tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that. */
            tcg_gen_movi_tl(cpu_tmp0, 0);
            tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
                               cpu_regs[reg], cpu_T[0]);
        gen_op_mov_reg_T0(ot, reg);
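        /* bsf/bsr share the clz/ctz helpers with tzcnt/lzcnt but differ
           in flag semantics (only Z is defined, and from the input) and
           in the zero-input case, where the movcond above preserves the
           old destination value instead. */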
    /************************/
    case 0x27: /* daa */
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x2f: /* das */
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x37: /* aaa */
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x3f: /* aas */
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0xd4: /* aam */
        val = cpu_ldub_code(env, s->pc++);
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
            gen_helper_aam(cpu_env, tcg_const_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
    case 0xd5: /* aad */
        val = cpu_ldub_code(env, s->pc++);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
    /************************/
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
        /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
            goto do_xchg_reg_eax;
        if (prefixes & PREFIX_REPZ) {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fwait(cpu_env);
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xcd: /* int N */
        val = cpu_ldub_code(env, s->pc++);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xce: /* into */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
        gen_debug(s, pc_start - s->cs_base);
        qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
    case 0xfa: /* cli */
            if (s->cpl <= s->iopl) {
                gen_helper_cli(cpu_env);
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                gen_helper_cli(cpu_env);
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    case 0xfb: /* sti */
            if (s->cpl <= s->iopl) {
                gen_helper_sti(cpu_env);
                /* interruptions are enabled only the first insn after sti */
                /* If several instructions disable interrupts, only the
                   first does it */
                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                    gen_helper_set_inhibit_irq(cpu_env);
                /* give a chance to handle pending irqs */
                gen_jmp_im(s->pc - s->cs_base);
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    case 0x62: /* bound */
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_op_mov_TN_reg(ot, 0, reg);
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        if (ot == OT_WORD) {
            gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
            gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
            gen_op_mov_TN_reg(OT_QUAD, 0, reg);
            tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, reg);
            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, reg);
    case 0xd6: /* salc */
        gen_compute_eflags_c(s, cpu_T[0]);
        tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
            tval = (int8_t)insn_get(env, s, OT_BYTE);
            next_eip = s->pc - s->cs_base;

            l1 = gen_new_label();
            l2 = gen_new_label();
            l3 = gen_new_label();
            case 0: /* loopnz */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s->aflag, l3);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s->aflag, l1);
                gen_op_jz_ecx(s->aflag, l1);
            gen_jmp_im(next_eip);
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
                gen_helper_rdmsr(cpu_env);
                gen_helper_wrmsr(cpu_env);
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdtsc(cpu_env);
            gen_jmp(s, s->pc - s->cs_base);
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdpmc(cpu_env);
    case 0x134: /* sysenter */
        /* For Intel SYSENTER is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysenter(cpu_env);
    case 0x135: /* sysexit */
        /* For Intel SYSEXIT is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode ? */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
    case 0x107: /* sysret */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
            /* condition codes are modified only in long mode */
                set_cc_op(s, CC_OP_EFLAGS);
7454 case 0x1a2: /* cpuid */
7455 gen_update_cc_op(s
);
7456 gen_jmp_im(pc_start
- s
->cs_base
);
7457 gen_helper_cpuid(cpu_env
);
    case 0xf4: /* hlt */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->is_jmp = DISAS_TB_JUMP;
        }
        break;
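    /* Group 0x0f 0x00: the reg field of the ModRM byte selects the
       operation (0 = sldt, 1 = str, 2 = lldt, 3 = ltr, 4 = verr,
       5 = verw); e.g. modrm 0xd8 decodes to mod 3, op 3, rm 0, i.e.
       an "ltr ax"-style register form.  All of these require
       protected mode and are rejected in vm86. */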
    case 0x100:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* sldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lldt(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                             offsetof(CPUX86State, tr.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ltr(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(cpu_env, cpu_T[0]);
            } else {
                gen_helper_verw(cpu_env, cpu_T[0]);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto illegal_op;
        }
        break;
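    /* Group 0x0f 0x01 (sgdt/sidt/lgdt/lidt/smsw/lmsw/invlpg plus the
       mod==3 extensions).  SGDT stores the 16-bit limit followed by
       the base; with a 16-bit operand size only 24 bits of the base
       are stored, hence the 0xffffff mask. */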
    case 0x101:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        rm = modrm & 7;
        switch (op) {
        case 0: /* sgdt */
            if (mod == 3)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                             offsetof(CPUX86State, gdt.limit));
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            break;
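        /* MONITOR takes its address from eAX, honouring the
           address-size prefix (s->aflag selects 16/32/64 bits) and the
           DS segment base, which is why cpu_A0 is built the same way
           as for an ordinary memory operand. */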
        case 1:
            if (mod == 3) {
                switch (rm) {
                case 0: /* monitor */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
                    if (s->aflag == 2) {
                        gen_op_movq_A0_reg(R_EAX);
                    } else
#endif
                    {
                        gen_op_movl_A0_reg(R_EAX);
                        if (s->aflag == 0)
                            gen_op_andl_A0_ffff();
                    }
                    gen_add_A0_ds_seg(s);
                    gen_helper_monitor(cpu_env, cpu_A0);
                    break;
                case 1: /* mwait */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
                    gen_eob(s);
                    break;
                case 2: /* clac */
                    if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
                        s->cpl != 0) {
                        goto illegal_op;
                    }
                    gen_helper_clac(cpu_env);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                    break;
                case 3: /* stac */
                    if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
                        s->cpl != 0) {
                        goto illegal_op;
                    }
                    gen_helper_stac(cpu_env);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                    break;
                default:
                    goto illegal_op;
                }
            } else { /* sidt */
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State, idt.limit));
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                tcg_gen_ld_tl(cpu_T[0], cpu_env,
                              offsetof(CPUX86State, idt.base));
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            }
            break;
        case 2: /* lgdt */
        case 3: /* lidt */
            if (mod == 3) {
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                switch (rm) {
                case 0: /* VMRUN */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
                                         tcg_const_i32(s->pc - pc_start));
                        tcg_gen_exit_tb(0);
                        s->is_jmp = DISAS_TB_JUMP;
                    }
                    break;
                case 1: /* VMMCALL */
                    if (!(s->flags & HF_SVME_MASK))
                        goto illegal_op;
                    gen_helper_vmmcall(cpu_env);
                    break;
                case 2: /* VMLOAD */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 3: /* VMSAVE */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                case 4: /* STGI */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_stgi(cpu_env);
                    }
                    break;
                case 5: /* CLGI */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_clgi(cpu_env);
                    }
                    break;
                case 6: /* SKINIT */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    gen_helper_skinit(cpu_env);
                    break;
                case 7: /* INVLPGA */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
                    }
                    break;
                default:
                    goto illegal_op;
                }
            } else if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start,
                                        op == 2 ? SVM_EXIT_GDTR_WRITE
                                                : SVM_EXIT_IDTR_WRITE);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T1_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                if (op == 2) {
                    tcg_gen_st_tl(cpu_T[0], cpu_env,
                                  offsetof(CPUX86State, gdt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env,
                                    offsetof(CPUX86State, gdt.limit));
                } else {
                    tcg_gen_st_tl(cpu_T[0], cpu_env,
                                  offsetof(CPUX86State, idt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env,
                                    offsetof(CPUX86State, idt.limit));
                }
            }
            break;
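        /* SMSW reads the low 16 bits of CR0.  The offset arithmetic
           below picks the low 32-bit half of the 64-bit cr[0] field:
           on a big-endian host that half lives at offset +4. */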
        case 4: /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                             offsetof(CPUX86State, cr[0]) + 4);
#else
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, cr[0]));
#endif
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
            break;
        case 6: /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
                gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
                gen_helper_lmsw(cpu_env, cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        case 7:
            if (mod != 3) { /* invlpg */
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_helper_invlpg(cpu_env, cpu_A0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                }
            } else {
                switch (rm) {
                case 0: /* swapgs */
#ifdef TARGET_X86_64
                    if (CODE64(s)) {
                        if (s->cpl != 0) {
                            gen_exception(s, EXCP0D_GPF,
                                          pc_start - s->cs_base);
                        } else {
                            tcg_gen_ld_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State, segs[R_GS].base));
                            tcg_gen_ld_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State, kernelgsbase));
                            tcg_gen_st_tl(cpu_T[1], cpu_env,
                                          offsetof(CPUX86State, segs[R_GS].base));
                            tcg_gen_st_tl(cpu_T[0], cpu_env,
                                          offsetof(CPUX86State, kernelgsbase));
                        }
                    } else
#endif
                    {
                        goto illegal_op;
                    }
                    break;
                case 1: /* rdtscp */
                    if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
                        goto illegal_op;
                    gen_update_cc_op(s);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_rdtscp(cpu_env);
                    gen_jmp(s, s->pc - s->cs_base);
                    break;
                default:
                    goto illegal_op;
                }
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start,
                                    (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;

            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(OT_LONG, 0, rm);
                /* sign extend */
                if (d_ot == OT_QUAD)
                    tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if (d_ot == OT_QUAD) {
                    gen_op_lds_T0_A0(OT_LONG + s->mem_index);
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        } else
#endif
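        /* ARPL dst, src (16-bit only): if the RPL field (low two bits)
           of the destination selector is below that of the source,
           raise it to match and set ZF, otherwise clear ZF.  t2
           accumulates the CC_Z bit that is merged into cc_src at the
           end, so only one forward branch is needed. */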
        {
            int label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(ot + s->mem_index, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
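    /* LAR/LSL write their result only when the selector validated:
       the helpers report validity through the CC_Z bit in cc_src, and
       the brcondi below skips the register write when the check
       failed. */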
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            int label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, cpu_T[0]);
            } else {
                gen_helper_lsl(t0, cpu_env, cpu_T[0]);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x119 ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
        break;
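    /* Moves to and from control registers.  Bit 1 of the opcode gives
       the direction (0x22 writes, 0x20 reads); a write ends the
       translation block since it may change paging or protection
       state that static translation depends on. */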
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            if (b & 2) {
                gen_op_mov_TN_reg(ot, 0, rm);
                gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                     cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
                gen_op_mov_reg_T0(ot, rm);
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            /* XXX: do it dynamically with CR4.DE bit */
            if (reg == 4 || reg == 5 || reg >= 8)
                goto illegal_op;
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_TN_reg(ot, 0, rm);
                gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_ld_tl(cpu_T[0], cpu_env,
                              offsetof(CPUX86State, dr[reg]));
                gen_op_mov_reg_T0(ot, rm);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* fxsave */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
            break;
        case 1: /* fxrstor */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxrstor(cpu_env, cpu_A0,
                               tcg_const_i32((s->dflag == 2)));
            break;
        case 2: /* ldmxcsr */
        case 3: /* stmxcsr */
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
                mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (op == 2) {
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                                 offsetof(CPUX86State, mxcsr));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 5: /* lfence */
        case 6: /* mfence */
            if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
                goto illegal_op;
            break;
        case 7: /* sfence / clflush */
            if ((modrm & 0xc7) == 0xc0) {
                /* sfence */
                /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
                if (!(s->cpuid_features & CPUID_SSE))
                    goto illegal_op;
            } else {
                /* clflush */
                if (!(s->cpuid_features & CPUID_CLFLUSH))
                    goto illegal_op;
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        /* ignore for now */
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
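    /* POPCNT is encoded as f3 0f b8: the mandatory REPZ prefix must be
       present and LOCK/REPNZ must not be, which is what the prefix
       test below checks before the cpuid feature bit. */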
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA)
            ot = OT_WORD;
        else if (s->dflag != 2)
            ot = OT_LONG;
        else
            ot = OT_QUAD;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
        gen_op_mov_reg_T0(ot, reg);

        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
        /* fall through */
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto illegal_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}
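
/* Allocate the fixed TCG globals declared at the top of this file:
   cpu_env maps the reserved host register TCG_AREG0, and each
   cpu_regs[] entry aliases the corresponding CPUX86State.regs[] slot
   so that guest register accesses compile to direct loads and
   stores. */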
void optimize_flags_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[15]), "r15");
#else
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
}

/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUX86State *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
    }
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");

    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(env, dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if too long translation, stop generation too */
        if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    /* we don't forget to fill the last values */
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
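
/* Called after an exception inside a translation block: pc_pos indexes
   the per-micro-op tables filled in by the search_pc pass, giving back
   the guest eip and the cc_op that was live at that instruction. */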
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for(i = 0;i <= pc_pos; i++) {
            if (tcg_ctx.gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
                         tcg_ctx.gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}