/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
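/*
 * Example: a ModRM byte is laid out as mod(2) | op/reg(3) | rm(3), so
 * CASE_MODRM_MEM_OP(4) expands to the ranges 0x20-0x27, 0x60-0x67 and
 * 0xa0-0xa7 -- every /4 encoding with a memory operand (mod != 3) --
 * while CASE_MODRM_OP(4) additionally matches the register forms
 * 0xe0-0xe7 (mod == 3).
 */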
//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
/* local temps */
static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    DisasContextBase base;

    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc_start;
    target_ulong pc; /* pc = eip + cs_base */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_xsave_features;

    sigjmp_buf jmpbuf;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s, TCGv dest);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
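/*
 * Example of how this table is used: when the translator switches from
 * CC_OP_SUBB (live: DST, SRC, SRCT) to CC_OP_LOGICB (live: DST only),
 * set_cc_op() below computes dead = SRC | SRCT and discards cpu_cc_src
 * and cpu_cc_srcT, letting TCG drop any code that only computed them.
 */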
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
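/*
 * Example: for "mov ah, al" (88 c4), register number 4 with no REX prefix
 * selects AH, so byte_reg_is_xH(4) is true; under a REX prefix
 * (x86_64_hregs set) the same number selects SPL and this returns false.
 */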
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline TCGMemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
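/*
 * Example: opcode 0x88 (MOV r/m8, r8) has the lsb clear, so
 * mo_b_d(0x88, ot) yields MO_8, while 0x89 (MOV r/m, r) keeps the
 * word/dword/qword size in OT.  mo_b_d32 does the same for IN/OUT,
 * whose port data is at most 32 bits wide.
 */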
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
    }
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T0);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}
/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(cpu_A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && s->addseg) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        if (ovr_seg < 0) {
            ovr_seg = def_seg;
        }
        tcg_gen_ext16u_tl(cpu_A0, a0);
        /* ADDSEG will only be false in 16-bit mode for LEA.  */
        if (!s->addseg) {
            return;
        }
        a0 = cpu_A0;
        break;
    default:
        tcg_abort();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(cpu_A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
        } else {
            tcg_gen_add_tl(cpu_A0, a0, seg);
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
    }
}
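/*
 * Example: for a 16-bit "mov al, [bx]" this computes
 * A0 = ext32u(DS.base + ext16u(EBX)); a segment override prefix swaps
 * OVR_SEG in for DS, while in 64-bit mode with no FS/GS override the
 * linear address is simply the register value.
 */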
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
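/*
 * A CCPrepare describes a not-yet-materialized condition: REG (optionally
 * masked by MASK and/or compared against REG2 or IMM) under COND.  For
 * example, after a CMP the carry flag can be prepared roughly as
 * { .cond = TCG_COND_LTU, .reg = srcT, .reg2 = src, .use_reg2 = true },
 * so a following JB branches directly on the unsigned comparison without
 * ever materializing EFLAGS.
 */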
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
        TCGv_i32 t_size = tcg_const_i32(1 << ot);
        TCGv t_next = tcg_const_tl(s->pc - s->cs_base);

        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
        tcg_temp_free_i32(t_size);
        tcg_temp_free(t_next);
    }
}

static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T0, 0);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
}
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
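/*
 * Illustration: GEN_REPZ(movs) defines gen_repz_movs(), whose emitted code
 * is roughly "if (ECX == 0) goto next_eip; movs; ECX--;
 * if (ECX == 0) goto next_eip; jmp cur_eip" -- one string iteration per
 * executed TB, looping by re-entering the same translation block.
 */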
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
    }
    switch (op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1);
            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4);
            tcg_gen_neg_tl(cpu_T0, cpu_T0);
            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(cpu_T0, cpu_T1);
            tcg_gen_atomic_fetch_add_tl(cpu_cc_srcT, cpu_A0, cpu_T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(cpu_T0, cpu_cc_srcT, cpu_T1);
        } else {
            tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(ot, cpu_T0, d);
        } else {
            gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
        }
        tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}

static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T0);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            gen_extu(ot, cpu_T0);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
        tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
}

static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T0);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
            } else {
                gen_extu(ot, cpu_T0);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
            tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
        tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T1);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
            } else {
                tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T0);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
            tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
            tcg_gen_mov_tl(cpu_T1, cpu_T0);
            tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
            tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
            tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
                           cpu_tmp4, cpu_T1);
        tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}
static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_v_reg(ot, cpu_T1, s);
    switch (op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
{
    switch (op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(cpu_T1, c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    s->pc += num_bytes;
    if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
            (void) unused;
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
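/*
 * Example: an instruction starting near the end of a page whose decoding
 * runs past 15 bytes probes the first byte of the following page above,
 * so a #PF on that page is raised in preference to the #GP for an
 * over-long instruction; otherwise the longjmp returns control to the
 * translation loop, which raises the #GP.
 */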
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return cpu_ldub_code(env, advance_pc(env, s, 1));
}

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
    return cpu_ldsw_code(env, advance_pc(env, s, 2));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return cpu_lduw_code(env, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return cpu_ldl_code(env, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return cpu_ldq_code(env, advance_pc(env, s, 8));
}
#endif

/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        tcg_abort();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
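/*
 * Worked example: modrm 0x44, sib 0x58, disp8 0x04 in 32-bit code decodes
 * as mod=1 rm=4 (SIB follows), scale=1, index=EBX, base=EAX, disp=+4,
 * i.e. the operand [eax + ebx*2 + 4] with the default DS segment.
 */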
/* Compute the address, with a minimum number of TCG ops.  */
static TCGv gen_lea_modrm_1(AddressParts a)
{
    TCGv ea = NULL;

    if (a.index >= 0) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
            ea = cpu_A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
            ea = cpu_A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        tcg_gen_movi_tl(cpu_A0, a.disp);
        ea = cpu_A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(cpu_A0, ea, a.disp);
        ea = cpu_A0;
    }

    return ea;
}

static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(a);
    gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
}

static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}

/* Used for BNDCL, BNDCU, BNDCN.  */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));

    tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
    if (!CODE64(s)) {
        tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
    gen_helper_bndck(cpu_env, cpu_tmp2_i32);
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           TCGMemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
        } else {
            gen_op_mov_v_reg(ot, cpu_T0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T0);
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
        } else {
            gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T0);
        }
    }
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = x86_ldl_code(env, s);
        break;
    default:
        tcg_abort();
    }
    return ret;
}

static inline int insn_const_size(TCGMemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}
static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
{
#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    target_ulong pc = s->cs_base + eip;

    if (use_goto_tb(s, pc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        /* jump to another page */
        gen_jmp_im(eip);
        gen_jr(s, cpu_tmp0);
    }
}

static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    TCGLabel *l1, *l2;

    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
*env
, DisasContext
*s
, TCGMemOp ot
, int b
,
2238 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2240 cc
= gen_prepare_cc(s
, b
, cpu_T1
);
2241 if (cc
.mask
!= -1) {
2242 TCGv t0
= tcg_temp_new();
2243 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2247 cc
.reg2
= tcg_const_tl(cc
.imm
);
2250 tcg_gen_movcond_tl(cc
.cond
, cpu_T0
, cc
.reg
, cc
.reg2
,
2251 cpu_T0
, cpu_regs
[reg
]);
2252 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2254 if (cc
.mask
!= -1) {
2255 tcg_temp_free(cc
.reg
);
2258 tcg_temp_free(cc
.reg2
);
static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T0, cpu_env,
                     offsetof(CPUX86State, segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
    tcg_gen_st32_tl(cpu_T0, cpu_env,
                    offsetof(CPUX86State, segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
}

/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
{
    if (s->pe && !s->vm86) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) {
            s->base.is_jmp = DISAS_TOO_MANY;
        }
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    gen_update_cc_op(s);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
}

/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
{
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
    TCGMemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = cpu_A0;

    tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (s->addseg) {
            new_esp = cpu_tmp4;
            tcg_gen_mov_tl(new_esp, cpu_A0);
        }
        gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
    }

    gen_op_st_v(s, d_ot, val, cpu_A0);
    gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
}

/* two step pop is necessary for precise exceptions */
static TCGMemOp gen_pop_T0(DisasContext *s)
{
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);

    return d_ot;
}

static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
{
    gen_stack_update(s, 1 << ot);
}

static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}

static void gen_pusha(DisasContext *s)
{
    TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
    TCGMemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
    }

    gen_stack_update(s, -8 * size);
}

static void gen_popa(DisasContext *s)
{
    TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
    TCGMemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
        gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
    }

    gen_stack_update(s, 8 * size);
}
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
    TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1.  */
    tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame.  */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);

            tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
            gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
            gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
        }

        /* Push the current FrameTemp as the last level.  */
        tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
        gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
    }

    /* Copy the FrameTemp value to EBP.  */
    gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);

    /* Compute the final value of ESP.  */
    tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
    gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
}
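/*
 * Example: "enter 16, 0" in 32-bit code is equivalent to
 * push ebp; mov ebp, esp; sub esp, 16.  With level > 0 the loop above
 * additionally copies level-1 saved frame pointers from the old frame
 * and pushes the new frame pointer, per the 80186 ENTER definition.
 */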
static void gen_leave(DisasContext *s)
{
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
    TCGMemOp a_ot = mo_stacksize(s);

    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);

    tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
    gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
}

static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
}

/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream.  */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        target_ulong pc = s->pc_start, end = s->pc;
        qemu_log_lock();
        qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
        for (; pc < end; ++pc) {
            qemu_log(" %02x", cpu_ldub_code(env, pc));
        }
        qemu_log("\n");
        qemu_log_unlock();
    }
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_debug(cpu_env);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_temp_free_i32(t);
        s->flags |= mask;
    }
}

static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_temp_free_i32(t);
        s->flags &= ~mask;
    }
}

/* Clear BND registers during legacy branches.  */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime.  */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(cpu_env);
    }
}
2553 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2554 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2555 S->TF. This is used by the syscall/sysret insns. */
2557 do_gen_eob_worker(DisasContext
*s
, bool inhibit
, bool recheck_tf
, bool jr
)
2559 gen_update_cc_op(s
);
2561 /* If several instructions disable interrupts, only the first does it. */
2562 if (inhibit
&& !(s
->flags
& HF_INHIBIT_IRQ_MASK
)) {
2563 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2565 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2568 if (s
->base
.tb
->flags
& HF_RF_MASK
) {
2569 gen_helper_reset_rf(cpu_env
);
2571 if (s
->base
.singlestep_enabled
) {
2572 gen_helper_debug(cpu_env
);
2573 } else if (recheck_tf
) {
2574 gen_helper_rechecking_single_step(cpu_env
);
2577 gen_helper_single_step(cpu_env
);
2579 tcg_gen_lookup_and_goto_ptr();
2583 s
->base
.is_jmp
= DISAS_NORETURN
;
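/* Convenience wrapper for the common case: end the block without the
   jump-to-register (goto_ptr) path. */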
static void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}

/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}

/* End of block, resetting the inhibit irq flag.  */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}

/* Jump to register */
static void gen_jr(DisasContext *s, TCGv dest)
{
    do_gen_eob_worker(s, false, false, true);
}

/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (s->jmp_opt) {
        gen_goto_tb(s, tb_num, eip);
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}

static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
}

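/* The 128-bit (octaword) variants below split the access into two
   little-endian 64-bit memory operations through cpu_tmp1_i64. */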
static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

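/* Function pointer signatures for the SSE helper tables below.  The suffix
   encodes the arguments: 'e' is the env pointer, 'p' a pointer into env
   (an MMX or ZMM register), 'i'/'l' a 32/64-bit value, 't' a target-sized
   value; the leading character is the return type, '0' meaning void. */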
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }

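/* The first index is the opcode byte following 0x0f; the second selects
   the mandatory prefix: 0 = none, 1 = 0x66, 2 = 0xf3, 3 = 0xf2.
   SSE_SPECIAL entries are decoded by hand in gen_sse(), and SSE_DUMMY
   marks femms/3DNow! opcodes that are dispatched separately. */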
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};

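/* Indexed by ((b - 1) & 3) * 8 plus the ModRM reg field: one row of eight
   per immediate-shift group 0x71/0x72/0x73, with the second index again
   selecting the MMX or XMM helper. */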
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};

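/* Conversion helpers: the 'a' tables handle cvtsi2s[sd] (index
   (b >> 8) & 1 picks ss vs. sd), the 'b' tables handle cvt(t)s[sd]2si
   (index ((b >> 7) & 2) | (b & 1) picks the f2-prefixed sd forms and
   truncating vs. rounding variants). */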
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif

static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};

static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};

struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }

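/* Tables 6 and 7 (0f 38 and 0f 3a opcodes) also carry the CPUID feature
   bit that must be present; gen_sse() checks ext_mask before emitting
   anything. */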
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};

static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};

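/* For the SSE_SPECIAL cases below, the opcode byte is combined with the
   prefix class as b | (b1 << 8), so e.g. 0x1e7 is 0f e7 with a 0x66
   prefix (movntdq) and 0x3f0 is 0f f0 with an f2 prefix (lddqu). */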
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto unknown_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_illegal_opcode(s);
        return;
    }
    if (is_xmm
        && !(s->flags & HF_OSFXSR_MASK)
        && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
        goto unknown_op;
    }
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
            /* If we were fully decoding this we might use illegal_op.  */
            goto unknown_op;
        }
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = x86_ldub_code(env, s);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
        case 0x0e7: /* movntq */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
            break;
        case 0x1e7: /* movntdq */
        case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm);
            gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
            break;
        case 0x3f0: /* lddqu */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm);
            gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
            break;
        case 0x22b: /* movntss */
        case 0x32b: /* movntsd */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm);
            if (b1 & 1) {
                gen_stq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
            } else {
                tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
                    xmm_regs[reg].ZMM_L(0)));
                gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
            }
            break;
        case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == MO_64) {
                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
                tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
            } else
#endif
            {
                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
            if (s->dflag == MO_64) {
                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
            } else
#endif
            {
                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg]));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
            }
            break;
        case 0x6f: /* movq mm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[rm].mmx));
                tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x010: /* movups */
        case 0x110: /* movupd */
        case 0x028: /* movaps */
        case 0x128: /* movapd */
        case 0x16f: /* movdqa xmm, ea */
        case 0x26f: /* movdqu xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
                            offsetof(CPUX86State,xmm_regs[rm]));
            }
            break;
        case 0x210: /* movss xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
                tcg_gen_movi_tl(cpu_T0, 0);
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
            }
            break;
        case 0x310: /* movsd xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
                tcg_gen_movi_tl(cpu_T0, 0);
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
            }
            break;
        case 0x012: /* movlps */
        case 0x112: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
            } else {
                /* movhlps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
            }
            break;
        case 0x212: /* movsldup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
            break;
        case 0x312: /* movddup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
            }
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
                        offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
            break;
        case 0x016: /* movhps */
        case 0x116: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(1)));
            } else {
                /* movlhps */
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
            }
            break;
        case 0x216: /* movshdup */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
            }
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
                        offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
            break;
        case 0x178:
        case 0x378:
            {
                int bit_index, field_length;

                if (b1 == 1 && reg != 0)
                    goto illegal_op;
                field_length = x86_ldub_code(env, s) & 0x3F;
                bit_index = x86_ldub_code(env, s) & 0x3F;
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                    offsetof(CPUX86State,xmm_regs[reg]));
                if (b1 == 1)
                    gen_helper_extrq_i(cpu_env, cpu_ptr0,
                                       tcg_const_i32(bit_index),
                                       tcg_const_i32(field_length));
                else
                    gen_helper_insertq_i(cpu_env, cpu_ptr0,
                                         tcg_const_i32(bit_index),
                                         tcg_const_i32(field_length));
            }
            break;
        case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
            if (s->dflag == MO_64) {
                tcg_gen_ld_i64(cpu_T0, cpu_env,
                               offsetof(CPUX86State,fpregs[reg].mmx));
                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T0, cpu_env,
                                 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
            }
            break;
        case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
            if (s->dflag == MO_64) {
                tcg_gen_ld_i64(cpu_T0, cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
                gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
            } else
#endif
            {
                tcg_gen_ld32u_tl(cpu_T0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
                gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
            }
            break;
        case 0x27e: /* movq xmm, ea */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
            }
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
            break;
        case 0x7f: /* movq ea, mm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
            } else {
                rm = (modrm & 7);
                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
                            offsetof(CPUX86State,fpregs[reg].mmx));
            }
            break;
        case 0x011: /* movups */
        case 0x111: /* movupd */
        case 0x029: /* movaps */
        case 0x129: /* movapd */
        case 0x17f: /* movdqa ea, xmm */
        case 0x27f: /* movdqu ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
                            offsetof(CPUX86State,xmm_regs[reg]));
            }
            break;
        case 0x211: /* movss ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
                gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
                            offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
            }
            break;
        case 0x311: /* movsd ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_stq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
            }
            break;
        case 0x013: /* movlps */
        case 0x113: /* movlpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_stq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x017: /* movhps */
        case 0x117: /* movhpd */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_stq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(1)));
            } else {
                goto illegal_op;
            }
            break;
        case 0x71: /* shift mm, im */
        case 0x72:
        case 0x73:
        case 0x171: /* shift xmm, im */
        case 0x172:
        case 0x173:
            if (b1 >= 2) {
                goto unknown_op;
            }
            val = x86_ldub_code(env, s);
            if (is_xmm) {
                tcg_gen_movi_tl(cpu_T0, val);
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
                tcg_gen_movi_tl(cpu_T0, 0);
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
                op1_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                tcg_gen_movi_tl(cpu_T0, val);
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
                tcg_gen_movi_tl(cpu_T0, 0);
                tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
                op1_offset = offsetof(CPUX86State,mmx_t0);
            }
            sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
                                       (((modrm >> 3)) & 7)][b1];
            if (!sse_fn_epp) {
                goto unknown_op;
            }
            if (is_xmm) {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x050: /* movmskps */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            break;
        case 0x150: /* movmskpd */
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            break;
        case 0x02a: /* cvtpi2ps */
        case 0x12a: /* cvtpi2pd */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b >> 8) {
            case 0x0:
                gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            default:
            case 0x1:
                gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22a: /* cvtsi2ss */
        case 0x32a: /* cvtsi2sd */
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            if (ot == MO_32) {
                SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
                sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
#else
                goto illegal_op;
#endif
            }
            break;
        case 0x02c: /* cvttps2pi */
        case 0x12c: /* cvttpd2pi */
        case 0x02d: /* cvtps2pi */
        case 0x12d: /* cvtpd2pi */
            gen_helper_enter_mmx(cpu_env);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_ldo_env_A0(s, op2_offset);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            switch(b) {
            case 0x02c:
                gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12c:
                gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x02d:
                gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            case 0x12d:
                gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
                break;
            }
            break;
        case 0x22c: /* cvttss2si */
        case 0x32c: /* cvttsd2si */
        case 0x22d: /* cvtss2si */
        case 0x32d: /* cvtsd2si */
            ot = mo_64_32(s->dflag);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                if ((b >> 8) & 1) {
                    gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
                } else {
                    gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
                    tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
                }
                op2_offset = offsetof(CPUX86State,xmm_t0);
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
            if (ot == MO_32) {
                SSEFunc_i_ep sse_fn_i_ep =
                    sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
                sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
                tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
            } else {
#ifdef TARGET_X86_64
                SSEFunc_l_ep sse_fn_l_ep =
                    sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
                sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
#else
                goto illegal_op;
#endif
            }
            gen_op_mov_reg_v(ot, reg, cpu_T0);
            break;
        case 0xc4: /* pinsrw */
        case 0x1c4:
            s->rip_offset = 1;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            val = x86_ldub_code(env, s);
            if (b1) {
                val &= 7;
                tcg_gen_st16_tl(cpu_T0, cpu_env,
                                offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
            } else {
                val &= 3;
                tcg_gen_st16_tl(cpu_T0, cpu_env,
                                offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
            }
            break;
        case 0xc5: /* pextrw */
        case 0x1c5:
            if (mod != 3)
                goto illegal_op;
            ot = mo_64_32(s->dflag);
            val = x86_ldub_code(env, s);
            if (b1) {
                val &= 7;
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_ld16u_tl(cpu_T0, cpu_env,
                                 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
            } else {
                val &= 3;
                rm = (modrm & 7);
                tcg_gen_ld16u_tl(cpu_T0, cpu_env,
                                offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_op_mov_reg_v(ot, reg, cpu_T0);
            break;
        case 0x1d6: /* movq ea, xmm */
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_stq_env_A0(s, offsetof(CPUX86State,
                                           xmm_regs[reg].ZMM_Q(0)));
            } else {
                rm = (modrm & 7) | REX_B(s);
                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
                            offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
            }
            break;
        case 0x2d6: /* movq2dq */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
                        offsetof(CPUX86State,fpregs[rm].mmx));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
            break;
        case 0x3d6: /* movdq2q */
            gen_helper_enter_mmx(cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                        offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
            break;
        case 0xd7: /* pmovmskb */
        case 0x1d7:
            if (mod != 3)
                goto illegal_op;
            if (b1) {
                rm = (modrm & 7) | REX_B(s);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
                gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            } else {
                rm = (modrm & 7);
                tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
                gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            }
            reg = ((modrm >> 3) & 7) | rex_r;
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            break;
        case 0x138:
        case 0x038:
            b = modrm;
            if ((b & 0xf0) == 0xf0) {
                goto do_0f_38_fx;
            }
            modrm = x86_ldub_code(env, s);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto unknown_op;
            }

            sse_fn_epp = sse_op_table6[b].op[b1];
            if (!sse_fn_epp) {
                goto unknown_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
                goto illegal_op;

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm);
                    switch (b) {
                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                        gen_ldq_env_A0(s, op2_offset +
                                        offsetof(ZMMReg, ZMM_Q(0)));
                        break;
                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                        offsetof(ZMMReg, ZMM_L(0)));
                        break;
                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                        tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
                                           s->mem_index, MO_LEUW);
                        tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                        offsetof(ZMMReg, ZMM_W(0)));
                        break;
                    case 0x2a:            /* movntqda */
                        gen_ldo_env_A0(s, op1_offset);
                        return;
                    default:
                        gen_ldo_env_A0(s, op2_offset);
                    }
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm);
                    gen_ldq_env_A0(s, op2_offset);
                }
            }
            if (sse_fn_epp == SSE_SPECIAL) {
                goto unknown_op;
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);

            if (b == 0x17) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            break;

        case 0x238:
        case 0x338:
        do_0f_38_fx:
            /* Various integer extensions at 0f 38 f[0-f].  */
            b = modrm | (b1 << 8);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;

            switch (b) {
            case 0x3f0: /* crc32 Gd,Eb */
            case 0x3f1: /* crc32 Gd,Ey */
            do_crc32:
                if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
                    goto illegal_op;
                }
                if ((b & 0xff) == 0xf0) {
                    ot = MO_8;
                } else if (s->dflag != MO_64) {
                    ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
                } else {
                    ot = MO_64;
                }

                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
                                 cpu_T0, tcg_const_i32(8 << ot));

                ot = mo_64_32(s->dflag);
                gen_op_mov_reg_v(ot, reg, cpu_T0);
                break;

            case 0x1f0: /* crc32 or movbe */
            case 0x1f1:
                /* For these insns, the f3 prefix is supposed to have priority
                   over the 66 prefix, but that's not what we implement above
                   setting b1.  */
                if (s->prefix & PREFIX_REPNZ) {
                    goto do_crc32;
                }
                /* FALLTHRU */
            case 0x0f0: /* movbe Gy,My */
            case 0x0f1: /* movbe My,Gy */
                if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
                    goto illegal_op;
                }
                if (s->dflag != MO_64) {
                    ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
                } else {
                    ot = MO_64;
                }

                gen_lea_modrm(env, s, modrm);
                if ((b & 1) == 0) {
                    tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
                                       s->mem_index, ot | MO_BE);
                    gen_op_mov_reg_v(ot, reg, cpu_T0);
                } else {
                    tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
                                       s->mem_index, ot | MO_BE);
                }
                break;

            case 0x0f2: /* andn Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_regs[s->vex_v]);
                gen_op_mov_reg_v(ot, reg, cpu_T0);
                gen_op_update1_cc();
                set_cc_op(s, CC_OP_LOGICB + ot);
                break;

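            /* BEXTR: the control word in BY supplies START in bits [7:0]
               and LEN in bits [15:8].  E.g. for EY = 0x12345678 and
               BY = 0x0808 the result is (0x12345678 >> 8) & 0xff = 0x56. */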
            case 0x0f7: /* bextr Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                {
                    TCGv bound, zero;

                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                    /* Extract START, and shift the operand.
                       Shifts larger than operand size get zeros.  */
                    tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
                    tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);

                    bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
                    zero = tcg_const_tl(0);
                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
                                       cpu_T0, zero);
                    tcg_temp_free(zero);

                    /* Extract the LEN into a mask.  Lengths larger than
                       operand size get all ones.  */
                    tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8);
                    tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
                                       cpu_A0, bound);
                    tcg_temp_free(bound);
                    tcg_gen_movi_tl(cpu_T1, 1);
                    tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
                    tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
                    tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);

                    gen_op_mov_reg_v(ot, reg, cpu_T0);
                    gen_op_update1_cc();
                    set_cc_op(s, CC_OP_LOGICB + ot);
                }
                break;

            case 0x0f5: /* bzhi Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
                {
                    TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
                    /* Note that since we're using BMILG (in order to get O
                       cleared) we need to store the inverse into C.  */
                    tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
                                       cpu_T1, bound);
                    tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
                                       bound, bound, cpu_T1);
                    tcg_temp_free(bound);
                }
                tcg_gen_movi_tl(cpu_A0, -1);
                tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
                tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(ot, reg, cpu_T0);
                gen_op_update1_cc();
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

            case 0x3f6: /* mulx By, Gy, rdx, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                switch (ot) {
                default:
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
                    tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                      cpu_tmp2_i32, cpu_tmp3_i32);
                    tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
                    tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
                    break;
#ifdef TARGET_X86_64
                case MO_64:
                    tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
                                      cpu_T0, cpu_regs[R_EDX]);
                    tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
                    tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
                    break;
#endif
                }
                break;

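            /* PDEP scatters the low bits of EY to the set bit positions of
               the mask in BY; PEXT is the inverse gather.  E.g.
               pdep(0b101, mask 0b11010) places bits 1,0,1 at positions
               1,3,4, giving 0b10010. */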
            case 0x3f5: /* pdep Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                /* Note that by zero-extending the mask operand, we
                   automatically handle zero-extending the result.  */
                if (ot == MO_64) {
                    tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
                } else {
                    tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
                }
                gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
                break;

            case 0x2f5: /* pext Gy, By, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                /* Note that by zero-extending the mask operand, we
                   automatically handle zero-extending the result.  */
                if (ot == MO_64) {
                    tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
                } else {
                    tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
                }
                gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
                break;

            case 0x1f6: /* adcx Gy, Ey */
            case 0x2f6: /* adox Gy, Ey */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
                    goto illegal_op;
                } else {
                    TCGv carry_in, carry_out, zero;
                    int end_op;

                    ot = mo_64_32(s->dflag);
                    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

                    /* Re-use the carry-out from a previous round.  */
                    carry_in = NULL;
                    carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
                    switch (s->cc_op) {
                    case CC_OP_ADCX:
                        if (b == 0x1f6) {
                            carry_in = cpu_cc_dst;
                            end_op = CC_OP_ADCX;
                        } else {
                            end_op = CC_OP_ADCOX;
                        }
                        break;
                    case CC_OP_ADOX:
                        if (b == 0x1f6) {
                            end_op = CC_OP_ADCOX;
                        } else {
                            carry_in = cpu_cc_src2;
                            end_op = CC_OP_ADOX;
                        }
                        break;
                    case CC_OP_ADCOX:
                        end_op = CC_OP_ADCOX;
                        carry_in = carry_out;
                        break;
                    default:
                        end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
                        break;
                    }
                    /* If we can't reuse carry-out, get it out of EFLAGS.  */
                    if (!carry_in) {
                        if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
                            gen_compute_eflags(s);
                        }
                        carry_in = cpu_tmp0;
                        tcg_gen_extract_tl(carry_in, cpu_cc_src,
                                           ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
                    }

                    switch (ot) {
#ifdef TARGET_X86_64
                    case MO_32:
                        /* If we know TL is 64-bit, and we want a 32-bit
                           result, just do everything in 64-bit arithmetic.  */
                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
                        tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
                        tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
                        tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
                        tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
                        tcg_gen_shri_i64(carry_out, cpu_T0, 32);
                        break;
#endif
                    default:
                        /* Otherwise compute the carry-out in two steps.  */
                        zero = tcg_const_tl(0);
                        tcg_gen_add2_tl(cpu_T0, carry_out,
                                        cpu_T0, zero,
                                        carry_in, zero);
                        tcg_gen_add2_tl(cpu_regs[reg], carry_out,
                                        cpu_regs[reg], carry_out,
                                        cpu_T0, zero);
                        tcg_temp_free(zero);
                        break;
                    }
                    set_cc_op(s, end_op);
                }
                break;

            case 0x1f7: /* shlx Gy, Ey, By */
            case 0x2f7: /* sarx Gy, Ey, By */
            case 0x3f7: /* shrx Gy, Ey, By */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                if (ot == MO_64) {
                    tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
                } else {
                    tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
                }
                if (b == 0x1f7) {
                    tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
                } else if (b == 0x2f7) {
                    if (ot != MO_64) {
                        tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
                    }
                    tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
                } else {
                    if (ot != MO_64) {
                        tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
                    }
                    tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
                }
                gen_op_mov_reg_v(ot, reg, cpu_T0);
                break;

            case 0x0f3:
            case 0x1f3:
            case 0x2f3:
            case 0x3f3: /* Group 17 */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

                switch (reg & 7) {
                case 1: /* blsr By,Ey */
                    tcg_gen_neg_tl(cpu_T1, cpu_T0);
                    tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
                    gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
                    gen_op_update2_cc();
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                case 2: /* blsmsk By,Ey */
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
                    tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
                    tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                case 3: /* blsi By, Ey */
                    tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
                    tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
                    tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
                    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
                    set_cc_op(s, CC_OP_BMILGB + ot);
                    break;

                default:
                    goto unknown_op;
                }
                break;

            default:
                goto unknown_op;
            }
            break;

        case 0x03a:
        case 0x13a:
            b = modrm;
            modrm = x86_ldub_code(env, s);
            rm = modrm & 7;
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            if (b1 >= 2) {
                goto unknown_op;
            }

            sse_fn_eppi = sse_op_table7[b].op[b1];
            if (!sse_fn_eppi) {
                goto unknown_op;
            }
            if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
                goto illegal_op;

            s->rip_offset = 1;

            if (sse_fn_eppi == SSE_SPECIAL) {
                ot = mo_64_32(s->dflag);
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3)
                    gen_lea_modrm(env, s, modrm);
                reg = ((modrm >> 3) & 7) | rex_r;
                val = x86_ldub_code(env, s);
                switch (b) {
                case 0x14: /* pextrb */
                    tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].ZMM_B(val & 15)));
                    if (mod == 3) {
                        gen_op_mov_reg_v(ot, rm, cpu_T0);
                    } else {
                        tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
                                           s->mem_index, MO_UB);
                    }
                    break;
                case 0x15: /* pextrw */
                    tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].ZMM_W(val & 7)));
                    if (mod == 3) {
                        gen_op_mov_reg_v(ot, rm, cpu_T0);
                    } else {
                        tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
                                           s->mem_index, MO_LEUW);
                    }
                    break;
                case 0x16:
                    if (ot == MO_32) { /* pextrd */
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_L(val & 3)));
                        if (mod == 3) {
                            tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
                        } else {
                            tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                                s->mem_index, MO_LEUL);
                        }
                    } else { /* pextrq */
#ifdef TARGET_X86_64
                        tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_Q(val & 1)));
                        if (mod == 3) {
                            tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
                        } else {
                            tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
                                                s->mem_index, MO_LEQ);
                        }
#else
                        goto illegal_op;
#endif
                    }
                    break;
                case 0x17: /* extractps */
                    tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].ZMM_L(val & 3)));
                    if (mod == 3) {
                        gen_op_mov_reg_v(ot, rm, cpu_T0);
                    } else {
                        tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
                                           s->mem_index, MO_LEUL);
                    }
                    break;
                case 0x20: /* pinsrb */
                    if (mod == 3) {
                        gen_op_mov_v_reg(MO_32, cpu_T0, rm);
                    } else {
                        tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
                                           s->mem_index, MO_UB);
                    }
                    tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
                                            xmm_regs[reg].ZMM_B(val & 15)));
                    break;
                case 0x21: /* insertps */
                    if (mod == 3) {
                        tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,xmm_regs[rm]
                                                .ZMM_L((val >> 6) & 3)));
                    } else {
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                    offsetof(CPUX86State,xmm_regs[reg]
                                            .ZMM_L((val >> 4) & 3)));
                    if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_L(0)));
                    if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_L(1)));
                    if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_L(2)));
                    if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                        cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_L(3)));
                    break;
                case 0x22:
                    if (ot == MO_32) { /* pinsrd */
                        if (mod == 3) {
                            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
                        } else {
                            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                                s->mem_index, MO_LEUL);
                        }
                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_L(val & 3)));
                    } else { /* pinsrq */
#ifdef TARGET_X86_64
                        if (mod == 3) {
                            gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                        } else {
                            tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
                                                s->mem_index, MO_LEQ);
                        }
                        tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                        offsetof(CPUX86State,
                                                xmm_regs[reg].ZMM_Q(val & 1)));
#else
                        goto illegal_op;
#endif
                    }
                    break;
                }
                return;
            }

            if (b1) {
                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
                } else {
                    op2_offset = offsetof(CPUX86State,xmm_t0);
                    gen_lea_modrm(env, s, modrm);
                    gen_ldo_env_A0(s, op2_offset);
                }
            } else {
                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
                if (mod == 3) {
                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
                } else {
                    op2_offset = offsetof(CPUX86State,mmx_t0);
                    gen_lea_modrm(env, s, modrm);
                    gen_ldq_env_A0(s, op2_offset);
                }
            }
            val = x86_ldub_code(env, s);

            if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
                set_cc_op(s, CC_OP_EFLAGS);

                if (s->dflag == MO_64) {
                    /* The helper must use entire 64-bit gp registers */
                    val |= 1 << 8;
                }
            }

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;

        case 0x33a:
            /* Various integer extensions at 0f 3a f[0-f].  */
            b = modrm | (b1 << 8);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;

            switch (b) {
            case 0x3f0: /* rorx Gy,Ey, Ib */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                    || !(s->prefix & PREFIX_VEX)
                    || s->vex_l != 0) {
                    goto illegal_op;
                }
                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                b = x86_ldub_code(env, s);
                if (ot == MO_64) {
                    tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
                } else {
                    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                    tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
                    tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
                }
                gen_op_mov_reg_v(ot, reg, cpu_T0);
                break;

            default:
                goto unknown_op;
            }
            break;

        default:
        unknown_op:
            gen_unknown_opcode(env, s);
            return;
        }
    } else {
        /* generic MMX or SSE operation */
        switch(b) {
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
        case 0xc2: /* compare insns */
            s->rip_offset = 1;
            break;
        default:
            break;
        }
        if (is_xmm) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod != 3) {
                int sz = 4;

                gen_lea_modrm(env, s, modrm);
                op2_offset = offsetof(CPUX86State,xmm_t0);

                switch (b) {
                case 0x50 ... 0x5a:
                case 0x5c ... 0x5f:
                case 0xc2:
                    /* Most sse scalar operations.  */
                    if (b1 == 2) {
                        sz = 2;
                    } else if (b1 == 3) {
                        sz = 3;
                    }
                    break;

                case 0x2e:  /* ucomis[sd] */
                case 0x2f:  /* comis[sd] */
                    if (b1 == 0) {
                        sz = 2;
                    } else {
                        sz = 3;
                    }
                    break;
                }

                switch (sz) {
                case 2:
                    /* 32 bit access */
                    gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
                    tcg_gen_st32_tl(cpu_T0, cpu_env,
                                    offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
                    break;
                case 3:
                    /* 64 bit access */
                    gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
                    break;
                default:
                    /* 128 bit access */
                    gen_ldo_env_A0(s, op2_offset);
                    break;
                }
            } else {
                rm = (modrm & 7) | REX_B(s);
                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_ldq_env_A0(s, op2_offset);
            } else {
                rm = (modrm & 7);
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            }
        }
        switch(b) {
        case 0x0f: /* 3DNow! data insns */
            val = x86_ldub_code(env, s);
            sse_fn_epp = sse_op_table5[val];
            if (!sse_fn_epp) {
                goto unknown_op;
            }
            if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
                goto illegal_op;
            }
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0x70: /* pshufx insn */
        case 0xc6: /* pshufx insn */
            val = x86_ldub_code(env, s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
            sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
            break;
        case 0xc2:
            /* compare insns */
            val = x86_ldub_code(env, s);
            if (val >= 8)
                goto unknown_op;
            sse_fn_epp = sse_op_table4[val][b1];

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        case 0xf7:
            /* maskmov : we must prepare A0 */
            if (mod != 3)
                goto illegal_op;
            tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
            gen_extu(s->aflag, cpu_A0);
            gen_add_A0_ds_seg(s);

            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            /* XXX: introduce a new table? */
            sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
            sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
            break;
        default:
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
            tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
            sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        }
        if (b == 0x2e || b == 0x2f) {
            set_cc_op(s, CC_OP_EFLAGS);
        }
    }
}

/* convert one instruction. s->base.is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
{
    CPUX86State *env = cpu->env_ptr;
    int b, prefixes;
    int shift;
    TCGMemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;
    target_ulong pc_start = s->base.pc_next;

    s->pc_start = s->pc = pc_start;
    s->override = -1;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
    if (sigsetjmp(s->jmpbuf, 0) != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        return s->pc;
    }

    prefixes = 0;
    rex_w = -1;
    rex_r = 0;

 next_byte:
    b = x86_ldub_code(env, s);
    /* Collect prefixes.  */
    switch (b) {
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        /* VEX prefixes cannot be used except in 32-bit mode.
           Otherwise the instruction is LES or LDS.  */
        if (s->code32 && !s->vm86) {
            static const int pp_prefix[4] = {
                0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
            };
            int vex3, vex2 = x86_ldub_code(env, s);

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS.  */
                s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
                break;
            }

            /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
                            | PREFIX_LOCK | PREFIX_DATA)) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (x86_64_hregs) {
                goto illegal_op;
            }
#endif
            rex_r = (~vex2 >> 4) & 8;
            if (b == 0xc5) {
                /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
                vex3 = vex2;
                b = x86_ldub_code(env, s) | 0x100;
            } else {
                /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
#ifdef TARGET_X86_64
                s->rex_x = (~vex2 >> 3) & 8;
                s->rex_b = (~vex2 >> 2) & 8;
#endif
                vex3 = x86_ldub_code(env, s);
                rex_w = (vex3 >> 7) & 1;
                switch (vex2 & 0x1f) {
                case 0x01: /* Implied 0f leading opcode bytes.  */
                    b = x86_ldub_code(env, s) | 0x100;
                    break;
                case 0x02: /* Implied 0f 38 leading opcode bytes.  */
                    b = 0x138;
                    break;
                case 0x03: /* Implied 0f 3a leading opcode bytes.  */
                    b = 0x13a;
                    break;
                default:   /* Reserved for future use.  */
                    goto unknown_op;
                }
            }
            s->vex_v = (~vex3 >> 3) & 0xf;
            s->vex_l = (vex3 >> 2) & 1;
            prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
        }
        break;
    }

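    /* Note that VEX.vvvv arrives inverted on the wire; the (~vex3 >> 3)
       above stores it un-complemented in s->vex_v, matching the
       DisasContext field's description. */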
    /* Post-process prefixes.  */
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present.  */
        dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
        /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
        if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
            dflag = MO_32;
        } else {
            dflag = MO_16;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
        if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
            aflag = MO_32;
        }  else {
            aflag = MO_16;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = x86_ldub_code(env, s) | 0x100;
        goto reswitch;

        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            ot = mo_b_d(b, dflag);

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = x86_ldub_code(env, s);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    tcg_gen_movi_tl(cpu_T0, 0);
                    gen_op_mov_reg_v(ot, reg, cpu_T0);
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_v_reg(ot, cpu_T1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = x86_ldub_code(env, s);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_v_reg(ot, cpu_T1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
4688 case 2: /* OP A, Iv */
4689 val
= insn_get(env
, s
, ot
);
4690 tcg_gen_movi_tl(cpu_T1
, val
);
4691 gen_op(s
, op
, ot
, OR_EAX
);
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            int val;

            ot = mo_b_d(b, dflag);

            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83) {
                    s->rip_offset = 1;
                } else {
                    s->rip_offset = insn_const_size(ot);
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(env, s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(env, s, MO_8);
                break;
            }
            tcg_gen_movi_tl(cpu_T1, val);
            gen_op(s, op, ot, opreg);
        }
        break;

        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0) {
                s->rip_offset = insn_const_size(ot);
            }
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here.  */
            if (!(s->prefix & PREFIX_LOCK)
                || op != 2) {
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
            }
        } else {
            gen_op_mov_v_reg(ot, cpu_T0, rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(cpu_T1, val);
            gen_op_testl_T0_T1_cc();
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                tcg_gen_movi_tl(cpu_T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
                                            s->mem_index, ot | MO_LE);
            } else {
                tcg_gen_not_tl(cpu_T0, cpu_T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
                } else {
                    gen_op_mov_reg_v(ot, rm, cpu_T0);
                }
            }
            break;
        case 3: /* neg */
            if (s->prefix & PREFIX_LOCK) {
                TCGLabel *label1;
                TCGv a0, t0, t1, t2;

                if (mod == 3) {
                    goto illegal_op;
                }
                a0 = tcg_temp_local_new();
                t0 = tcg_temp_local_new();
                label1 = gen_new_label();

                tcg_gen_mov_tl(a0, cpu_A0);
                tcg_gen_mov_tl(t0, cpu_T0);

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_temp_free(t1);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_temp_free(t2);
                tcg_temp_free(a0);
                tcg_gen_mov_tl(cpu_T0, t0);
                tcg_temp_free(t0);
            } else {
                tcg_gen_neg_tl(cpu_T0, cpu_T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
                } else {
                    gen_op_mov_reg_v(ot, rm, cpu_T0);
                }
            }
            gen_op_update_neg_cc();
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
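        /* The locked NEG above is emulated with a cmpxchg retry loop:
         * t0 holds the last value observed in memory and t1 its
         * negation; the atomic cmpxchg stores t1 only if memory still
         * equals t0, otherwise it returns the fresh value and the
         * branch retries the loop with that value. */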
        case 4: /* mul */
            switch(ot) {
            case MO_8:
                gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
                tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
                tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
                tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
                tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
                tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
                gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
                tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case MO_8:
                gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
                tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
                tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
                tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
                tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
                tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
                gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
                tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
                tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
                tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
                gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
                tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  cpu_T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
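        /* For the signed multiplies above, CC_SRC ends up holding the
         * difference between the sign-extension of the low half of the
         * product and its actual high bits; a nonzero value means the
         * result did not fit in the destination width, which is exactly
         * the CF/OF condition imul must report. */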
        case 6: /* div */
            switch(ot) {
            case MO_8:
                gen_helper_divb_AL(cpu_env, cpu_T0);
                break;
            case MO_16:
                gen_helper_divw_AX(cpu_env, cpu_T0);
                break;
            default:
            case MO_32:
                gen_helper_divl_EAX(cpu_env, cpu_T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_divq_EAX(cpu_env, cpu_T0);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case MO_8:
                gen_helper_idivb_AL(cpu_env, cpu_T0);
                break;
            case MO_16:
                gen_helper_idivw_AX(cpu_env, cpu_T0);
                break;
            default:
            case MO_32:
                gen_helper_idivl_EAX(cpu_env, cpu_T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_idivq_EAX(cpu_env, cpu_T0);
                break;
#endif
            }
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto unknown_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = MO_64;
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
        } else {
            gen_op_mov_v_reg(ot, cpu_T0, rm);
        }

        switch(op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
            }
            next_eip = s->pc - s->cs_base;
            tcg_gen_movi_tl(cpu_T1, next_eip);
            gen_push_v(s, cpu_T1);
            gen_op_jmp_v(cpu_T0);
            gen_bnd_jmp(s);
            gen_jr(s, cpu_T0);
            break;
        case 3: /* lcall Ev */
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
        do_lcall:
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                           tcg_const_i32(dflag - 1),
                                           tcg_const_tl(s->pc - s->cs_base));
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
                                      tcg_const_i32(dflag - 1),
                                      tcg_const_i32(s->pc - s->cs_base));
            }
            tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
            gen_jr(s, cpu_tmp4);
            break;
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
            }
            gen_op_jmp_v(cpu_T0);
            gen_bnd_jmp(s);
            gen_jr(s, cpu_T0);
            break;
        case 5: /* ljmp Ev */
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
        do_ljmp:
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                          tcg_const_tl(s->pc - s->cs_base));
            } else {
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_jmp_v(cpu_T1);
            }
            tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
            gen_jr(s, cpu_tmp4);
            break;
        case 6: /* push Ev */
            gen_push_v(s, cpu_T0);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(ot, cpu_T1, reg);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
        tcg_gen_movi_tl(cpu_T1, val);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0x98: /* CWDE/CBW */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
            break;
        default:
            tcg_abort();
        }
        break;
    case 0x99: /* CDQ/CWD */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
            tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
            gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
            tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
            gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
            tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
            gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
            break;
        default:
            tcg_abort();
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (b == 0x69) {
            s->rip_offset = insn_const_size(ot);
        } else if (b == 0x6b) {
            s->rip_offset = 1;
        }
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(cpu_T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(cpu_T1, val);
        } else {
            gen_op_mov_v_reg(ot, cpu_T1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_64:
            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
            break;
#endif
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
            tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                              cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
            tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
            gen_op_mov_reg_v(ot, reg, cpu_T0);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(ot, cpu_T0, reg);
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(ot, cpu_T1, rm);
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
        } else {
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            } else {
                gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
                tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
            }
            gen_op_mov_reg_v(ot, reg, cpu_T1);
        }
        gen_op_update2_cc();
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGv oldv, newv, cmpv;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);

            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
                gen_op_mov_reg_v(ot, R_EAX, oldv);
            } else {
                if (mod == 3) {
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(ot, oldv, rm);
                } else {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, cpu_A0);
                    rm = 0; /* avoid warning */
                }
                /* store value = (old == cmp ? new : old);  */
                tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                if (mod == 3) {
                    gen_op_mov_reg_v(ot, R_EAX, oldv);
                    gen_op_mov_reg_v(ot, rm, newv);
                } else {
                    /* Perform an unconditional store cycle like physical cpu;
                       must be before changing accumulator to ensure
                       idempotency if the store faults and the instruction
                       is restarted */
                    gen_op_st_v(s, ot, newv, cpu_A0);
                    gen_op_mov_reg_v(ot, R_EAX, oldv);
                }
            }
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
            tcg_temp_free(oldv);
            tcg_temp_free(newv);
            tcg_temp_free(cmpv);
        }
        break;
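        /* The flags of cmpxchg are those of the implied CMP: CC_SRC
         * gets the value read from the destination, CC_SRCT the
         * accumulator, and CC_DST their difference, so the ordinary
         * CC_OP_SUB machinery sets ZF exactly when the exchange
         * succeeded. */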
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_lea_modrm(env, s, modrm);
            if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                gen_helper_cmpxchg16b(cpu_env, cpu_A0);
            } else {
                gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
            }
        } else
#endif
        {
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_lea_modrm(env, s, modrm);
            if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                gen_helper_cmpxchg8b(cpu_env, cpu_A0);
            } else {
                gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
            }
        }
        set_cc_op(s, CC_OP_EFLAGS);
        break;
        /**************************/
        /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
        gen_push_v(s, cpu_T0);
        break;
    case 0x58 ... 0x5f: /* pop */
        ot = gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        ot = mo_pushpop(s, dflag);
        if (b == 0x68)
            val = insn_get(env, s, ot);
        else
            val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_push_v(s, cpu_T0);
        break;
    case 0x8f: /* pop Ev */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        ot = gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = x86_lduw_code(env, s);
            level = x86_ldub_code(env, s);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        gen_leave(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
        gen_push_v(s, cpu_T0);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_v(s, cpu_T0);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp.  */
        if (s->base.is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            if (reg == R_SS) {
                s->tf = 0;
                gen_eob_inhibit_irq(s, true);
            } else {
                gen_eob(s);
            }
        }
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        if (s->base.is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /**************************/
        /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        }
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(cpu_T0, val);
        if (mod != 3) {
            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
        } else {
            gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
        }
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp.  */
        if (s->base.is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            if (reg == R_SS) {
                s->tf = 0;
                gen_eob_inhibit_irq(s, true);
            } else {
                gen_eob(s);
            }
        }
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            TCGMemOp d_ot;
            TCGMemOp s_ot;

            /* d_ot is the size of destination */
            d_ot = dflag;
            /* ot is the size of source */
            ot = (b & 1) + MO_8;
            /* s_ot is the sign+size of source */
            s_ot = b & 8 ? MO_SIGN | ot : ot;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
                    tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
                } else {
                    gen_op_mov_v_reg(ot, cpu_T0, rm);
                    switch (s_ot) {
                    case MO_UB:
                        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
                        break;
                    case MO_SB:
                        tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
                        break;
                    case MO_UW:
                        tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
                        break;
                    default:
                    case MO_SW:
                        tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
                        break;
                    }
                }
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            }
        }
        break;
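    /* byte_reg_is_xH() is true for AH/CH/DH/BH when uniform byte
     * register addressing is not in effect; those registers live in
     * bits 15:8 of cpu_regs[rm - 4], which is why the sextract above
     * pulls 8 bits from offset 8 instead of doing a plain byte move. */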
    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(a);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(dflag, reg, cpu_A0);
        }
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
            switch (s->aflag) {
#ifdef TARGET_X86_64
            case MO_64:
                offset_addr = x86_ldq_code(env, s);
                break;
#endif
            default:
                offset_addr = insn_get(env, s, s->aflag);
                break;
            }
            tcg_gen_movi_tl(cpu_A0, offset_addr);
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
            } else {
                gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
            }
        }
        break;
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
        gen_extu(s->aflag, cpu_A0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(cpu_T0, tmp);
            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
        } else
#endif
        {
            ot = dflag;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(cpu_T0, val);
            gen_op_mov_reg_v(ot, reg, cpu_T0);
        }
        break;

    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_mov_v_reg(ot, cpu_T1, rm);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            /* for xchg, lock is implicit */
            tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
                                   s->mem_index, ot | MO_LE);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above.  */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above.  */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(ot, reg, cpu_T1);
        if (s->base.is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
        /************************/
        /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = x86_ldub_code(env, s);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(ot, cpu_T1, reg);

        if (shift) {
            TCGv imm = tcg_const_tl(x86_ldub_code(env, s));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
            tcg_temp_free(imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
        /************************/
        /* floats */
    case 0xd8 ... 0xdf:
        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
            /* XXX: what to do if illegal op ? */
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            break;
        }
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = modrm & 7;
        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
        if (mod != 3) {
            /* memory op */
            gen_lea_modrm(env, s, modrm);
            switch(op) {
            case 0x00 ... 0x07: /* fxxxs */
            case 0x10 ... 0x17: /* fixxxl */
            case 0x20 ... 0x27: /* fxxxl */
            case 0x30 ... 0x37: /* fixxx */
                {
                    int op1;
                    op1 = op & 7;

                    switch(op >> 4) {
                    case 0:
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
                                            s->mem_index, MO_LEQ);
                        gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LESW);
                        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    }

                    gen_helper_fp_arith_ST0_FT0(op1);
                    if (op1 == 3) {
                        /* fcomp needs pop */
                        gen_helper_fpop(cpu_env);
                    }
                }
                break;
            case 0x08: /* flds */
            case 0x0a: /* fsts */
            case 0x0b: /* fstps */
            case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
            case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
            case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                switch(op & 7) {
                case 0:
                    switch(op >> 4) {
                    case 0:
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 1:
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    case 2:
                        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
                                            s->mem_index, MO_LEQ);
                        gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
                        break;
                    case 3:
                    default:
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LESW);
                        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    }
                    break;
                case 1:
                    /* XXX: the corresponding CPUID bit must be tested ! */
                    switch(op >> 4) {
                    case 1:
                        gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        break;
                    case 2:
                        gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
                                            s->mem_index, MO_LEQ);
                        break;
                    case 3:
                    default:
                        gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUW);
                        break;
                    }
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    switch(op >> 4) {
                    case 0:
                        gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        break;
                    case 1:
                        gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                        break;
                    case 2:
                        gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
                        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
                                            s->mem_index, MO_LEQ);
                        break;
                    case 3:
                    default:
                        gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUW);
                        break;
                    }
                    if ((op & 7) == 3) {
                        gen_helper_fpop(cpu_env);
                    }
                    break;
                }
                break;
            case 0x0c: /* fldenv mem */
                gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
                break;
            case 0x0d: /* fldcw mem */
                tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                    s->mem_index, MO_LEUW);
                gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
                break;
            case 0x0e: /* fnstenv mem */
                gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
                break;
            case 0x0f: /* fnstcw mem */
                gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
                tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                    s->mem_index, MO_LEUW);
                break;
            case 0x1d: /* fldt mem */
                gen_helper_fldt_ST0(cpu_env, cpu_A0);
                break;
            case 0x1f: /* fstpt mem */
                gen_helper_fstt_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* frstor mem */
                gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
                break;
            case 0x2e: /* fnsave mem */
                gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
                break;
            case 0x2f: /* fnstsw mem */
                gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                    s->mem_index, MO_LEUW);
                break;
            case 0x3c: /* fbld */
                gen_helper_fbld_ST0(cpu_env, cpu_A0);
                break;
            case 0x3e: /* fbstp */
                gen_helper_fbst_ST0(cpu_env, cpu_A0);
                gen_helper_fpop(cpu_env);
                break;
            case 0x3d: /* fildll */
                tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
                gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
                break;
            case 0x3f: /* fistpll */
                gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
                tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
                gen_helper_fpop(cpu_env);
                break;
            default:
                goto unknown_op;
            }
        } else {
            /* register float ops */
            opreg = rm;

            switch(op) {
            case 0x08: /* fld sti */
                gen_helper_fpush(cpu_env);
                gen_helper_fmov_ST0_STN(cpu_env,
                                        tcg_const_i32((opreg + 1) & 7));
                break;
            case 0x09: /* fxchg sti */
            case 0x29: /* fxchg4 sti, undocumented op */
            case 0x39: /* fxchg7 sti, undocumented op */
                gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x0a: /* grp d9/2 */
                switch(rm) {
                case 0: /* fnop */
                    /* check exceptions (FreeBSD FPU probe) */
                    gen_helper_fwait(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }
                break;
            case 0x0c: /* grp d9/4 */
                switch(rm) {
                case 0: /* fchs */
                    gen_helper_fchs_ST0(cpu_env);
                    break;
                case 1: /* fabs */
                    gen_helper_fabs_ST0(cpu_env);
                    break;
                case 4: /* ftst */
                    gen_helper_fldz_FT0(cpu_env);
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 5: /* fxam */
                    gen_helper_fxam_ST0(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }
                break;
            case 0x0d: /* grp d9/5 */
                switch(rm) {
                case 0: /* fld1 */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fld1_ST0(cpu_env);
                    break;
                case 1: /* fldl2t */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldl2t_ST0(cpu_env);
                    break;
                case 2: /* fldl2e */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldl2e_ST0(cpu_env);
                    break;
                case 3: /* fldpi */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldpi_ST0(cpu_env);
                    break;
                case 4: /* fldlg2 */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldlg2_ST0(cpu_env);
                    break;
                case 5: /* fldln2 */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldln2_ST0(cpu_env);
                    break;
                case 6: /* fldz */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fldz_ST0(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }
                break;
            case 0x0e: /* grp d9/6 */
                switch(rm) {
                case 0: /* f2xm1 */
                    gen_helper_f2xm1(cpu_env);
                    break;
                case 1: /* fyl2x */
                    gen_helper_fyl2x(cpu_env);
                    break;
                case 2: /* fptan */
                    gen_helper_fptan(cpu_env);
                    break;
                case 3: /* fpatan */
                    gen_helper_fpatan(cpu_env);
                    break;
                case 4: /* fxtract */
                    gen_helper_fxtract(cpu_env);
                    break;
                case 5: /* fprem1 */
                    gen_helper_fprem1(cpu_env);
                    break;
                case 6: /* fdecstp */
                    gen_helper_fdecstp(cpu_env);
                    break;
                default:
                case 7: /* fincstp */
                    gen_helper_fincstp(cpu_env);
                    break;
                }
                break;
            case 0x0f: /* grp d9/7 */
                switch(rm) {
                case 0: /* fprem */
                    gen_helper_fprem(cpu_env);
                    break;
                case 1: /* fyl2xp1 */
                    gen_helper_fyl2xp1(cpu_env);
                    break;
                case 2: /* fsqrt */
                    gen_helper_fsqrt(cpu_env);
                    break;
                case 3: /* fsincos */
                    gen_helper_fsincos(cpu_env);
                    break;
                case 5: /* fscale */
                    gen_helper_fscale(cpu_env);
                    break;
                case 4: /* frndint */
                    gen_helper_frndint(cpu_env);
                    break;
                case 6: /* fsin */
                    gen_helper_fsin(cpu_env);
                    break;
                default:
                case 7: /* fcos */
                    gen_helper_fcos(cpu_env);
                    break;
                }
                break;
            case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
            case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
            case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                {
                    int op1;

                    op1 = op & 7;
                    if (op >= 0x20) {
                        gen_helper_fp_arith_STN_ST0(op1, opreg);
                        if (op >= 0x30) {
                            gen_helper_fpop(cpu_env);
                        }
                    } else {
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                        gen_helper_fp_arith_ST0_FT0(op1);
                    }
                }
                break;
            case 0x02: /* fcom */
            case 0x22: /* fcom2, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                break;
            case 0x03: /* fcomp */
            case 0x23: /* fcomp3, undocumented op */
            case 0x32: /* fcomp5, undocumented op */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x15: /* da/5 */
                switch(rm) {
                case 1: /* fucompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }
                break;
            case 0x1c:
                switch(rm) {
                case 0: /* feni (287 only, just do nop here) */
                    break;
                case 1: /* fdisi (287 only, just do nop here) */
                    break;
                case 2: /* fclex */
                    gen_helper_fclex(cpu_env);
                    break;
                case 3: /* fninit */
                    gen_helper_fninit(cpu_env);
                    break;
                case 4: /* fsetpm (287 only, just do nop here) */
                    break;
                default:
                    goto unknown_op;
                }
                break;
            case 0x1d: /* fucomi */
                if (!(s->cpuid_features & CPUID_CMOV)) {
                    goto illegal_op;
                }
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x1e: /* fcomi */
                if (!(s->cpuid_features & CPUID_CMOV)) {
                    goto illegal_op;
                }
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x28: /* ffree sti */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2a: /* fst sti */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                break;
            case 0x2b: /* fstp sti */
            case 0x0b: /* fstp1 sti, undocumented op */
            case 0x3a: /* fstp8 sti, undocumented op */
            case 0x3b: /* fstp9 sti, undocumented op */
                gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x2c: /* fucom st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                break;
            case 0x2d: /* fucomp st(i) */
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucom_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                break;
            case 0x33: /* de/3 */
                switch(rm) {
                case 1: /* fcompp */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }
                break;
            case 0x38: /* ffreep sti, undocumented op */
                gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fpop(cpu_env);
                break;
            case 0x3c: /* df/4 */
                switch(rm) {
                case 0:
                    gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
                    tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
                    gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
                    break;
                default:
                    goto unknown_op;
                }
                break;
            case 0x3d: /* fucomip */
                if (!(s->cpuid_features & CPUID_CMOV)) {
                    goto illegal_op;
                }
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fucomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x3e: /* fcomip */
                if (!(s->cpuid_features & CPUID_CMOV)) {
                    goto illegal_op;
                }
                gen_update_cc_op(s);
                gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                gen_helper_fcomi_ST0_FT0(cpu_env);
                gen_helper_fpop(cpu_env);
                set_cc_op(s, CC_OP_EFLAGS);
                break;
            case 0x10 ... 0x13: /* fcmovxx */
            case 0x18 ... 0x1b:
                {
                    int op1;
                    TCGLabel *l1;
                    static const uint8_t fcmov_cc[8] = {
                        (JCC_B << 1),
                        (JCC_Z << 1),
                        (JCC_BE << 1),
                        (JCC_P << 1),
                    };

                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                    l1 = gen_new_label();
                    gen_jcc1_noeob(s, op1, l1);
                    gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_set_label(l1);
                }
                break;
            default:
                goto unknown_op;
            }
        }
        break;
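    /* fcmov_cc maps the two low opcode bits onto the B/Z/BE/P
     * conditions, and bit 3 selects the negated (db) forms; the
     * condition passed to gen_jcc1_noeob is inverted, so the branch
     * skips the ST0 move exactly when the fcmov predicate is false. */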
        /************************/
        /* string ops */

    case 0xa4: /* movsS */
    case 0xa5:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_movs(s, ot);
        }
        break;

    case 0xaa: /* stosS */
    case 0xab:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_scas(s, ot);
        }
        break;

    case 0xa6: /* cmpsS */
    case 0xa7:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
    case 0x6c: /* insS */
    case 0x6d:
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_ins(s, ot);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_outs(s, ot);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
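    /* When instruction counting (icount) is in use, an I/O access must
     * fall at a deterministic instruction boundary, so the non-rep
     * forms above end the translation block with gen_jmp() right after
     * the access rather than continuing to translate. */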
        /************************/
        /* port I/O */

    case 0xe4:
    case 0xe5:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        tcg_gen_movi_i32(cpu_tmp2_i32, val);
        gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
        gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xe6:
    case 0xe7:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_v_reg(ot, cpu_T1, R_EAX);

        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        tcg_gen_movi_i32(cpu_tmp2_i32, val);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xec:
    case 0xed:
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
        gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0xee:
    case 0xef:
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_v_reg(ot, cpu_T1, R_EAX);

        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
        /************************/
        /* control */
    case 0xc2: /* ret im */
        val = x86_ldsw_code(env, s);
        ot = gen_pop_T0(s);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load.  */
        gen_op_jmp_v(cpu_T0);
        gen_bnd_jmp(s);
        gen_jr(s, cpu_T0);
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load.  */
        gen_op_jmp_v(cpu_T0);
        gen_bnd_jmp(s);
        gen_jr(s, cpu_T0);
        break;
    case 0xca: /* lret im */
        val = x86_ldsw_code(env, s);
    do_lret:
        if (s->pe && !s->vm86) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
                                      tcg_const_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_v(cpu_T0);
            /* pop selector */
            gen_add_A0_im(s, 1 << dflag);
            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
            gen_op_movl_seg_T0_vm(R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (2 << dflag));
        }
        gen_eob(s);
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
        if (!s->pe) {
            /* real mode */
            gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
            set_cc_op(s, CC_OP_EFLAGS);
        } else if (s->vm86) {
            if (s->iopl != 3) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
                set_cc_op(s, CC_OP_EFLAGS);
            }
        } else {
            gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
                                      tcg_const_i32(s->pc - s->cs_base));
            set_cc_op(s, CC_OP_EFLAGS);
        }
        gen_eob(s);
        break;
    case 0xe8: /* call im */
        {
            if (dflag != MO_16) {
                tval = (int32_t)insn_get(env, s, MO_32);
            } else {
                tval = (int16_t)insn_get(env, s, MO_16);
            }
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (dflag == MO_16) {
                tval &= 0xffff;
            } else if (!CODE64(s)) {
                tval &= 0xffffffff;
            }
            tcg_gen_movi_tl(cpu_T0, next_eip);
            gen_push_v(s, cpu_T0);
            gen_bnd_jmp(s);
            gen_jmp(s, tval);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(cpu_T0, selector);
            tcg_gen_movi_tl(cpu_T1, offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        if (dflag != MO_16) {
            tval = (int32_t)insn_get(env, s, MO_32);
        } else {
            tval = (int16_t)insn_get(env, s, MO_16);
        }
        tval += s->pc - s->cs_base;
        if (dflag == MO_16) {
            tval &= 0xffff;
        } else if (!CODE64(s)) {
            tval &= 0xffffffff;
        }
        gen_bnd_jmp(s);
        gen_jmp(s, tval);
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(cpu_T0, selector);
            tcg_gen_movi_tl(cpu_T1, offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        tval = (int8_t)insn_get(env, s, MO_8);
        tval += s->pc - s->cs_base;
        if (dflag == MO_16) {
            tval &= 0xffff;
        }
        gen_jmp(s, tval);
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        tval = (int8_t)insn_get(env, s, MO_8);
        goto do_jcc;
    case 0x180 ... 0x18f: /* jcc Jv */
        if (dflag != MO_16) {
            tval = (int32_t)insn_get(env, s, MO_32);
        } else {
            tval = (int16_t)insn_get(env, s, MO_16);
        }
    do_jcc:
        next_eip = s->pc - s->cs_base;
        tval += next_eip;
        if (dflag == MO_16) {
            tval &= 0xffff;
        }
        gen_bnd_jmp(s);
        gen_jcc(s, b, tval, next_eip);
        break;

    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = x86_ldub_code(env, s);
        gen_setcc1(s, b, cpu_T0);
        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        if (!(s->cpuid_features & CPUID_CMOV)) {
            goto illegal_op;
        }
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_cmovcc1(env, s, ot, b, modrm, reg);
        break;
        /************************/
        /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_helper_read_eflags(cpu_T0, cpu_env);
            gen_push_v(s, cpu_T0);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            ot = gen_pop_T0(s);
            if (s->cpl == 0) {
                if (dflag != MO_16) {
                    gen_helper_write_eflags(cpu_env, cpu_T0,
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK |
                                                           IOPL_MASK)));
                } else {
                    gen_helper_write_eflags(cpu_env, cpu_T0,
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK | IOPL_MASK)
                                                          & 0xffff));
                }
            } else {
                if (s->cpl <= s->iopl) {
                    if (dflag != MO_16) {
                        gen_helper_write_eflags(cpu_env, cpu_T0,
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T0,
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)
                                                              & 0xffff));
                    }
                } else {
                    if (dflag != MO_16) {
                        gen_helper_write_eflags(cpu_env, cpu_T0,
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T0,
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)
                                                              & 0xffff));
                    }
                }
            }
            gen_pop_update(s, ot);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
        gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
            }
        } else {
            gen_op_mov_v_reg(ot, cpu_T0, rm);
        }
        /* load shift */
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(cpu_T1, val);
        if (op < 4)
            goto unknown_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(MO_32, cpu_T1, reg);
        if (mod != 3) {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* specific case: we need to add a displacement */
            gen_exts(ot, cpu_T1);
            tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
            tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
            tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
            gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
            }
        } else {
            gen_op_mov_v_reg(ot, cpu_T0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
        if (s->prefix & PREFIX_LOCK) {
            switch (op) {
            case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now.  */
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
                break;
            case 1: /* bts */
                tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
                                           s->mem_index, ot | MO_LE);
                break;
            case 2: /* btr */
                tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
                tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            default:
            case 3: /* btc */
                tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            }
            tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
        } else {
            tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
            switch (op) {
            case 0: /* bt */
                /* Data already loaded; nothing to do.  */
                break;
            case 1: /* bts */
                tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
                break;
            case 2: /* btr */
                tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
                break;
            default:
            case 3: /* btc */
                tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
                break;
            }
            if (op != 0) {
                if (mod != 3) {
                    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
                } else {
                    gen_op_mov_reg_v(ot, rm, cpu_T0);
                }
            }
        }
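        /* For the register-offset forms above, the bit index may reach
         * outside the addressed word: the sari/shli pair divides cpu_T1
         * by the operand width in bits (8 << ot) and rescales the word
         * index to a byte displacement, which is folded into the modrm
         * address before segment translation. */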
        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined.  */
        switch (s->cc_op) {
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width.  */
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
            break;
        default:
            /* Otherwise, generate EFLAGS and replace the C bit.  */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T0);

        /* Note that lzcnt and tzcnt are in different extensions.  */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top.  */
                tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size.  */
                tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result.  */
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
            set_cc_op(s, CC_OP_LOGICB + ot);

            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero.  */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros.  */
                tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
                tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
            } else {
                tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
            }
        }
        gen_op_mov_reg_v(ot, reg, cpu_T0);
        break;
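    /* The xori trick for bsr: clz counts from the top, but bsr must
     * report a bit index from the bottom, i.e. (width - 1) - clz.
     * Since 63 - x == x ^ 63 for 0 <= x <= 63, XORing both the
     * fallback value and the result with TARGET_LONG_BITS - 1 performs
     * that subtraction while still letting a zero input fall through
     * to the unchanged register value. */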
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        } else {
            gen_helper_aam(cpu_env, tcg_const_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;
        /************************/
        /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        } else {
            gen_helper_fwait(cpu_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
        break;
    case 0xcd: /* int N */
        val = x86_ldub_code(env, s);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
        break;
#ifdef WANT_ICEBP
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
        gen_debug(s, pc_start - s->cs_base);
#else
        /* start debug */
        tb_flush(CPU(x86_env_get_cpu(env)));
        qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
        break;
#endif
    case 0xfa: /* cli */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0xfb: /* sti */
        if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
            gen_helper_sti(cpu_env);
            /* interruptions are enabled only the first insn after sti */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob_inhibit_irq(s, true);
        } else {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_v_reg(ot, cpu_T0, reg);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        if (ot == MO_16) {
            gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
        } else {
            gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
        }
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            gen_op_mov_v_reg(MO_64, cpu_T0, reg);
            tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
        } else
#endif
        {
            gen_op_mov_v_reg(MO_32, cpu_T0, reg);
            tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
            tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_32, reg, cpu_T0);
        }
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, cpu_T0);
        tcg_gen_neg_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
        break;
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            TCGLabel *l1, *l2, *l3;

            tval = (int8_t)insn_get(env, s, MO_8);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (dflag == MO_16) {
                tval &= 0xffff;
            }

            l1 = gen_new_label();
            l2 = gen_new_label();
            l3 = gen_new_label();
            b &= 3;
            switch(b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s->aflag, l3);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s->aflag, l1);
                break;
            default:
            case 3: /* jecxz */
                gen_op_jz_ecx(s->aflag, l1);
                break;
            }

            gen_set_label(l3);
            gen_jmp_im(next_eip);
            tcg_gen_br(l2);

            gen_set_label(l1);
            gen_jmp_im(tval);
            gen_set_label(l2);
            gen_eob(s);
        }
        break;
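    /* Three labels implement the loop family: l1 is the "branch taken"
     * path (jump to tval), l3 is the fall-through path for loopnz/loopz
     * when ECX reaches zero (continue at next_eip), and l2 joins both
     * paths so a single gen_eob() ends the translation block. */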
7130 case 0x130: /* wrmsr */
7131 case 0x132: /* rdmsr */
7133 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7135 gen_update_cc_op(s
);
7136 gen_jmp_im(pc_start
- s
->cs_base
);
7138 gen_helper_rdmsr(cpu_env
);
7140 gen_helper_wrmsr(cpu_env
);
7144 case 0x131: /* rdtsc */
7145 gen_update_cc_op(s
);
7146 gen_jmp_im(pc_start
- s
->cs_base
);
7147 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7150 gen_helper_rdtsc(cpu_env
);
7151 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7153 gen_jmp(s
, s
->pc
- s
->cs_base
);
7156 case 0x133: /* rdpmc */
7157 gen_update_cc_op(s
);
7158 gen_jmp_im(pc_start
- s
->cs_base
);
7159 gen_helper_rdpmc(cpu_env
);
7161 case 0x134: /* sysenter */
7162 /* For Intel SYSENTER is valid on 64-bit */
7163 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7166 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7168 gen_helper_sysenter(cpu_env
);
7172 case 0x135: /* sysexit */
7173 /* For Intel SYSEXIT is valid on 64-bit */
7174 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7177 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7179 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
- 1));
7183 #ifdef TARGET_X86_64
7184 case 0x105: /* syscall */
7185 /* XXX: is it usable in real mode ? */
7186 gen_update_cc_op(s
);
7187 gen_jmp_im(pc_start
- s
->cs_base
);
7188 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7189 /* TF handling for the syscall insn is different. The TF bit is checked
7190 after the syscall insn completes. This allows #DB to not be
7191 generated after one has entered CPL0 if TF is set in FMASK. */
7192 gen_eob_worker(s
, false, true);
7194 case 0x107: /* sysret */
7196 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7198 gen_helper_sysret(cpu_env
, tcg_const_i32(dflag
- 1));
7199 /* condition codes are modified only in long mode */
7201 set_cc_op(s
, CC_OP_EFLAGS
);
7203 /* TF handling for the sysret insn is different. The TF bit is
7204 checked after the sysret insn completes. This allows #DB to be
7205 generated "as if" the syscall insn in userspace has just
7207 gen_eob_worker(s
, false, true);
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_cpuid(cpu_env);
        break;
    case 0xf4: /* hlt */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
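    /* 0f 00 is decoded below as "group 6": the reg field of the modrm
     * byte selects sldt/str/lldt/ltr/verr/verw rather than naming an
     * operand register.
     */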
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_lldt(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_ltr(cpu_env, cpu_tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(cpu_env, cpu_T0);
            } else {
                gen_helper_verw(cpu_env, cpu_T0);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;
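    /* 0f 01 ("group 7") is decoded on the whole modrm byte: the mod and
     * rm fields distinguish the memory forms (sgdt/sidt/lgdt/lidt, ...)
     * from the register-form specials such as monitor/mwait and the SVM
     * instructions, hence the CASE_MODRM_* dispatch below.
     */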
    case 0x101:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(cpu_T0,
                             cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            break;
        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
            gen_extu(s->aflag, cpu_A0);
            gen_add_A0_ds_seg(s);
            gen_helper_monitor(cpu_env, cpu_A0);
            break;
        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
            gen_eob(s);
            break;
        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || s->cpl != 0) {
                goto illegal_op;
            }
            gen_helper_clac(cpu_env);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || s->cpl != 0) {
                goto illegal_op;
            }
            gen_helper_stac(cpu_env);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        CASE_MODRM_MEM_OP(1): /* sidt */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            break;
        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
            break;
        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
            /* End TB because translation flags may change. */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
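        /* 0f 01 d8..df are the AMD SVM instructions.  They are gated on
         * the SVME flag (or the SKINIT CPUID bit for stgi/skinit), and
         * most additionally require protected mode and CPL 0.
         */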
        case 0xd8: /* VMRUN */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
                             tcg_const_i32(s->pc - pc_start));
            tcg_gen_exit_tb(0);
            s->base.is_jmp = DISAS_NORETURN;
            break;
        case 0xd9: /* VMMCALL */
            if (!(s->flags & HF_SVME_MASK)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmmcall(cpu_env);
            break;
        case 0xda: /* VMLOAD */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
            break;
        case 0xdb: /* VMSAVE */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
            break;
        case 0xdc: /* STGI */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_stgi(cpu_env);
            break;
        case 0xdd: /* CLGI */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_clgi(cpu_env);
            break;
        case 0xde: /* SKINIT */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_skinit(cpu_env);
            break;
        case 0xdf: /* INVLPGA */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
            break;
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
            break;
        CASE_MODRM_MEM_OP(3): /* lidt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;
        CASE_MODRM_OP(4): /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
            if (CODE64(s)) {
                mod = (modrm >> 6) & 3;
                ot = (mod != 3 ? MO_16 : s->dflag);
            } else {
                ot = MO_16;
            }
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
            break;
        CASE_MODRM_OP(6): /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_helper_lmsw(cpu_env, cpu_T0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, cpu_A0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(cpu_T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;
        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_rdtscp(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(MO_32, cpu_T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
                }
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            }
        } else
#endif
        {
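            /* 16-bit arpl path.  The trick used below: t2 holds the new
             * ZF value (0 or CC_Z).  If RPL(dst) < RPL(src), dst's RPL
             * is raised to src's and t2 becomes CC_Z; after
             * gen_compute_eflags() the value is merged into cc_src,
             * which under CC_OP_EFLAGS holds the flags themselves.
             */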
            TCGLabel *label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = MO_16;
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                a0 = NULL;
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
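    /* lar and lsl only write the destination when the helper reports
     * success; the helpers signal this by setting ZF in cc_src, so the
     * generated code tests CC_Z and conditionally skips the register
     * writeback.
     */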
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, cpu_T0);
            } else {
                gen_helper_lsl(t0, cpu_env, cpu_T0);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
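    /* 0f 1a and 0f 1b are the MPX bound instructions when MPX is
     * enabled, distinguished by the mandatory prefix: F3 selects
     * bndcl, F2 bndcu/bndcn, 66 bndmov, and no prefix with a memory
     * operand gives bndldx/bndstx.  Without MPX they decode as
     * multi-byte nops.
     */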
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
                tcg_temp_free_i64(notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch(reg) {
            case 0:
            case 2:
            case 3:
            case 4:
            case 8:
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_op_mov_v_reg(ot, cpu_T0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                         cpu_T0);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_v(ot, rm, cpu_T0);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                }
                break;
            default:
                goto unknown_op;
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(ot, cpu_T0, rm);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
                gen_op_mov_reg_v(ot, rm, cpu_T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
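        /* 0f ae ("group 15") also dispatches on the whole modrm byte:
         * the memory forms give fxsave/fxrstor, ldmxcsr/stmxcsr and the
         * xsave family, while the register forms encode the fences and,
         * with an F3 prefix, the fsgsbase instructions.
         */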
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, cpu_A0);
            break;
        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, cpu_A0);
            break;
        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            break;
        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
            break;
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
            break;
        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            gen_update_cc_op(s);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
            }
            break;
        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T0);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
        tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
        /* fall through */
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto unknown_op;
    }
    return s->pc;
 illegal_op:
    gen_illegal_opcode(s);
    return s->pc;
 unknown_op:
    gen_unknown_opcode(env, s);
    return s->pc;
}
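/* tcg_x86_init registers the fixed TCG globals that back the translator:
 * each entry in cpu_regs[], cpu_seg_base[], cpu_bndl[]/cpu_bndu[] and the
 * cc_* values aliases a field of CPUX86State, so TCG loads and stores to
 * them become accesses to the CPU state structure.  For example, code
 * generated through cpu_regs[R_EAX] reads and writes env->regs[R_EAX].
 */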
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax", [R_ECX] = "rcx", [R_EDX] = "rdx", [R_EBX] = "rbx",
        [R_ESP] = "rsp", [R_EBP] = "rbp", [R_ESI] = "rsi", [R_EDI] = "rdi",
        [8] = "r8",   [9] = "r9",   [10] = "r10", [11] = "r11",
        [12] = "r12", [13] = "r13", [14] = "r14", [15] = "r15",
#else
        [R_EAX] = "eax", [R_ECX] = "ecx", [R_EDX] = "edx", [R_EBX] = "ebx",
        [R_ESP] = "esp", [R_EBP] = "ebp", [R_ESI] = "esi", [R_EDI] = "edi",
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base", [R_DS] = "ds_base", [R_ES] = "es_base",
        [R_FS] = "fs_base", [R_GS] = "gs_base", [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu,
                                      int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    target_ulong cs_base = dc->base.tb->cs_base;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
#ifdef CONFIG_SOFTMMU
    dc->mem_index = cpu_mmu_index(env, false);
#endif
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK));
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes. The first one was used
       always except single step mode. And this setting
       disables jumps optimization and control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    return max_insns;
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}

static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                     const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    /* If RF is set, suppress an internally generated breakpoint.  */
    int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
    if (bp->flags & flags) {
        gen_debug(dc, dc->base.pc_next - dc->cs_base);
        dc->base.is_jmp = DISAS_NORETURN;
        /* The address covered by the breakpoint must be included in
           [tb->pc, tb->pc + tb->size) in order for it to be properly
           cleared -- thus we increment the PC here so that the generic
           logic setting tb->size later does the right thing.  */
        dc->base.pc_next += 1;
        return true;
    }

    return false;
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_next = disas_insn(dc, cpu);

    if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) {
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
               && ((dc->base.pc_next & TARGET_PAGE_MASK)
                   != ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
                       & TARGET_PAGE_MASK)
                   || (dc->base.pc_next & ~TARGET_PAGE_MASK) == 0)) {
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception. Do it only when the boundary is
           crossed by the first instruction in the block.
           If the current instruction has already crossed the boundary,
           that is fine, because an exception hasn't stopped this code.
         */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }

    dc->base.pc_next = pc_next;
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->base.is_jmp == DISAS_TOO_MANY) {
        gen_jmp_im(dc->base.pc_next - dc->cs_base);
        gen_eob(dc);
    }
}

*dcbase
,
8552 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8554 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
8555 log_target_disas(cpu
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .breakpoint_check   = i386_tr_breakpoint_check,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&i386_tr_ops, &dc.base, cpu, tb);
}

void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}