/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)

/* For a switch indexed by MODRM, match all memory operands for a given OP. */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
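
/*
 * For example, CASE_MODRM_MEM_OP(4) expands to the case ranges
 * 0x20...0x27, 0x60...0x67 and 0xa0...0xa7, i.e. every modrm byte whose
 * reg/op field is 4 and whose mod field is not 3 (a memory operand);
 * CASE_MODRM_OP(4) additionally matches 0xe0...0xe7 (register operands).
 */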
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

static int x86_64_hregs;
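
/*
 * The cpu_cc_* globals implement lazy EFLAGS evaluation: the translator
 * records the operands of the last flag-setting operation (dst, src,
 * src2, and srcT for subtractions) together with a CC_OP_* tag, and only
 * materializes the individual flags when an instruction actually reads
 * them.  cpu_T0/cpu_T1 and the cpu_tmp* variables are scratch values
 * local to the current instruction.
 */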
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc_start;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_xsave_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
};
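
/*
 * Example: moving from CC_OP_SUBB (DST, SRC and SRCT live) to
 * CC_OP_LOGICB (only DST live) lets set_cc_op() discard cpu_cc_src and
 * cpu_cc_srcT, so TCG can delete any computation that only fed them.
 */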
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean. */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
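
/*
 * gen_update_cc_op() spills the translation-time s->cc_op into the
 * cpu_cc_op global; it is needed before anything that reads cc_op at
 * run time, such as the cc_compute helpers or the end of a translation
 * block.
 */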
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
    return true;
}
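
/*
 * Example: with no REX prefix a byte-register number of 4 selects AH
 * (bits 15..8 of EAX), but once any REX prefix has been seen
 * (x86_64_hregs is set) the same number selects SPL, the low byte of RSP.
 */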
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline TCGMemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
    return ot == MO_64 ? MO_64 : MO_32;
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
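
/*
 * Example: opcode 0x88 (MOV Eb,Gb) has bit 0 clear, so mo_b_d() yields
 * MO_8, while 0x89 (MOV Ev,Gv) uses the current operand size.  For IN and
 * OUT, mo_b_d32() additionally caps the size at 32 bits since there are
 * no 64-bit port accesses.
 */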
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
    default:
        tcg_abort();
    }
}
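
/*
 * The MO_32 case above reflects the x86-64 rule that a 32-bit register
 * write zero-extends into the full 64-bit register (e.g. "mov eax, 1"
 * clears the upper half of RAX), whereas 8- and 16-bit writes preserve
 * the remaining bits, hence the deposit operations.
 */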
static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
    }
}
static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T0);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}
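
/*
 * Convention used by the helpers above and below: cpu_T0/cpu_T1 carry the
 * current instruction's operands, cpu_A0 carries the linear address of a
 * memory operand, and the pseudo register number OR_TMP0 means "the
 * operand is in T0 / in memory at A0" rather than one of cpu_regs[].
 */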
/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(cpu_A0, a0);
            return;
        }
        break;
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && s->addseg) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        if (ovr_seg < 0) {
            ovr_seg = def_seg;
        }
        tcg_gen_ext16u_tl(cpu_A0, a0);
        a0 = cpu_A0;
        break;
    default:
        tcg_abort();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(cpu_A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
        } else {
            tcg_gen_add_tl(cpu_A0, a0, seg);
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
    }
}
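
/*
 * Put differently: 64-bit addressing only adds a base for an explicit
 * segment override (in practice FS/GS), 32-bit addresses are
 * zero-extended and pick up the default segment base only when some data
 * segment has a non-zero base (s->addseg), and 16-bit addresses are first
 * truncated to 64K.  Outside 64-bit code the final linear address wraps
 * at 4GB, hence the trailing ext32u.
 */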
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
}
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
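
/*
 * Port I/O is privileged: when CPL > IOPL in protected mode, or always in
 * vm86 mode, the check_io helpers consult the I/O permission bitmap in
 * the TSS and raise #GP if the port is not accessible.  Under SVM the
 * IOIO intercept is checked as well; the instruction length
 * (next_eip - cur_eip) is passed so the VMEXIT can report the next RIP.
 */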
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);

    zero = tcg_const_tl(0);
    if (dead & USES_CC_DST) {
        dst = zero;
    }
    if (dead & USES_CC_SRC) {
        src1 = zero;
    }
    if (dead & USES_CC_SRC2) {
        src2 = zero;
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
    tcg_temp_free(zero);
}
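
/*
 * After gen_compute_eflags() the lazy state is collapsed: cpu_cc_src
 * holds the architectural condition bits (CC_C, CC_Z, CC_S, ...) and the
 * operation tag becomes CC_OP_EFLAGS, so later flag tests are simple bit
 * tests on cpu_cc_src.
 */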
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0. */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SARB ... CC_OP_SARQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
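
/*
 * A CCPrepare describes a condition as a TCG comparison plus an optional
 * mask, without forcing EFLAGS to be computed.  For example, right after
 * a CMP (CC_OP_SUBB ... CC_OP_SUBQ) the carry flag is just
 * "srcT < src (unsigned)", so a following JB can be emitted as a single
 * brcond on the saved operands.
 */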
806 /* compute eflags.P to reg */
807 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
809 gen_compute_eflags(s
);
810 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
814 /* compute eflags.S to reg */
815 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
819 gen_compute_eflags(s
);
825 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
828 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
831 TCGMemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
832 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
833 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
838 /* compute eflags.O to reg */
839 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
844 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
845 .mask
= -1, .no_setcond
= true };
847 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
849 gen_compute_eflags(s
);
850 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
855 /* compute eflags.Z to reg */
856 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
860 gen_compute_eflags(s
);
866 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
869 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
872 TCGMemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
873 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
874 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
881 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
883 int inv
, jcc_op
, cond
;
889 jcc_op
= (b
>> 1) & 7;
892 case CC_OP_SUBB
... CC_OP_SUBQ
:
893 /* We optimize relational operators for the cmp/jcc case. */
894 size
= s
->cc_op
- CC_OP_SUBB
;
897 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
898 gen_extu(size
, cpu_tmp4
);
899 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
900 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= cpu_tmp4
,
901 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
910 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
911 gen_exts(size
, cpu_tmp4
);
912 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
913 cc
= (CCPrepare
) { .cond
= cond
, .reg
= cpu_tmp4
,
914 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
924 /* This actually generates good code for JC, JZ and JS. */
927 cc
= gen_prepare_eflags_o(s
, reg
);
930 cc
= gen_prepare_eflags_c(s
, reg
);
933 cc
= gen_prepare_eflags_z(s
, reg
);
936 gen_compute_eflags(s
);
937 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
938 .mask
= CC_Z
| CC_C
};
941 cc
= gen_prepare_eflags_s(s
, reg
);
944 cc
= gen_prepare_eflags_p(s
, reg
);
947 gen_compute_eflags(s
);
948 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
951 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
952 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
953 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
958 gen_compute_eflags(s
);
959 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
962 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
963 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
964 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
965 .mask
= CC_S
| CC_Z
};
972 cc
.cond
= tcg_invert_cond(cc
.cond
);
977 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
979 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
982 if (cc
.cond
== TCG_COND_EQ
) {
983 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
985 tcg_gen_mov_tl(reg
, cc
.reg
);
990 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
991 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
992 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
993 tcg_gen_andi_tl(reg
, reg
, 1);
997 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1001 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1003 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1007 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1009 gen_setcc1(s
, JCC_B
<< 1, reg
);
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
1014 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, TCGLabel
*l1
)
1016 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T0
);
1018 if (cc
.mask
!= -1) {
1019 tcg_gen_andi_tl(cpu_T0
, cc
.reg
, cc
.mask
);
1023 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1025 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon. */
1032 static inline void gen_jcc1(DisasContext
*s
, int b
, TCGLabel
*l1
)
1034 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T0
);
1036 gen_update_cc_op(s
);
1037 if (cc
.mask
!= -1) {
1038 tcg_gen_andi_tl(cpu_T0
, cc
.reg
, cc
.mask
);
1041 set_cc_op(s
, CC_OP_DYNAMIC
);
1043 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1045 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1049 /* XXX: does not work with gdbstub "ice" single step - not a
1051 static TCGLabel
*gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1053 TCGLabel
*l1
= gen_new_label();
1054 TCGLabel
*l2
= gen_new_label();
1055 gen_op_jnz_ecx(s
->aflag
, l1
);
1057 gen_jmp_tb(s
, next_eip
, 1);
1062 static inline void gen_stos(DisasContext
*s
, TCGMemOp ot
)
1064 gen_op_mov_v_reg(MO_32
, cpu_T0
, R_EAX
);
1065 gen_string_movl_A0_EDI(s
);
1066 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
1067 gen_op_movl_T0_Dshift(ot
);
1068 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1071 static inline void gen_lods(DisasContext
*s
, TCGMemOp ot
)
1073 gen_string_movl_A0_ESI(s
);
1074 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1075 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T0
);
1076 gen_op_movl_T0_Dshift(ot
);
1077 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1080 static inline void gen_scas(DisasContext
*s
, TCGMemOp ot
)
1082 gen_string_movl_A0_EDI(s
);
1083 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
1084 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1085 gen_op_movl_T0_Dshift(ot
);
1086 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1089 static inline void gen_cmps(DisasContext
*s
, TCGMemOp ot
)
1091 gen_string_movl_A0_EDI(s
);
1092 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
1093 gen_string_movl_A0_ESI(s
);
1094 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1095 gen_op_movl_T0_Dshift(ot
);
1096 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1097 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1100 static void gen_bpt_io(DisasContext
*s
, TCGv_i32 t_port
, int ot
)
1102 if (s
->flags
& HF_IOBPT_MASK
) {
1103 TCGv_i32 t_size
= tcg_const_i32(1 << ot
);
1104 TCGv t_next
= tcg_const_tl(s
->pc
- s
->cs_base
);
1106 gen_helper_bpt_io(cpu_env
, t_port
, t_size
, t_next
);
1107 tcg_temp_free_i32(t_size
);
1108 tcg_temp_free(t_next
);
1113 static inline void gen_ins(DisasContext
*s
, TCGMemOp ot
)
1115 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1118 gen_string_movl_A0_EDI(s
);
1119 /* Note: we must do this dummy write first to be restartable in
1120 case of page fault. */
1121 tcg_gen_movi_tl(cpu_T0
, 0);
1122 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
1123 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_EDX
]);
1124 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1125 gen_helper_in_func(ot
, cpu_T0
, cpu_tmp2_i32
);
1126 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
1127 gen_op_movl_T0_Dshift(ot
);
1128 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1129 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
1130 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1135 static inline void gen_outs(DisasContext
*s
, TCGMemOp ot
)
1137 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1140 gen_string_movl_A0_ESI(s
);
1141 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1143 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_EDX
]);
1144 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1145 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T0
);
1146 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1147 gen_op_movl_T0_Dshift(ot
);
1148 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1149 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
1150 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1155 /* same method as Valgrind : we generate jumps to current or next
1157 #define GEN_REPZ(op) \
1158 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1159 target_ulong cur_eip, target_ulong next_eip) \
1162 gen_update_cc_op(s); \
1163 l2 = gen_jz_ecx_string(s, next_eip); \
1164 gen_ ## op(s, ot); \
1165 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1166 /* a loop would cause two single step exceptions if ECX = 1 \
1167 before rep string_insn */ \
1169 gen_op_jz_ecx(s->aflag, l2); \
1170 gen_jmp(s, cur_eip); \
1173 #define GEN_REPZ2(op) \
1174 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1175 target_ulong cur_eip, \
1176 target_ulong next_eip, \
1180 gen_update_cc_op(s); \
1181 l2 = gen_jz_ecx_string(s, next_eip); \
1182 gen_ ## op(s, ot); \
1183 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1184 gen_update_cc_op(s); \
1185 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1187 gen_op_jz_ecx(s->aflag, l2); \
1188 gen_jmp(s, cur_eip); \
1199 static void gen_helper_fp_arith_ST0_FT0(int op
)
1203 gen_helper_fadd_ST0_FT0(cpu_env
);
1206 gen_helper_fmul_ST0_FT0(cpu_env
);
1209 gen_helper_fcom_ST0_FT0(cpu_env
);
1212 gen_helper_fcom_ST0_FT0(cpu_env
);
1215 gen_helper_fsub_ST0_FT0(cpu_env
);
1218 gen_helper_fsubr_ST0_FT0(cpu_env
);
1221 gen_helper_fdiv_ST0_FT0(cpu_env
);
1224 gen_helper_fdivr_ST0_FT0(cpu_env
);
1229 /* NOTE the exception in "r" op ordering */
1230 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1232 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1235 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1238 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1241 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1244 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1247 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1250 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1255 /* if d == OR_TMP0, it means memory operand (address in A0) */
1256 static void gen_op(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
)
1259 gen_op_mov_v_reg(ot
, cpu_T0
, d
);
1261 gen_op_ld_v(s1
, ot
, cpu_T0
, cpu_A0
);
1265 gen_compute_eflags_c(s1
, cpu_tmp4
);
1266 tcg_gen_add_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1267 tcg_gen_add_tl(cpu_T0
, cpu_T0
, cpu_tmp4
);
1268 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1269 gen_op_update3_cc(cpu_tmp4
);
1270 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1273 gen_compute_eflags_c(s1
, cpu_tmp4
);
1274 tcg_gen_sub_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1275 tcg_gen_sub_tl(cpu_T0
, cpu_T0
, cpu_tmp4
);
1276 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1277 gen_op_update3_cc(cpu_tmp4
);
1278 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1281 tcg_gen_add_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1282 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1283 gen_op_update2_cc();
1284 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1287 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T0
);
1288 tcg_gen_sub_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1289 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1290 gen_op_update2_cc();
1291 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1295 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1296 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1297 gen_op_update1_cc();
1298 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1301 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1302 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1303 gen_op_update1_cc();
1304 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1307 tcg_gen_xor_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1308 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1309 gen_op_update1_cc();
1310 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1313 tcg_gen_mov_tl(cpu_cc_src
, cpu_T1
);
1314 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T0
);
1315 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T0
, cpu_T1
);
1316 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1321 /* if d == OR_TMP0, it means memory operand (address in A0) */
1322 static void gen_inc(DisasContext
*s1
, TCGMemOp ot
, int d
, int c
)
1325 gen_op_mov_v_reg(ot
, cpu_T0
, d
);
1327 gen_op_ld_v(s1
, ot
, cpu_T0
, cpu_A0
);
1329 gen_compute_eflags_c(s1
, cpu_cc_src
);
1331 tcg_gen_addi_tl(cpu_T0
, cpu_T0
, 1);
1332 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1334 tcg_gen_addi_tl(cpu_T0
, cpu_T0
, -1);
1335 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1337 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1338 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
1341 static void gen_shift_flags(DisasContext
*s
, TCGMemOp ot
, TCGv result
,
1342 TCGv shm1
, TCGv count
, bool is_right
)
1344 TCGv_i32 z32
, s32
, oldop
;
1347 /* Store the results into the CC variables. If we know that the
1348 variable must be dead, store unconditionally. Otherwise we'll
1349 need to not disrupt the current contents. */
1350 z_tl
= tcg_const_tl(0);
1351 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1352 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1353 result
, cpu_cc_dst
);
1355 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1357 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1358 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1361 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1363 tcg_temp_free(z_tl
);
1365 /* Get the two potential CC_OP values into temporaries. */
1366 tcg_gen_movi_i32(cpu_tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1367 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1370 tcg_gen_movi_i32(cpu_tmp3_i32
, s
->cc_op
);
1371 oldop
= cpu_tmp3_i32
;
1374 /* Conditionally store the CC_OP value. */
1375 z32
= tcg_const_i32(0);
1376 s32
= tcg_temp_new_i32();
1377 tcg_gen_trunc_tl_i32(s32
, count
);
1378 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, cpu_tmp2_i32
, oldop
);
1379 tcg_temp_free_i32(z32
);
1380 tcg_temp_free_i32(s32
);
1382 /* The CC_OP value is no longer predictable. */
1383 set_cc_op(s
, CC_OP_DYNAMIC
);
1386 static void gen_shift_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1387 int is_right
, int is_arith
)
1389 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1392 if (op1
== OR_TMP0
) {
1393 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1395 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1398 tcg_gen_andi_tl(cpu_T1
, cpu_T1
, mask
);
1399 tcg_gen_subi_tl(cpu_tmp0
, cpu_T1
, 1);
1403 gen_exts(ot
, cpu_T0
);
1404 tcg_gen_sar_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1405 tcg_gen_sar_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1407 gen_extu(ot
, cpu_T0
);
1408 tcg_gen_shr_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1409 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1412 tcg_gen_shl_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1413 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1417 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1419 gen_shift_flags(s
, ot
, cpu_T0
, cpu_tmp0
, cpu_T1
, is_right
);
1422 static void gen_shift_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1423 int is_right
, int is_arith
)
1425 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1429 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1431 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1437 gen_exts(ot
, cpu_T0
);
1438 tcg_gen_sari_tl(cpu_tmp4
, cpu_T0
, op2
- 1);
1439 tcg_gen_sari_tl(cpu_T0
, cpu_T0
, op2
);
1441 gen_extu(ot
, cpu_T0
);
1442 tcg_gen_shri_tl(cpu_tmp4
, cpu_T0
, op2
- 1);
1443 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, op2
);
1446 tcg_gen_shli_tl(cpu_tmp4
, cpu_T0
, op2
- 1);
1447 tcg_gen_shli_tl(cpu_T0
, cpu_T0
, op2
);
1452 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1454 /* update eflags if non zero shift */
1456 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1457 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
1458 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1462 static void gen_rot_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
, int is_right
)
1464 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1468 if (op1
== OR_TMP0
) {
1469 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1471 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1474 tcg_gen_andi_tl(cpu_T1
, cpu_T1
, mask
);
1478 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1479 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
1480 tcg_gen_muli_tl(cpu_T0
, cpu_T0
, 0x01010101);
1483 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1484 tcg_gen_deposit_tl(cpu_T0
, cpu_T0
, cpu_T0
, 16, 16);
1487 #ifdef TARGET_X86_64
1489 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
1490 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
1492 tcg_gen_rotr_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1494 tcg_gen_rotl_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1496 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
1501 tcg_gen_rotr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1503 tcg_gen_rotl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1509 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1511 /* We'll need the flags computed into CC_SRC. */
1512 gen_compute_eflags(s
);
1514 /* The value that was "rotated out" is now present at the other end
1515 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1516 since we've computed the flags into CC_SRC, these variables are
1519 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
- 1);
1520 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T0
, mask
);
1521 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1523 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
);
1524 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T0
, 1);
1526 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1527 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
/* Now conditionally store the new CC_OP value.  If the shift count
   is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
   Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
   exactly as we computed above. */
1533 t0
= tcg_const_i32(0);
1534 t1
= tcg_temp_new_i32();
1535 tcg_gen_trunc_tl_i32(t1
, cpu_T1
);
1536 tcg_gen_movi_i32(cpu_tmp2_i32
, CC_OP_ADCOX
);
1537 tcg_gen_movi_i32(cpu_tmp3_i32
, CC_OP_EFLAGS
);
1538 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1539 cpu_tmp2_i32
, cpu_tmp3_i32
);
1540 tcg_temp_free_i32(t0
);
1541 tcg_temp_free_i32(t1
);
1543 /* The CC_OP value is no longer predictable. */
1544 set_cc_op(s
, CC_OP_DYNAMIC
);
1547 static void gen_rot_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1550 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1554 if (op1
== OR_TMP0
) {
1555 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1557 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1563 #ifdef TARGET_X86_64
1565 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
1567 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1569 tcg_gen_rotli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1571 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
1576 tcg_gen_rotri_tl(cpu_T0
, cpu_T0
, op2
);
1578 tcg_gen_rotli_tl(cpu_T0
, cpu_T0
, op2
);
1589 shift
= mask
+ 1 - shift
;
1591 gen_extu(ot
, cpu_T0
);
1592 tcg_gen_shli_tl(cpu_tmp0
, cpu_T0
, shift
);
1593 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, mask
+ 1 - shift
);
1594 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
1600 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1603 /* Compute the flags into CC_SRC. */
1604 gen_compute_eflags(s
);
1606 /* The value that was "rotated out" is now present at the other end
1607 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1608 since we've computed the flags into CC_SRC, these variables are
1611 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
- 1);
1612 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T0
, mask
);
1613 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1615 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
);
1616 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T0
, 1);
1618 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1619 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1620 set_cc_op(s
, CC_OP_ADCOX
);
1624 /* XXX: add faster immediate = 1 case */
1625 static void gen_rotc_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1628 gen_compute_eflags(s
);
1629 assert(s
->cc_op
== CC_OP_EFLAGS
);
1633 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1635 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1640 gen_helper_rcrb(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1643 gen_helper_rcrw(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1646 gen_helper_rcrl(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1648 #ifdef TARGET_X86_64
1650 gen_helper_rcrq(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1659 gen_helper_rclb(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1662 gen_helper_rclw(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1665 gen_helper_rcll(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1667 #ifdef TARGET_X86_64
1669 gen_helper_rclq(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1677 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1680 /* XXX: add faster immediate case */
1681 static void gen_shiftd_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1682 bool is_right
, TCGv count_in
)
1684 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1688 if (op1
== OR_TMP0
) {
1689 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1691 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1694 count
= tcg_temp_new();
1695 tcg_gen_andi_tl(count
, count_in
, mask
);
1699 /* Note: we implement the Intel behaviour for shift count > 16.
1700 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1701 portion by constructing it as a 32-bit value. */
1703 tcg_gen_deposit_tl(cpu_tmp0
, cpu_T0
, cpu_T1
, 16, 16);
1704 tcg_gen_mov_tl(cpu_T1
, cpu_T0
);
1705 tcg_gen_mov_tl(cpu_T0
, cpu_tmp0
);
1707 tcg_gen_deposit_tl(cpu_T1
, cpu_T0
, cpu_T1
, 16, 16);
1710 #ifdef TARGET_X86_64
1712 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1713 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1715 tcg_gen_concat_tl_i64(cpu_T0
, cpu_T0
, cpu_T1
);
1716 tcg_gen_shr_i64(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1717 tcg_gen_shr_i64(cpu_T0
, cpu_T0
, count
);
1719 tcg_gen_concat_tl_i64(cpu_T0
, cpu_T1
, cpu_T0
);
1720 tcg_gen_shl_i64(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1721 tcg_gen_shl_i64(cpu_T0
, cpu_T0
, count
);
1722 tcg_gen_shri_i64(cpu_tmp0
, cpu_tmp0
, 32);
1723 tcg_gen_shri_i64(cpu_T0
, cpu_T0
, 32);
1728 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1730 tcg_gen_shr_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1732 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1733 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, count
);
1734 tcg_gen_shl_tl(cpu_T1
, cpu_T1
, cpu_tmp4
);
1736 tcg_gen_shl_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1738 /* Only needed if count > 16, for Intel behaviour. */
1739 tcg_gen_subfi_tl(cpu_tmp4
, 33, count
);
1740 tcg_gen_shr_tl(cpu_tmp4
, cpu_T1
, cpu_tmp4
);
1741 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, cpu_tmp4
);
1744 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1745 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, count
);
1746 tcg_gen_shr_tl(cpu_T1
, cpu_T1
, cpu_tmp4
);
1748 tcg_gen_movi_tl(cpu_tmp4
, 0);
1749 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T1
, count
, cpu_tmp4
,
1751 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1756 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1758 gen_shift_flags(s
, ot
, cpu_T0
, cpu_tmp0
, count
, is_right
);
1759 tcg_temp_free(count
);
1762 static void gen_shift(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int s
)
1765 gen_op_mov_v_reg(ot
, cpu_T1
, s
);
1768 gen_rot_rm_T1(s1
, ot
, d
, 0);
1771 gen_rot_rm_T1(s1
, ot
, d
, 1);
1775 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1778 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1781 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1784 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1787 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1792 static void gen_shifti(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int c
)
1796 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1799 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1803 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1806 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1809 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1812 /* currently not optimized */
1813 tcg_gen_movi_tl(cpu_T1
, c
);
1814 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1819 /* Decompose an address. */
1821 typedef struct AddressParts
{
1829 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
1832 int def_seg
, base
, index
, scale
, mod
, rm
;
1841 mod
= (modrm
>> 6) & 3;
1843 base
= rm
| REX_B(s
);
1846 /* Normally filtered out earlier, but including this path
1847 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1856 int code
= cpu_ldub_code(env
, s
->pc
++);
1857 scale
= (code
>> 6) & 3;
1858 index
= ((code
>> 3) & 7) | REX_X(s
);
1860 index
= -1; /* no index */
1862 base
= (code
& 7) | REX_B(s
);
1868 if ((base
& 7) == 5) {
1870 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1872 if (CODE64(s
) && !havesib
) {
1874 disp
+= s
->pc
+ s
->rip_offset
;
1879 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
1883 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1888 /* For correct popl handling with esp. */
1889 if (base
== R_ESP
&& s
->popl_esp_hack
) {
1890 disp
+= s
->popl_esp_hack
;
1892 if (base
== R_EBP
|| base
== R_ESP
) {
1901 disp
= cpu_lduw_code(env
, s
->pc
);
1905 } else if (mod
== 1) {
1906 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
1908 disp
= (int16_t)cpu_lduw_code(env
, s
->pc
);
1953 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
1956 /* Compute the address, with a minimum number of TCG ops. */
1957 static TCGv
gen_lea_modrm_1(AddressParts a
)
1964 ea
= cpu_regs
[a
.index
];
1966 tcg_gen_shli_tl(cpu_A0
, cpu_regs
[a
.index
], a
.scale
);
1970 tcg_gen_add_tl(cpu_A0
, ea
, cpu_regs
[a
.base
]);
1973 } else if (a
.base
>= 0) {
1974 ea
= cpu_regs
[a
.base
];
1976 if (TCGV_IS_UNUSED(ea
)) {
1977 tcg_gen_movi_tl(cpu_A0
, a
.disp
);
1979 } else if (a
.disp
!= 0) {
1980 tcg_gen_addi_tl(cpu_A0
, ea
, a
.disp
);
1987 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1989 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
1990 TCGv ea
= gen_lea_modrm_1(a
);
1991 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
1994 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1996 (void)gen_lea_modrm_0(env
, s
, modrm
);
1999 /* Used for BNDCL, BNDCU, BNDCN. */
2000 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2001 TCGCond cond
, TCGv_i64 bndv
)
2003 TCGv ea
= gen_lea_modrm_1(gen_lea_modrm_0(env
, s
, modrm
));
2005 tcg_gen_extu_tl_i64(cpu_tmp1_i64
, ea
);
2007 tcg_gen_ext32u_i64(cpu_tmp1_i64
, cpu_tmp1_i64
);
2009 tcg_gen_setcond_i64(cond
, cpu_tmp1_i64
, cpu_tmp1_i64
, bndv
);
2010 tcg_gen_extrl_i64_i32(cpu_tmp2_i32
, cpu_tmp1_i64
);
2011 gen_helper_bndck(cpu_env
, cpu_tmp2_i32
);
2014 /* used for LEA and MOV AX, mem */
2015 static void gen_add_A0_ds_seg(DisasContext
*s
)
2017 gen_lea_v_seg(s
, s
->aflag
, cpu_A0
, R_DS
, s
->override
);
2020 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2022 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2023 TCGMemOp ot
, int reg
, int is_store
)
2027 mod
= (modrm
>> 6) & 3;
2028 rm
= (modrm
& 7) | REX_B(s
);
2032 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
2033 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
2035 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
2037 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2040 gen_lea_modrm(env
, s
, modrm
);
2043 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
2044 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
2046 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
2048 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2053 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
)
2059 ret
= cpu_ldub_code(env
, s
->pc
);
2063 ret
= cpu_lduw_code(env
, s
->pc
);
2067 #ifdef TARGET_X86_64
2070 ret
= cpu_ldl_code(env
, s
->pc
);
2079 static inline int insn_const_size(TCGMemOp ot
)
2088 static inline bool use_goto_tb(DisasContext
*s
, target_ulong pc
)
2090 #ifndef CONFIG_USER_ONLY
2091 return (pc
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
2092 (pc
& TARGET_PAGE_MASK
) == (s
->pc_start
& TARGET_PAGE_MASK
);
2098 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2100 target_ulong pc
= s
->cs_base
+ eip
;
2102 if (use_goto_tb(s
, pc
)) {
2103 /* jump to same page: we can use a direct jump */
2104 tcg_gen_goto_tb(tb_num
);
2106 tcg_gen_exit_tb((uintptr_t)s
->tb
+ tb_num
);
2108 /* jump to another page: currently not optimized */
2114 static inline void gen_jcc(DisasContext
*s
, int b
,
2115 target_ulong val
, target_ulong next_eip
)
2120 l1
= gen_new_label();
2123 gen_goto_tb(s
, 0, next_eip
);
2126 gen_goto_tb(s
, 1, val
);
2127 s
->is_jmp
= DISAS_TB_JUMP
;
2129 l1
= gen_new_label();
2130 l2
= gen_new_label();
2133 gen_jmp_im(next_eip
);
2143 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
, int b
,
2148 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2150 cc
= gen_prepare_cc(s
, b
, cpu_T1
);
2151 if (cc
.mask
!= -1) {
2152 TCGv t0
= tcg_temp_new();
2153 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2157 cc
.reg2
= tcg_const_tl(cc
.imm
);
2160 tcg_gen_movcond_tl(cc
.cond
, cpu_T0
, cc
.reg
, cc
.reg2
,
2161 cpu_T0
, cpu_regs
[reg
]);
2162 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2164 if (cc
.mask
!= -1) {
2165 tcg_temp_free(cc
.reg
);
2168 tcg_temp_free(cc
.reg2
);
2172 static inline void gen_op_movl_T0_seg(int seg_reg
)
2174 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
2175 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2178 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2180 tcg_gen_ext16u_tl(cpu_T0
, cpu_T0
);
2181 tcg_gen_st32_tl(cpu_T0
, cpu_env
,
2182 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2183 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], cpu_T0
, 4);
2186 /* move T0 to seg_reg and compute if the CPU state may change. Never
2187 call this function with seg_reg == R_CS */
2188 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
)
2190 if (s
->pe
&& !s
->vm86
) {
2191 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
2192 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2193 /* abort translation because the addseg value may change or
2194 because ss32 may change. For R_SS, translation must always
2195 stop as a special handling must be done to disable hardware
2196 interrupts for the next instruction */
2197 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2198 s
->is_jmp
= DISAS_TB_JUMP
;
2200 gen_op_movl_seg_T0_vm(seg_reg
);
2201 if (seg_reg
== R_SS
)
2202 s
->is_jmp
= DISAS_TB_JUMP
;
2206 static inline int svm_is_rep(int prefixes
)
2208 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2212 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2213 uint32_t type
, uint64_t param
)
2215 /* no SVM activated; fast case */
2216 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2218 gen_update_cc_op(s
);
2219 gen_jmp_im(pc_start
- s
->cs_base
);
2220 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2221 tcg_const_i64(param
));
2225 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2227 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2230 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2232 gen_op_add_reg_im(mo_stacksize(s
), R_ESP
, addend
);
2235 /* Generate a push. It depends on ss32, addseg and dflag. */
2236 static void gen_push_v(DisasContext
*s
, TCGv val
)
2238 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2239 TCGMemOp a_ot
= mo_stacksize(s
);
2240 int size
= 1 << d_ot
;
2241 TCGv new_esp
= cpu_A0
;
2243 tcg_gen_subi_tl(cpu_A0
, cpu_regs
[R_ESP
], size
);
2248 tcg_gen_mov_tl(new_esp
, cpu_A0
);
2250 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2253 gen_op_st_v(s
, d_ot
, val
, cpu_A0
);
2254 gen_op_mov_reg_v(a_ot
, R_ESP
, new_esp
);
2257 /* two step pop is necessary for precise exceptions */
2258 static TCGMemOp
gen_pop_T0(DisasContext
*s
)
2260 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2262 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2263 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2268 static inline void gen_pop_update(DisasContext
*s
, TCGMemOp ot
)
2270 gen_stack_update(s
, 1 << ot
);
2273 static inline void gen_stack_A0(DisasContext
*s
)
2275 gen_lea_v_seg(s
, s
->ss32
? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2278 static void gen_pusha(DisasContext
*s
)
2280 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2281 TCGMemOp d_ot
= s
->dflag
;
2282 int size
= 1 << d_ot
;
2285 for (i
= 0; i
< 8; i
++) {
2286 tcg_gen_addi_tl(cpu_A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2287 gen_lea_v_seg(s
, s_ot
, cpu_A0
, R_SS
, -1);
2288 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], cpu_A0
);
2291 gen_stack_update(s
, -8 * size
);
2294 static void gen_popa(DisasContext
*s
)
2296 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2297 TCGMemOp d_ot
= s
->dflag
;
2298 int size
= 1 << d_ot
;
2301 for (i
= 0; i
< 8; i
++) {
2302 /* ESP is not reloaded */
2303 if (7 - i
== R_ESP
) {
2306 tcg_gen_addi_tl(cpu_A0
, cpu_regs
[R_ESP
], i
* size
);
2307 gen_lea_v_seg(s
, s_ot
, cpu_A0
, R_SS
, -1);
2308 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2309 gen_op_mov_reg_v(d_ot
, 7 - i
, cpu_T0
);
2312 gen_stack_update(s
, 8 * size
);
2315 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2317 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2318 TCGMemOp a_ot
= CODE64(s
) ? MO_64
: s
->ss32
? MO_32
: MO_16
;
2319 int size
= 1 << d_ot
;
2321 /* Push BP; compute FrameTemp into T1. */
2322 tcg_gen_subi_tl(cpu_T1
, cpu_regs
[R_ESP
], size
);
2323 gen_lea_v_seg(s
, a_ot
, cpu_T1
, R_SS
, -1);
2324 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], cpu_A0
);
2330 /* Copy level-1 pointers from the previous frame. */
2331 for (i
= 1; i
< level
; ++i
) {
2332 tcg_gen_subi_tl(cpu_A0
, cpu_regs
[R_EBP
], size
* i
);
2333 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2334 gen_op_ld_v(s
, d_ot
, cpu_tmp0
, cpu_A0
);
2336 tcg_gen_subi_tl(cpu_A0
, cpu_T1
, size
* i
);
2337 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2338 gen_op_st_v(s
, d_ot
, cpu_tmp0
, cpu_A0
);
2341 /* Push the current FrameTemp as the last level. */
2342 tcg_gen_subi_tl(cpu_A0
, cpu_T1
, size
* level
);
2343 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2344 gen_op_st_v(s
, d_ot
, cpu_T1
, cpu_A0
);
2347 /* Copy the FrameTemp value to EBP. */
2348 gen_op_mov_reg_v(a_ot
, R_EBP
, cpu_T1
);
2350 /* Compute the final value of ESP. */
2351 tcg_gen_subi_tl(cpu_T1
, cpu_T1
, esp_addend
+ size
* level
);
2352 gen_op_mov_reg_v(a_ot
, R_ESP
, cpu_T1
);
2355 static void gen_leave(DisasContext
*s
)
2357 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2358 TCGMemOp a_ot
= mo_stacksize(s
);
2360 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2361 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2363 tcg_gen_addi_tl(cpu_T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2365 gen_op_mov_reg_v(d_ot
, R_EBP
, cpu_T0
);
2366 gen_op_mov_reg_v(a_ot
, R_ESP
, cpu_T1
);
2369 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2371 gen_update_cc_op(s
);
2372 gen_jmp_im(cur_eip
);
2373 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2374 s
->is_jmp
= DISAS_TB_JUMP
;
2377 /* Generate #UD for the current instruction. The assumption here is that
2378 the instruction is known, but it isn't allowed in the current cpu mode. */
2379 static void gen_illegal_opcode(DisasContext
*s
)
2381 gen_exception(s
, EXCP06_ILLOP
, s
->pc_start
- s
->cs_base
);
2384 /* Similarly, except that the assumption here is that we don't decode
2385 the instruction at all -- either a missing opcode, an unimplemented
2386 feature, or just a bogus instruction stream. */
2387 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2389 gen_illegal_opcode(s
);
2391 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2392 target_ulong pc
= s
->pc_start
, end
= s
->pc
;
2393 qemu_log("ILLOPC: " TARGET_FMT_lx
":", pc
);
2394 for (; pc
< end
; ++pc
) {
2395 qemu_log(" %02x", cpu_ldub_code(env
, pc
));
2401 /* an interrupt is different from an exception because of the
2403 static void gen_interrupt(DisasContext
*s
, int intno
,
2404 target_ulong cur_eip
, target_ulong next_eip
)
2406 gen_update_cc_op(s
);
2407 gen_jmp_im(cur_eip
);
2408 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2409 tcg_const_i32(next_eip
- cur_eip
));
2410 s
->is_jmp
= DISAS_TB_JUMP
;
2413 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2415 gen_update_cc_op(s
);
2416 gen_jmp_im(cur_eip
);
2417 gen_helper_debug(cpu_env
);
2418 s
->is_jmp
= DISAS_TB_JUMP
;
2421 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2423 if ((s
->flags
& mask
) == 0) {
2424 TCGv_i32 t
= tcg_temp_new_i32();
2425 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2426 tcg_gen_ori_i32(t
, t
, mask
);
2427 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2428 tcg_temp_free_i32(t
);
2433 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2435 if (s
->flags
& mask
) {
2436 TCGv_i32 t
= tcg_temp_new_i32();
2437 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2438 tcg_gen_andi_i32(t
, t
, ~mask
);
2439 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2440 tcg_temp_free_i32(t
);
2445 /* Clear BND registers during legacy branches. */
2446 static void gen_bnd_jmp(DisasContext
*s
)
2448 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2449 and if the BNDREGs are known to be in use (non-zero) already.
2450 The helper itself will check BNDPRESERVE at runtime. */
2451 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2452 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2453 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2454 gen_helper_bnd_jmp(cpu_env
);
2458 /* Generate an end of block. Trace exception is also generated if needed.
2459 If IIM, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2460 static void gen_eob_inhibit_irq(DisasContext
*s
, bool inhibit
)
2462 gen_update_cc_op(s
);
2464 /* If several instructions disable interrupts, only the first does it. */
2465 if (inhibit
&& !(s
->flags
& HF_INHIBIT_IRQ_MASK
)) {
2466 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2468 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2471 if (s
->tb
->flags
& HF_RF_MASK
) {
2472 gen_helper_reset_rf(cpu_env
);
2474 if (s
->singlestep_enabled
) {
2475 gen_helper_debug(cpu_env
);
2477 gen_helper_single_step(cpu_env
);
2481 s
->is_jmp
= DISAS_TB_JUMP
;
2484 /* End of block, resetting the inhibit irq flag. */
2485 static void gen_eob(DisasContext
*s
)
2487 gen_eob_inhibit_irq(s
, false);
2490 /* generate a jump to eip. No segment change must happen before as a
2491 direct call to the next block may occur */
2492 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2494 gen_update_cc_op(s
);
2495 set_cc_op(s
, CC_OP_DYNAMIC
);
2497 gen_goto_tb(s
, tb_num
, eip
);
2498 s
->is_jmp
= DISAS_TB_JUMP
;
2505 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2507 gen_jmp_tb(s
, eip
, 0);
2510 static inline void gen_ldq_env_A0(DisasContext
*s
, int offset
)
2512 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
2513 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2516 static inline void gen_stq_env_A0(DisasContext
*s
, int offset
)
2518 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2519 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
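/* Naming convention for the SSE helper typedefs below: 'e' stands for the
   cpu_env pointer, 'p' for a pointer to a register inside env, 'i'/'l' for a
   32/64-bit value and 't' for a target-sized value; the leading '0', 'i' or
   'l' gives the result kind (none, or a 32/64-bit value written through the
   first argument). */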
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
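/* sse_op_table1 is indexed by the opcode byte following 0F and by the
   mandatory prefix in effect: column 0 = no prefix, 1 = 66, 2 = F3, 3 = F2.
   SSE_SPECIAL entries are decoded by hand in gen_sse() below. */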
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
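/* sse_op_table2 covers the immediate-count shift groups 0F 71/72/73; the row
   is (shift-width group * 8) + the ModRM reg field, the column selects the
   MMX or the 66-prefixed XMM form. */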
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};

static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
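/* sse_op_table4 holds the eight comparison predicates of CMPPS/CMPPD/CMPSS/
   CMPSD (opcode 0F C2), selected by the immediate byte. */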
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};

static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};

struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
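/* Tables for the 0F 38 and 0F 3A opcode maps.  SSSE3 entries carry both an
   MMX and a 66-prefixed XMM form; the SSE4.x, PCLMULQDQ and AES-NI entries
   exist only in the XMM form.  ext_mask is the CPUID feature bit that must
   be set for the instruction to be accepted. */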
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
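/* Decode and generate code for an MMX/SSE instruction.  'b' is the opcode
   byte following 0F; dispatch goes through the tables above, with
   SSE_SPECIAL entries and the 0F 38 / 0F 3A maps handled by explicit cases
   in the body below. */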
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto unknown_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_illegal_opcode(s);
        return;
    }
    if (is_xmm
        && !(s->flags & HF_OSFXSR_MASK)
        && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
        goto unknown_op;
    }
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
            /* If we were fully decoding this we might use illegal_op.  */
            goto unknown_op;
        }
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = cpu_ldub_code(env, s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
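        /* SSE_SPECIAL entries are mostly data-movement forms that need
           bespoke handling; the mandatory-prefix index b1 has been folded
           into bits 8-9 of b, so each case value encodes opcode + prefix. */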
2975 case 0x0e7: /* movntq */
2979 gen_lea_modrm(env
, s
, modrm
);
2980 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
2982 case 0x1e7: /* movntdq */
2983 case 0x02b: /* movntps */
2984 case 0x12b: /* movntps */
2987 gen_lea_modrm(env
, s
, modrm
);
2988 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
2990 case 0x3f0: /* lddqu */
2993 gen_lea_modrm(env
, s
, modrm
);
2994 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
2996 case 0x22b: /* movntss */
2997 case 0x32b: /* movntsd */
3000 gen_lea_modrm(env
, s
, modrm
);
3002 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3003 xmm_regs
[reg
].ZMM_Q(0)));
3005 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
3006 xmm_regs
[reg
].ZMM_L(0)));
3007 gen_op_st_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3010 case 0x6e: /* movd mm, ea */
3011 #ifdef TARGET_X86_64
3012 if (s
->dflag
== MO_64
) {
3013 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3014 tcg_gen_st_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3018 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3019 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3020 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3021 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3022 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3025 case 0x16e: /* movd xmm, ea */
3026 #ifdef TARGET_X86_64
3027 if (s
->dflag
== MO_64
) {
3028 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3029 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3030 offsetof(CPUX86State
,xmm_regs
[reg
]));
3031 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T0
);
3035 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3036 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3037 offsetof(CPUX86State
,xmm_regs
[reg
]));
3038 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3039 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3042 case 0x6f: /* movq mm, ea */
3044 gen_lea_modrm(env
, s
, modrm
);
3045 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3048 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3049 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3050 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3051 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3054 case 0x010: /* movups */
3055 case 0x110: /* movupd */
3056 case 0x028: /* movaps */
3057 case 0x128: /* movapd */
3058 case 0x16f: /* movdqa xmm, ea */
3059 case 0x26f: /* movdqu xmm, ea */
3061 gen_lea_modrm(env
, s
, modrm
);
3062 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3064 rm
= (modrm
& 7) | REX_B(s
);
3065 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3066 offsetof(CPUX86State
,xmm_regs
[rm
]));
3069 case 0x210: /* movss xmm, ea */
3071 gen_lea_modrm(env
, s
, modrm
);
3072 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3073 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3074 tcg_gen_movi_tl(cpu_T0
, 0);
3075 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3076 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3077 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3079 rm
= (modrm
& 7) | REX_B(s
);
3080 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3081 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3084 case 0x310: /* movsd xmm, ea */
3086 gen_lea_modrm(env
, s
, modrm
);
3087 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3088 xmm_regs
[reg
].ZMM_Q(0)));
3089 tcg_gen_movi_tl(cpu_T0
, 0);
3090 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3091 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3093 rm
= (modrm
& 7) | REX_B(s
);
3094 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3095 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3098 case 0x012: /* movlps */
3099 case 0x112: /* movlpd */
3101 gen_lea_modrm(env
, s
, modrm
);
3102 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3103 xmm_regs
[reg
].ZMM_Q(0)));
3106 rm
= (modrm
& 7) | REX_B(s
);
3107 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3108 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3111 case 0x212: /* movsldup */
3113 gen_lea_modrm(env
, s
, modrm
);
3114 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3116 rm
= (modrm
& 7) | REX_B(s
);
3117 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3118 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3119 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)),
3120 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(2)));
3122 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)),
3123 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3124 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)),
3125 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3127 case 0x312: /* movddup */
3129 gen_lea_modrm(env
, s
, modrm
);
3130 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3131 xmm_regs
[reg
].ZMM_Q(0)));
3133 rm
= (modrm
& 7) | REX_B(s
);
3134 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3135 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3137 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)),
3138 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3140 case 0x016: /* movhps */
3141 case 0x116: /* movhpd */
3143 gen_lea_modrm(env
, s
, modrm
);
3144 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3145 xmm_regs
[reg
].ZMM_Q(1)));
3148 rm
= (modrm
& 7) | REX_B(s
);
3149 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)),
3150 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3153 case 0x216: /* movshdup */
3155 gen_lea_modrm(env
, s
, modrm
);
3156 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3158 rm
= (modrm
& 7) | REX_B(s
);
3159 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)),
3160 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(1)));
3161 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)),
3162 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(3)));
3164 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3165 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3166 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)),
3167 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3172 int bit_index
, field_length
;
3174 if (b1
== 1 && reg
!= 0)
3176 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3177 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3178 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3179 offsetof(CPUX86State
,xmm_regs
[reg
]));
3181 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3182 tcg_const_i32(bit_index
),
3183 tcg_const_i32(field_length
));
3185 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3186 tcg_const_i32(bit_index
),
3187 tcg_const_i32(field_length
));
3190 case 0x7e: /* movd ea, mm */
3191 #ifdef TARGET_X86_64
3192 if (s
->dflag
== MO_64
) {
3193 tcg_gen_ld_i64(cpu_T0
, cpu_env
,
3194 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3195 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3199 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
3200 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3201 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3204 case 0x17e: /* movd ea, xmm */
3205 #ifdef TARGET_X86_64
3206 if (s
->dflag
== MO_64
) {
3207 tcg_gen_ld_i64(cpu_T0
, cpu_env
,
3208 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3209 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3213 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
3214 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3215 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3218 case 0x27e: /* movq xmm, ea */
3220 gen_lea_modrm(env
, s
, modrm
);
3221 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3222 xmm_regs
[reg
].ZMM_Q(0)));
3224 rm
= (modrm
& 7) | REX_B(s
);
3225 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3226 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3228 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)));
3230 case 0x7f: /* movq ea, mm */
3232 gen_lea_modrm(env
, s
, modrm
);
3233 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3236 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3237 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3240 case 0x011: /* movups */
3241 case 0x111: /* movupd */
3242 case 0x029: /* movaps */
3243 case 0x129: /* movapd */
3244 case 0x17f: /* movdqa ea, xmm */
3245 case 0x27f: /* movdqu ea, xmm */
3247 gen_lea_modrm(env
, s
, modrm
);
3248 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3250 rm
= (modrm
& 7) | REX_B(s
);
3251 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3252 offsetof(CPUX86State
,xmm_regs
[reg
]));
3255 case 0x211: /* movss ea, xmm */
3257 gen_lea_modrm(env
, s
, modrm
);
3258 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3259 gen_op_st_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3261 rm
= (modrm
& 7) | REX_B(s
);
3262 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)),
3263 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3266 case 0x311: /* movsd ea, xmm */
3268 gen_lea_modrm(env
, s
, modrm
);
3269 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3270 xmm_regs
[reg
].ZMM_Q(0)));
3272 rm
= (modrm
& 7) | REX_B(s
);
3273 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)),
3274 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3277 case 0x013: /* movlps */
3278 case 0x113: /* movlpd */
3280 gen_lea_modrm(env
, s
, modrm
);
3281 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3282 xmm_regs
[reg
].ZMM_Q(0)));
3287 case 0x017: /* movhps */
3288 case 0x117: /* movhpd */
3290 gen_lea_modrm(env
, s
, modrm
);
3291 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3292 xmm_regs
[reg
].ZMM_Q(1)));
3297 case 0x71: /* shift mm, im */
3300 case 0x171: /* shift xmm, im */
3306 val
= cpu_ldub_code(env
, s
->pc
++);
3308 tcg_gen_movi_tl(cpu_T0
, val
);
3309 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
3310 tcg_gen_movi_tl(cpu_T0
, 0);
3311 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(1)));
3312 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3314 tcg_gen_movi_tl(cpu_T0
, val
);
3315 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3316 tcg_gen_movi_tl(cpu_T0
, 0);
3317 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3318 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3320 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3321 (((modrm
>> 3)) & 7)][b1
];
3326 rm
= (modrm
& 7) | REX_B(s
);
3327 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3330 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3332 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3333 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3334 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3336 case 0x050: /* movmskps */
3337 rm
= (modrm
& 7) | REX_B(s
);
3338 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3339 offsetof(CPUX86State
,xmm_regs
[rm
]));
3340 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3341 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3343 case 0x150: /* movmskpd */
3344 rm
= (modrm
& 7) | REX_B(s
);
3345 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3346 offsetof(CPUX86State
,xmm_regs
[rm
]));
3347 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3348 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3350 case 0x02a: /* cvtpi2ps */
3351 case 0x12a: /* cvtpi2pd */
3352 gen_helper_enter_mmx(cpu_env
);
3354 gen_lea_modrm(env
, s
, modrm
);
3355 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3356 gen_ldq_env_A0(s
, op2_offset
);
3359 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3361 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3362 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3363 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3366 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3370 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3374 case 0x22a: /* cvtsi2ss */
3375 case 0x32a: /* cvtsi2sd */
3376 ot
= mo_64_32(s
->dflag
);
3377 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3378 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3379 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3381 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3382 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3383 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3385 #ifdef TARGET_X86_64
3386 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3387 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T0
);
3393 case 0x02c: /* cvttps2pi */
3394 case 0x12c: /* cvttpd2pi */
3395 case 0x02d: /* cvtps2pi */
3396 case 0x12d: /* cvtpd2pi */
3397 gen_helper_enter_mmx(cpu_env
);
3399 gen_lea_modrm(env
, s
, modrm
);
3400 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3401 gen_ldo_env_A0(s
, op2_offset
);
3403 rm
= (modrm
& 7) | REX_B(s
);
3404 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3406 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3407 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3408 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3411 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3414 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3417 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3420 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3424 case 0x22c: /* cvttss2si */
3425 case 0x32c: /* cvttsd2si */
3426 case 0x22d: /* cvtss2si */
3427 case 0x32d: /* cvtsd2si */
3428 ot
= mo_64_32(s
->dflag
);
3430 gen_lea_modrm(env
, s
, modrm
);
3432 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_Q(0)));
3434 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3435 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
3437 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3439 rm
= (modrm
& 7) | REX_B(s
);
3440 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3442 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3444 SSEFunc_i_ep sse_fn_i_ep
=
3445 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3446 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3447 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
3449 #ifdef TARGET_X86_64
3450 SSEFunc_l_ep sse_fn_l_ep
=
3451 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3452 sse_fn_l_ep(cpu_T0
, cpu_env
, cpu_ptr0
);
3457 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3459 case 0xc4: /* pinsrw */
3462 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3463 val
= cpu_ldub_code(env
, s
->pc
++);
3466 tcg_gen_st16_tl(cpu_T0
, cpu_env
,
3467 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_W(val
)));
3470 tcg_gen_st16_tl(cpu_T0
, cpu_env
,
3471 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3474 case 0xc5: /* pextrw */
3478 ot
= mo_64_32(s
->dflag
);
3479 val
= cpu_ldub_code(env
, s
->pc
++);
3482 rm
= (modrm
& 7) | REX_B(s
);
3483 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
,
3484 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_W(val
)));
3488 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
,
3489 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3491 reg
= ((modrm
>> 3) & 7) | rex_r
;
3492 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3494 case 0x1d6: /* movq ea, xmm */
3496 gen_lea_modrm(env
, s
, modrm
);
3497 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3498 xmm_regs
[reg
].ZMM_Q(0)));
3500 rm
= (modrm
& 7) | REX_B(s
);
3501 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)),
3502 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3503 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3506 case 0x2d6: /* movq2dq */
3507 gen_helper_enter_mmx(cpu_env
);
3509 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3510 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3511 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)));
3513 case 0x3d6: /* movdq2q */
3514 gen_helper_enter_mmx(cpu_env
);
3515 rm
= (modrm
& 7) | REX_B(s
);
3516 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3517 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3519 case 0xd7: /* pmovmskb */
3524 rm
= (modrm
& 7) | REX_B(s
);
3525 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3526 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3529 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3530 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3532 reg
= ((modrm
>> 3) & 7) | rex_r
;
3533 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3539 if ((b
& 0xf0) == 0xf0) {
3542 modrm
= cpu_ldub_code(env
, s
->pc
++);
3544 reg
= ((modrm
>> 3) & 7) | rex_r
;
3545 mod
= (modrm
>> 6) & 3;
3550 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3554 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3558 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3560 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3562 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3563 gen_lea_modrm(env
, s
, modrm
);
3565 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3566 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3567 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3568 gen_ldq_env_A0(s
, op2_offset
+
3569 offsetof(ZMMReg
, ZMM_Q(0)));
3571 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3572 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3573 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
3574 s
->mem_index
, MO_LEUL
);
3575 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3576 offsetof(ZMMReg
, ZMM_L(0)));
3578 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3579 tcg_gen_qemu_ld_tl(cpu_tmp0
, cpu_A0
,
3580 s
->mem_index
, MO_LEUW
);
3581 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3582 offsetof(ZMMReg
, ZMM_W(0)));
3584 case 0x2a: /* movntqda */
3585 gen_ldo_env_A0(s
, op1_offset
);
3588 gen_ldo_env_A0(s
, op2_offset
);
3592 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3594 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3596 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3597 gen_lea_modrm(env
, s
, modrm
);
3598 gen_ldq_env_A0(s
, op2_offset
);
3601 if (sse_fn_epp
== SSE_SPECIAL
) {
3605 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3606 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3607 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3610 set_cc_op(s
, CC_OP_EFLAGS
);
3617 /* Various integer extensions at 0f 38 f[0-f]. */
3618 b
= modrm
| (b1
<< 8);
3619 modrm
= cpu_ldub_code(env
, s
->pc
++);
3620 reg
= ((modrm
>> 3) & 7) | rex_r
;
3623 case 0x3f0: /* crc32 Gd,Eb */
3624 case 0x3f1: /* crc32 Gd,Ey */
3626 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3629 if ((b
& 0xff) == 0xf0) {
3631 } else if (s
->dflag
!= MO_64
) {
3632 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3637 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[reg
]);
3638 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3639 gen_helper_crc32(cpu_T0
, cpu_tmp2_i32
,
3640 cpu_T0
, tcg_const_i32(8 << ot
));
3642 ot
= mo_64_32(s
->dflag
);
3643 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3646 case 0x1f0: /* crc32 or movbe */
3648 /* For these insns, the f3 prefix is supposed to have priority
3649 over the 66 prefix, but that's not what we implement above
3651 if (s
->prefix
& PREFIX_REPNZ
) {
3655 case 0x0f0: /* movbe Gy,My */
3656 case 0x0f1: /* movbe My,Gy */
3657 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3660 if (s
->dflag
!= MO_64
) {
3661 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3666 gen_lea_modrm(env
, s
, modrm
);
3668 tcg_gen_qemu_ld_tl(cpu_T0
, cpu_A0
,
3669 s
->mem_index
, ot
| MO_BE
);
3670 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3672 tcg_gen_qemu_st_tl(cpu_regs
[reg
], cpu_A0
,
3673 s
->mem_index
, ot
| MO_BE
);
3677 case 0x0f2: /* andn Gy, By, Ey */
3678 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3679 || !(s
->prefix
& PREFIX_VEX
)
3683 ot
= mo_64_32(s
->dflag
);
3684 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3685 tcg_gen_andc_tl(cpu_T0
, cpu_regs
[s
->vex_v
], cpu_T0
);
3686 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3687 gen_op_update1_cc();
3688 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3691 case 0x0f7: /* bextr Gy, Ey, By */
3692 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3693 || !(s
->prefix
& PREFIX_VEX
)
3697 ot
= mo_64_32(s
->dflag
);
3701 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3702 /* Extract START, and shift the operand.
3703 Shifts larger than operand size get zeros. */
3704 tcg_gen_ext8u_tl(cpu_A0
, cpu_regs
[s
->vex_v
]);
3705 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_A0
);
3707 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3708 zero
= tcg_const_tl(0);
3709 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_T0
, cpu_A0
, bound
,
3711 tcg_temp_free(zero
);
3713 /* Extract the LEN into a mask. Lengths larger than
3714 operand size get all ones. */
3715 tcg_gen_shri_tl(cpu_A0
, cpu_regs
[s
->vex_v
], 8);
3716 tcg_gen_ext8u_tl(cpu_A0
, cpu_A0
);
3717 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_A0
, cpu_A0
, bound
,
3719 tcg_temp_free(bound
);
3720 tcg_gen_movi_tl(cpu_T1
, 1);
3721 tcg_gen_shl_tl(cpu_T1
, cpu_T1
, cpu_A0
);
3722 tcg_gen_subi_tl(cpu_T1
, cpu_T1
, 1);
3723 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3725 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3726 gen_op_update1_cc();
3727 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3731 case 0x0f5: /* bzhi Gy, Ey, By */
3732 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3733 || !(s
->prefix
& PREFIX_VEX
)
3737 ot
= mo_64_32(s
->dflag
);
3738 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3739 tcg_gen_ext8u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3741 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3742 /* Note that since we're using BMILG (in order to get O
3743 cleared) we need to store the inverse into C. */
3744 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
3746 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_T1
, cpu_T1
,
3747 bound
, bound
, cpu_T1
);
3748 tcg_temp_free(bound
);
3750 tcg_gen_movi_tl(cpu_A0
, -1);
3751 tcg_gen_shl_tl(cpu_A0
, cpu_A0
, cpu_T1
);
3752 tcg_gen_andc_tl(cpu_T0
, cpu_T0
, cpu_A0
);
3753 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3754 gen_op_update1_cc();
3755 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3758 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3759 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3760 || !(s
->prefix
& PREFIX_VEX
)
3764 ot
= mo_64_32(s
->dflag
);
3765 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3768 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3769 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EDX
]);
3770 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
3771 cpu_tmp2_i32
, cpu_tmp3_i32
);
3772 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], cpu_tmp2_i32
);
3773 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp3_i32
);
3775 #ifdef TARGET_X86_64
3777 tcg_gen_mulu2_i64(cpu_T0
, cpu_T1
,
3778 cpu_T0
, cpu_regs
[R_EDX
]);
3779 tcg_gen_mov_i64(cpu_regs
[s
->vex_v
], cpu_T0
);
3780 tcg_gen_mov_i64(cpu_regs
[reg
], cpu_T1
);
3786 case 0x3f5: /* pdep Gy, By, Ey */
3787 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3788 || !(s
->prefix
& PREFIX_VEX
)
3792 ot
= mo_64_32(s
->dflag
);
3793 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3794 /* Note that by zero-extending the mask operand, we
3795 automatically handle zero-extending the result. */
3797 tcg_gen_mov_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3799 tcg_gen_ext32u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3801 gen_helper_pdep(cpu_regs
[reg
], cpu_T0
, cpu_T1
);
3804 case 0x2f5: /* pext Gy, By, Ey */
3805 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3806 || !(s
->prefix
& PREFIX_VEX
)
3810 ot
= mo_64_32(s
->dflag
);
3811 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3812 /* Note that by zero-extending the mask operand, we
3813 automatically handle zero-extending the result. */
3815 tcg_gen_mov_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3817 tcg_gen_ext32u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3819 gen_helper_pext(cpu_regs
[reg
], cpu_T0
, cpu_T1
);
3822 case 0x1f6: /* adcx Gy, Ey */
3823 case 0x2f6: /* adox Gy, Ey */
3824 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
3827 TCGv carry_in
, carry_out
, zero
;
3830 ot
= mo_64_32(s
->dflag
);
3831 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3833 /* Re-use the carry-out from a previous round. */
3834 TCGV_UNUSED(carry_in
);
3835 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
3839 carry_in
= cpu_cc_dst
;
3840 end_op
= CC_OP_ADCX
;
3842 end_op
= CC_OP_ADCOX
;
3847 end_op
= CC_OP_ADCOX
;
3849 carry_in
= cpu_cc_src2
;
3850 end_op
= CC_OP_ADOX
;
3854 end_op
= CC_OP_ADCOX
;
3855 carry_in
= carry_out
;
3858 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
3861 /* If we can't reuse carry-out, get it out of EFLAGS. */
3862 if (TCGV_IS_UNUSED(carry_in
)) {
3863 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
3864 gen_compute_eflags(s
);
3866 carry_in
= cpu_tmp0
;
3867 tcg_gen_shri_tl(carry_in
, cpu_cc_src
,
3868 ctz32(b
== 0x1f6 ? CC_C
: CC_O
));
3869 tcg_gen_andi_tl(carry_in
, carry_in
, 1);
3873 #ifdef TARGET_X86_64
3875 /* If we know TL is 64-bit, and we want a 32-bit
3876 result, just do everything in 64-bit arithmetic. */
3877 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
3878 tcg_gen_ext32u_i64(cpu_T0
, cpu_T0
);
3879 tcg_gen_add_i64(cpu_T0
, cpu_T0
, cpu_regs
[reg
]);
3880 tcg_gen_add_i64(cpu_T0
, cpu_T0
, carry_in
);
3881 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_T0
);
3882 tcg_gen_shri_i64(carry_out
, cpu_T0
, 32);
3886 /* Otherwise compute the carry-out in two steps. */
3887 zero
= tcg_const_tl(0);
3888 tcg_gen_add2_tl(cpu_T0
, carry_out
,
3891 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
3892 cpu_regs
[reg
], carry_out
,
3894 tcg_temp_free(zero
);
3897 set_cc_op(s
, end_op
);
3901 case 0x1f7: /* shlx Gy, Ey, By */
3902 case 0x2f7: /* sarx Gy, Ey, By */
3903 case 0x3f7: /* shrx Gy, Ey, By */
3904 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3905 || !(s
->prefix
& PREFIX_VEX
)
3909 ot
= mo_64_32(s
->dflag
);
3910 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3912 tcg_gen_andi_tl(cpu_T1
, cpu_regs
[s
->vex_v
], 63);
3914 tcg_gen_andi_tl(cpu_T1
, cpu_regs
[s
->vex_v
], 31);
3917 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3918 } else if (b
== 0x2f7) {
3920 tcg_gen_ext32s_tl(cpu_T0
, cpu_T0
);
3922 tcg_gen_sar_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3925 tcg_gen_ext32u_tl(cpu_T0
, cpu_T0
);
3927 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3929 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3935 case 0x3f3: /* Group 17 */
3936 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3937 || !(s
->prefix
& PREFIX_VEX
)
3941 ot
= mo_64_32(s
->dflag
);
3942 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3945 case 1: /* blsr By,Ey */
3946 tcg_gen_neg_tl(cpu_T1
, cpu_T0
);
3947 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3948 gen_op_mov_reg_v(ot
, s
->vex_v
, cpu_T0
);
3949 gen_op_update2_cc();
3950 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3953 case 2: /* blsmsk By,Ey */
3954 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
3955 tcg_gen_subi_tl(cpu_T0
, cpu_T0
, 1);
3956 tcg_gen_xor_tl(cpu_T0
, cpu_T0
, cpu_cc_src
);
3957 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
3958 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3961 case 3: /* blsi By, Ey */
3962 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
3963 tcg_gen_subi_tl(cpu_T0
, cpu_T0
, 1);
3964 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_cc_src
);
3965 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
3966 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3982 modrm
= cpu_ldub_code(env
, s
->pc
++);
3984 reg
= ((modrm
>> 3) & 7) | rex_r
;
3985 mod
= (modrm
>> 6) & 3;
3990 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
3994 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
3997 if (sse_fn_eppi
== SSE_SPECIAL
) {
3998 ot
= mo_64_32(s
->dflag
);
3999 rm
= (modrm
& 7) | REX_B(s
);
4001 gen_lea_modrm(env
, s
, modrm
);
4002 reg
= ((modrm
>> 3) & 7) | rex_r
;
4003 val
= cpu_ldub_code(env
, s
->pc
++);
4005 case 0x14: /* pextrb */
4006 tcg_gen_ld8u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4007 xmm_regs
[reg
].ZMM_B(val
& 15)));
4009 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4011 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4012 s
->mem_index
, MO_UB
);
4015 case 0x15: /* pextrw */
4016 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4017 xmm_regs
[reg
].ZMM_W(val
& 7)));
4019 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4021 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4022 s
->mem_index
, MO_LEUW
);
4026 if (ot
== MO_32
) { /* pextrd */
4027 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4028 offsetof(CPUX86State
,
4029 xmm_regs
[reg
].ZMM_L(val
& 3)));
4031 tcg_gen_extu_i32_tl(cpu_regs
[rm
], cpu_tmp2_i32
);
4033 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
4034 s
->mem_index
, MO_LEUL
);
4036 } else { /* pextrq */
4037 #ifdef TARGET_X86_64
4038 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4039 offsetof(CPUX86State
,
4040 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4042 tcg_gen_mov_i64(cpu_regs
[rm
], cpu_tmp1_i64
);
4044 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
4045 s
->mem_index
, MO_LEQ
);
4052 case 0x17: /* extractps */
4053 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4054 xmm_regs
[reg
].ZMM_L(val
& 3)));
4056 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4058 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4059 s
->mem_index
, MO_LEUL
);
4062 case 0x20: /* pinsrb */
4064 gen_op_mov_v_reg(MO_32
, cpu_T0
, rm
);
4066 tcg_gen_qemu_ld_tl(cpu_T0
, cpu_A0
,
4067 s
->mem_index
, MO_UB
);
4069 tcg_gen_st8_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4070 xmm_regs
[reg
].ZMM_B(val
& 15)));
4072 case 0x21: /* insertps */
4074 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4075 offsetof(CPUX86State
,xmm_regs
[rm
]
4076 .ZMM_L((val
>> 6) & 3)));
4078 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4079 s
->mem_index
, MO_LEUL
);
4081 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4082 offsetof(CPUX86State
,xmm_regs
[reg
]
4083 .ZMM_L((val
>> 4) & 3)));
4085 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4086 cpu_env
, offsetof(CPUX86State
,
4087 xmm_regs
[reg
].ZMM_L(0)));
4089 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4090 cpu_env
, offsetof(CPUX86State
,
4091 xmm_regs
[reg
].ZMM_L(1)));
4093 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4094 cpu_env
, offsetof(CPUX86State
,
4095 xmm_regs
[reg
].ZMM_L(2)));
4097 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4098 cpu_env
, offsetof(CPUX86State
,
4099 xmm_regs
[reg
].ZMM_L(3)));
4102 if (ot
== MO_32
) { /* pinsrd */
4104 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[rm
]);
4106 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4107 s
->mem_index
, MO_LEUL
);
4109 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4110 offsetof(CPUX86State
,
4111 xmm_regs
[reg
].ZMM_L(val
& 3)));
4112 } else { /* pinsrq */
4113 #ifdef TARGET_X86_64
4115 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4117 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
4118 s
->mem_index
, MO_LEQ
);
4120 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4121 offsetof(CPUX86State
,
4122 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4133 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4135 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4137 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4138 gen_lea_modrm(env
, s
, modrm
);
4139 gen_ldo_env_A0(s
, op2_offset
);
4142 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4144 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4146 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4147 gen_lea_modrm(env
, s
, modrm
);
4148 gen_ldq_env_A0(s
, op2_offset
);
4151 val
= cpu_ldub_code(env
, s
->pc
++);
4153 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4154 set_cc_op(s
, CC_OP_EFLAGS
);
4156 if (s
->dflag
== MO_64
) {
4157 /* The helper must use entire 64-bit gp registers */
4162 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4163 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4164 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4168 /* Various integer extensions at 0f 3a f[0-f]. */
4169 b
= modrm
| (b1
<< 8);
4170 modrm
= cpu_ldub_code(env
, s
->pc
++);
4171 reg
= ((modrm
>> 3) & 7) | rex_r
;
4174 case 0x3f0: /* rorx Gy,Ey, Ib */
4175 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4176 || !(s
->prefix
& PREFIX_VEX
)
4180 ot
= mo_64_32(s
->dflag
);
4181 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4182 b
= cpu_ldub_code(env
, s
->pc
++);
4184 tcg_gen_rotri_tl(cpu_T0
, cpu_T0
, b
& 63);
4186 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
4187 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, b
& 31);
4188 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
4190 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
4200 gen_unknown_opcode(env
, s
);
4204 /* generic MMX or SSE operation */
4206 case 0x70: /* pshufx insn */
4207 case 0xc6: /* pshufx insn */
4208 case 0xc2: /* compare insns */
4215 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4219 gen_lea_modrm(env
, s
, modrm
);
4220 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4226 /* Most sse scalar operations. */
4229 } else if (b1
== 3) {
4234 case 0x2e: /* ucomis[sd] */
4235 case 0x2f: /* comis[sd] */
4247 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
4248 tcg_gen_st32_tl(cpu_T0
, cpu_env
,
4249 offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
4253 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
4256 /* 128 bit access */
4257 gen_ldo_env_A0(s
, op2_offset
);
4261 rm
= (modrm
& 7) | REX_B(s
);
4262 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4265 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4267 gen_lea_modrm(env
, s
, modrm
);
4268 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4269 gen_ldq_env_A0(s
, op2_offset
);
4272 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4276 case 0x0f: /* 3DNow! data insns */
4277 val
= cpu_ldub_code(env
, s
->pc
++);
4278 sse_fn_epp
= sse_op_table5
[val
];
4282 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
4285 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4286 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4287 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4289 case 0x70: /* pshufx insn */
4290 case 0xc6: /* pshufx insn */
4291 val
= cpu_ldub_code(env
, s
->pc
++);
4292 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4293 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4294 /* XXX: introduce a new table? */
4295 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4296 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4300 val
= cpu_ldub_code(env
, s
->pc
++);
4303 sse_fn_epp
= sse_op_table4
[val
][b1
];
4305 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4306 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4307 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4310 /* maskmov : we must prepare A0 */
4313 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EDI
]);
4314 gen_extu(s
->aflag
, cpu_A0
);
4315 gen_add_A0_ds_seg(s
);
4317 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4318 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4319 /* XXX: introduce a new table? */
4320 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4321 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4324 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4325 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4326 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4329 if (b
== 0x2e || b
== 0x2f) {
4330 set_cc_op(s
, CC_OP_EFLAGS
);
4335 /* convert one instruction. s->is_jmp is set if the translation must
4336 be stopped. Return the next pc value */
4337 static target_ulong
disas_insn(CPUX86State
*env
, DisasContext
*s
,
4338 target_ulong pc_start
)
4342 TCGMemOp ot
, aflag
, dflag
;
4343 int modrm
, reg
, rm
, mod
, op
, opreg
, val
;
4344 target_ulong next_eip
, tval
;
4347 s
->pc_start
= s
->pc
= pc_start
;
4352 #ifdef TARGET_X86_64
4357 s
->rip_offset
= 0; /* for relative ip address */
4361 b
= cpu_ldub_code(env
, s
->pc
);
4363 /* Collect prefixes. */
4366 prefixes
|= PREFIX_REPZ
;
4369 prefixes
|= PREFIX_REPNZ
;
4372 prefixes
|= PREFIX_LOCK
;
4393 prefixes
|= PREFIX_DATA
;
4396 prefixes
|= PREFIX_ADR
;
4398 #ifdef TARGET_X86_64
4402 rex_w
= (b
>> 3) & 1;
4403 rex_r
= (b
& 0x4) << 1;
4404 s
->rex_x
= (b
& 0x2) << 2;
4405 REX_B(s
) = (b
& 0x1) << 3;
4406 x86_64_hregs
= 1; /* select uniform byte register addressing */
4411 case 0xc5: /* 2-byte VEX */
4412 case 0xc4: /* 3-byte VEX */
4413 /* VEX prefixes cannot be used except in 32-bit mode.
4414 Otherwise the instruction is LES or LDS. */
4415 if (s
->code32
&& !s
->vm86
) {
4416 static const int pp_prefix
[4] = {
4417 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4419 int vex3
, vex2
= cpu_ldub_code(env
, s
->pc
);
4421 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4422 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4423 otherwise the instruction is LES or LDS. */
4428 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4429 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4430 | PREFIX_LOCK
| PREFIX_DATA
)) {
4433 #ifdef TARGET_X86_64
4438 rex_r
= (~vex2
>> 4) & 8;
4441 b
= cpu_ldub_code(env
, s
->pc
++);
4443 #ifdef TARGET_X86_64
4444 s
->rex_x
= (~vex2
>> 3) & 8;
4445 s
->rex_b
= (~vex2
>> 2) & 8;
4447 vex3
= cpu_ldub_code(env
, s
->pc
++);
4448 rex_w
= (vex3
>> 7) & 1;
4449 switch (vex2
& 0x1f) {
4450 case 0x01: /* Implied 0f leading opcode bytes. */
4451 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4453 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4456 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4459 default: /* Reserved for future use. */
4463 s
->vex_v
= (~vex3
>> 3) & 0xf;
4464 s
->vex_l
= (vex3
>> 2) & 1;
4465 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4470 /* Post-process prefixes. */
4472 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4473 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4474 over 0x66 if both are present. */
4475 dflag
= (rex_w
> 0 ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
4476 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4477 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
4479 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4480 if (s
->code32
^ ((prefixes
& PREFIX_DATA
) != 0)) {
4485 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4486 if (s
->code32
^ ((prefixes
& PREFIX_ADR
) != 0)) {
4493 s
->prefix
= prefixes
;
4497 /* lock generation */
4498 if (prefixes
& PREFIX_LOCK
)
4501 /* now check op code */
4505 /**************************/
4506 /* extended op code */
4507 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4510 /**************************/
4525 ot
= mo_b_d(b
, dflag
);
4528 case 0: /* OP Ev, Gv */
4529 modrm
= cpu_ldub_code(env
, s
->pc
++);
4530 reg
= ((modrm
>> 3) & 7) | rex_r
;
4531 mod
= (modrm
>> 6) & 3;
4532 rm
= (modrm
& 7) | REX_B(s
);
4534 gen_lea_modrm(env
, s
, modrm
);
4536 } else if (op
== OP_XORL
&& rm
== reg
) {
4538 /* xor reg, reg optimisation */
4539 set_cc_op(s
, CC_OP_CLR
);
4540 tcg_gen_movi_tl(cpu_T0
, 0);
4541 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
4546 gen_op_mov_v_reg(ot
, cpu_T1
, reg
);
4547 gen_op(s
, op
, ot
, opreg
);
4549 case 1: /* OP Gv, Ev */
4550 modrm
= cpu_ldub_code(env
, s
->pc
++);
4551 mod
= (modrm
>> 6) & 3;
4552 reg
= ((modrm
>> 3) & 7) | rex_r
;
4553 rm
= (modrm
& 7) | REX_B(s
);
4555 gen_lea_modrm(env
, s
, modrm
);
4556 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
4557 } else if (op
== OP_XORL
&& rm
== reg
) {
4560 gen_op_mov_v_reg(ot
, cpu_T1
, rm
);
4562 gen_op(s
, op
, ot
, reg
);
4564 case 2: /* OP A, Iv */
4565 val
= insn_get(env
, s
, ot
);
4566 tcg_gen_movi_tl(cpu_T1
, val
);
4567 gen_op(s
, op
, ot
, OR_EAX
);
4576 case 0x80: /* GRP1 */
4582 ot
= mo_b_d(b
, dflag
);
4584 modrm
= cpu_ldub_code(env
, s
->pc
++);
4585 mod
= (modrm
>> 6) & 3;
4586 rm
= (modrm
& 7) | REX_B(s
);
4587 op
= (modrm
>> 3) & 7;
4593 s
->rip_offset
= insn_const_size(ot
);
4594 gen_lea_modrm(env
, s
, modrm
);
4605 val
= insn_get(env
, s
, ot
);
4608 val
= (int8_t)insn_get(env
, s
, MO_8
);
4611 tcg_gen_movi_tl(cpu_T1
, val
);
4612 gen_op(s
, op
, ot
, opreg
);
4616 /**************************/
4617 /* inc, dec, and other misc arith */
4618 case 0x40 ... 0x47: /* inc Gv */
4620 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4622 case 0x48 ... 0x4f: /* dec Gv */
4624 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4626 case 0xf6: /* GRP3 */
4628 ot
= mo_b_d(b
, dflag
);
4630 modrm
= cpu_ldub_code(env
, s
->pc
++);
4631 mod
= (modrm
>> 6) & 3;
4632 rm
= (modrm
& 7) | REX_B(s
);
4633 op
= (modrm
>> 3) & 7;
4636 s
->rip_offset
= insn_const_size(ot
);
4637 gen_lea_modrm(env
, s
, modrm
);
4638 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
4640 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
4645 val
= insn_get(env
, s
, ot
);
4646 tcg_gen_movi_tl(cpu_T1
, val
);
4647 gen_op_testl_T0_T1_cc();
4648 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4651 tcg_gen_not_tl(cpu_T0
, cpu_T0
);
4653 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
4655 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4659 tcg_gen_neg_tl(cpu_T0
, cpu_T0
);
4661 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
4663 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4665 gen_op_update_neg_cc();
4666 set_cc_op(s
, CC_OP_SUBB
+ ot
);
4671 gen_op_mov_v_reg(MO_8
, cpu_T1
, R_EAX
);
4672 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
4673 tcg_gen_ext8u_tl(cpu_T1
, cpu_T1
);
4674 /* XXX: use 32 bit mul which could be faster */
4675 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
4676 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4677 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
4678 tcg_gen_andi_tl(cpu_cc_src
, cpu_T0
, 0xff00);
4679 set_cc_op(s
, CC_OP_MULB
);
4682 gen_op_mov_v_reg(MO_16
, cpu_T1
, R_EAX
);
4683 tcg_gen_ext16u_tl(cpu_T0
, cpu_T0
);
4684 tcg_gen_ext16u_tl(cpu_T1
, cpu_T1
);
4685 /* XXX: use 32 bit mul which could be faster */
4686 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
4687 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4688 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
4689 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, 16);
4690 gen_op_mov_reg_v(MO_16
, R_EDX
, cpu_T0
);
4691 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
4692 set_cc_op(s
, CC_OP_MULW
);
4696 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
4697 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EAX
]);
4698 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
4699 cpu_tmp2_i32
, cpu_tmp3_i32
);
4700 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], cpu_tmp2_i32
);
4701 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], cpu_tmp3_i32
);
4702 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4703 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
4704 set_cc_op(s
, CC_OP_MULL
);
4706 #ifdef TARGET_X86_64
4708 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
4709 cpu_T0
, cpu_regs
[R_EAX
]);
4710 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4711 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
4712 set_cc_op(s
, CC_OP_MULQ
);
            gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
            tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
            tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
            tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
            set_cc_op(s, CC_OP_MULB);
            gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
            tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
            tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
            gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
            set_cc_op(s, CC_OP_MULW);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
            tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                              cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
            tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
            tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
            tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
            set_cc_op(s, CC_OP_MULL);
#ifdef TARGET_X86_64
            tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                              cpu_T0, cpu_regs[R_EAX]);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
            set_cc_op(s, CC_OP_MULQ);
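            /* Note: for the signed multiply the overflow test stored in
               cc_src is the difference between the high half of the product
               and the sign-extension of the low half; any non-zero
               difference means the signed result did not fit in the
               destination, so CC_OP_MUL* then raises CF/OF. */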
            gen_helper_divb_AL(cpu_env, cpu_T0);
            gen_helper_divw_AX(cpu_env, cpu_T0);
            gen_helper_divl_EAX(cpu_env, cpu_T0);
#ifdef TARGET_X86_64
            gen_helper_divq_EAX(cpu_env, cpu_T0);
            gen_helper_idivb_AL(cpu_env, cpu_T0);
            gen_helper_idivw_AX(cpu_env, cpu_T0);
            gen_helper_idivl_EAX(cpu_env, cpu_T0);
#ifdef TARGET_X86_64
            gen_helper_idivq_EAX(cpu_env, cpu_T0);
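            /* Note: division is left entirely to helpers because DIV/IDIV
               can raise #DE both on a zero divisor and when the quotient
               overflows the destination, and raising a guest exception
               needs the full helper path rather than inline TCG ops. */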
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
        if (op == 2 || op == 4) {
            /* operand size for jumps is 64 bit */
        } else if (op == 3 || op == 5) {
            ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
        } else if (op == 6) {
            /* default push size is 64 bit */
            ot = mo_pushpop(s, dflag);
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
            gen_op_mov_v_reg(ot, cpu_T0, rm);
        case 0: /* inc Ev */
            gen_inc(s, ot, opreg, 1);
        case 1: /* dec Ev */
            gen_inc(s, ot, opreg, -1);
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
            next_eip = s->pc - s->cs_base;
            tcg_gen_movi_tl(cpu_T1, next_eip);
            gen_push_v(s, cpu_T1);
            gen_op_jmp_v(cpu_T0);
        case 3: /* lcall Ev */
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                           tcg_const_i32(dflag - 1),
                                           tcg_const_tl(s->pc - s->cs_base));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
                                      tcg_const_i32(dflag - 1),
                                      tcg_const_i32(s->pc - s->cs_base));
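            /* Note: far calls always go through a helper; in protected mode
               the call may change privilege level or go through a gate, and
               even the real/vm86 path has to push CS:IP and reload CS, so
               none of this is inlined as plain TCG ops. */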
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
            gen_op_jmp_v(cpu_T0);
        case 5: /* ljmp Ev */
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                          tcg_const_tl(s->pc - s->cs_base));
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_jmp_v(cpu_T1);
        case 6: /* push Ev */
            gen_push_v(s, cpu_T0);
    case 0x84: /* test Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(ot, cpu_T1, reg);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
    case 0xa8: /* test eAX, Iv */
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);
        gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
        tcg_gen_movi_tl(cpu_T1, val);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
            gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
            tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
            gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
            tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
            gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
            gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
            tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
            gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
            gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
            tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
            gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
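        /* Note: the arithmetic shift right by (width - 1) replicates the
           accumulator's sign bit across the whole register, which is
           exactly the all-zeros/all-ones value CWD/CDQ/CQO must place in
           DX/EDX/RDX. */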
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
            s->rip_offset = insn_const_size(ot);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(cpu_T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(cpu_T1, val);
            gen_op_mov_v_reg(ot, cpu_T1, reg);
#ifdef TARGET_X86_64
            tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
            tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                              cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
            tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
            tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
            tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
            tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
            tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
            gen_op_mov_reg_v(ot, reg, cpu_T0);
        set_cc_op(s, CC_OP_MULB + ot);
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_mov_v_reg(ot, cpu_T1, rm);
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
        gen_op_update2_cc();
        set_cc_op(s, CC_OP_ADDB + ot);
    case 0x1b1: /* cmpxchg Ev, Gv */
            TCGLabel *label1, *label2;
            TCGv t0, t1, t2, a0;
            ot = mo_b_d(b, dflag);
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
                gen_lea_modrm(env, s, modrm);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(s, ot, t0, a0);
                rm = 0; /* avoid warning */
            label1 = gen_new_label();
            tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
            tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
                label2 = gen_new_label();
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(s, ot, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                gen_op_st_v(s, ot, t1, a0);
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_srcT, t2);
            tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
            set_cc_op(s, CC_OP_SUBB + ot);
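            /* Note: t2 snapshots the old accumulator and t0 the destination
               operand.  On mismatch the destination value is copied into
               EAX; on match the source (t1) is written back, with the
               "no-op" store above preserving the access pattern of real
               hardware in the memory case.  Flags are then set exactly as
               for CMP via CC_OP_SUBB + ot. */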
    case 0x1c7: /* cmpxchg8b */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
            gen_lea_modrm(env, s, modrm);
            gen_helper_cmpxchg16b(cpu_env, cpu_A0);
            if (!(s->cpuid_features & CPUID_CX8))
            gen_lea_modrm(env, s, modrm);
            gen_helper_cmpxchg8b(cpu_env, cpu_A0);
        set_cc_op(s, CC_OP_EFLAGS);
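        /* Note: the 16-byte form (CMPXCHG16B) is only reachable with REX.W
           (dflag == MO_64) and additionally requires the CPUID CX16 feature
           bit, while the 8-byte form only needs CX8; both leave EFLAGS to
           the helper, hence CC_OP_EFLAGS. */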
5147 /**************************/
5149 case 0x50 ... 0x57: /* push */
5150 gen_op_mov_v_reg(MO_32
, cpu_T0
, (b
& 7) | REX_B(s
));
5151 gen_push_v(s
, cpu_T0
);
5153 case 0x58 ... 0x5f: /* pop */
5155 /* NOTE: order is important for pop %sp */
5156 gen_pop_update(s
, ot
);
5157 gen_op_mov_reg_v(ot
, (b
& 7) | REX_B(s
), cpu_T0
);
5159 case 0x60: /* pusha */
5164 case 0x61: /* popa */
5169 case 0x68: /* push Iv */
5171 ot
= mo_pushpop(s
, dflag
);
5173 val
= insn_get(env
, s
, ot
);
5175 val
= (int8_t)insn_get(env
, s
, MO_8
);
5176 tcg_gen_movi_tl(cpu_T0
, val
);
5177 gen_push_v(s
, cpu_T0
);
5179 case 0x8f: /* pop Ev */
5180 modrm
= cpu_ldub_code(env
, s
->pc
++);
5181 mod
= (modrm
>> 6) & 3;
5184 /* NOTE: order is important for pop %sp */
5185 gen_pop_update(s
, ot
);
5186 rm
= (modrm
& 7) | REX_B(s
);
5187 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
5189 /* NOTE: order is important too for MMU exceptions */
5190 s
->popl_esp_hack
= 1 << ot
;
5191 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5192 s
->popl_esp_hack
= 0;
5193 gen_pop_update(s
, ot
);
5196 case 0xc8: /* enter */
5199 val
= cpu_lduw_code(env
, s
->pc
);
5201 level
= cpu_ldub_code(env
, s
->pc
++);
5202 gen_enter(s
, val
, level
);
5205 case 0xc9: /* leave */
5208 case 0x06: /* push es */
5209 case 0x0e: /* push cs */
5210 case 0x16: /* push ss */
5211 case 0x1e: /* push ds */
5214 gen_op_movl_T0_seg(b
>> 3);
5215 gen_push_v(s
, cpu_T0
);
5217 case 0x1a0: /* push fs */
5218 case 0x1a8: /* push gs */
5219 gen_op_movl_T0_seg((b
>> 3) & 7);
5220 gen_push_v(s
, cpu_T0
);
5222 case 0x07: /* pop es */
5223 case 0x17: /* pop ss */
5224 case 0x1f: /* pop ds */
5229 gen_movl_seg_T0(s
, reg
);
5230 gen_pop_update(s
, ot
);
5231 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5233 gen_jmp_im(s
->pc
- s
->cs_base
);
5236 gen_eob_inhibit_irq(s
, true);
5242 case 0x1a1: /* pop fs */
5243 case 0x1a9: /* pop gs */
5245 gen_movl_seg_T0(s
, (b
>> 3) & 7);
5246 gen_pop_update(s
, ot
);
5248 gen_jmp_im(s
->pc
- s
->cs_base
);
5253 /**************************/
5256 case 0x89: /* mov Gv, Ev */
5257 ot
= mo_b_d(b
, dflag
);
5258 modrm
= cpu_ldub_code(env
, s
->pc
++);
5259 reg
= ((modrm
>> 3) & 7) | rex_r
;
5261 /* generate a generic store */
5262 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5265 case 0xc7: /* mov Ev, Iv */
5266 ot
= mo_b_d(b
, dflag
);
5267 modrm
= cpu_ldub_code(env
, s
->pc
++);
5268 mod
= (modrm
>> 6) & 3;
5270 s
->rip_offset
= insn_const_size(ot
);
5271 gen_lea_modrm(env
, s
, modrm
);
5273 val
= insn_get(env
, s
, ot
);
5274 tcg_gen_movi_tl(cpu_T0
, val
);
5276 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
5278 gen_op_mov_reg_v(ot
, (modrm
& 7) | REX_B(s
), cpu_T0
);
5282 case 0x8b: /* mov Ev, Gv */
5283 ot
= mo_b_d(b
, dflag
);
5284 modrm
= cpu_ldub_code(env
, s
->pc
++);
5285 reg
= ((modrm
>> 3) & 7) | rex_r
;
5287 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5288 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
5290 case 0x8e: /* mov seg, Gv */
5291 modrm
= cpu_ldub_code(env
, s
->pc
++);
5292 reg
= (modrm
>> 3) & 7;
5293 if (reg
>= 6 || reg
== R_CS
)
5295 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5296 gen_movl_seg_T0(s
, reg
);
5297 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5299 gen_jmp_im(s
->pc
- s
->cs_base
);
5302 gen_eob_inhibit_irq(s
, true);
5308 case 0x8c: /* mov Gv, seg */
5309 modrm
= cpu_ldub_code(env
, s
->pc
++);
5310 reg
= (modrm
>> 3) & 7;
5311 mod
= (modrm
>> 6) & 3;
5314 gen_op_movl_T0_seg(reg
);
5315 ot
= mod
== 3 ? dflag
: MO_16
;
5316 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5319 case 0x1b6: /* movzbS Gv, Eb */
5320 case 0x1b7: /* movzwS Gv, Eb */
5321 case 0x1be: /* movsbS Gv, Eb */
5322 case 0x1bf: /* movswS Gv, Eb */
5327 /* d_ot is the size of destination */
5329 /* ot is the size of source */
5330 ot
= (b
& 1) + MO_8
;
5331 /* s_ot is the sign+size of source */
5332 s_ot
= b
& 8 ? MO_SIGN
| ot
: ot
;
5334 modrm
= cpu_ldub_code(env
, s
->pc
++);
5335 reg
= ((modrm
>> 3) & 7) | rex_r
;
5336 mod
= (modrm
>> 6) & 3;
5337 rm
= (modrm
& 7) | REX_B(s
);
5340 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
5343 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
5346 tcg_gen_ext8s_tl(cpu_T0
, cpu_T0
);
5349 tcg_gen_ext16u_tl(cpu_T0
, cpu_T0
);
5353 tcg_gen_ext16s_tl(cpu_T0
, cpu_T0
);
5356 gen_op_mov_reg_v(d_ot
, reg
, cpu_T0
);
5358 gen_lea_modrm(env
, s
, modrm
);
5359 gen_op_ld_v(s
, s_ot
, cpu_T0
, cpu_A0
);
5360 gen_op_mov_reg_v(d_ot
, reg
, cpu_T0
);
5365 case 0x8d: /* lea */
5366 modrm
= cpu_ldub_code(env
, s
->pc
++);
5367 mod
= (modrm
>> 6) & 3;
5370 reg
= ((modrm
>> 3) & 7) | rex_r
;
5372 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
5373 TCGv ea
= gen_lea_modrm_1(a
);
5374 gen_op_mov_reg_v(dflag
, reg
, ea
);
5378 case 0xa0: /* mov EAX, Ov */
5380 case 0xa2: /* mov Ov, EAX */
5383 target_ulong offset_addr
;
5385 ot
= mo_b_d(b
, dflag
);
5387 #ifdef TARGET_X86_64
5389 offset_addr
= cpu_ldq_code(env
, s
->pc
);
5394 offset_addr
= insn_get(env
, s
, s
->aflag
);
5397 tcg_gen_movi_tl(cpu_A0
, offset_addr
);
5398 gen_add_A0_ds_seg(s
);
5400 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
5401 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T0
);
5403 gen_op_mov_v_reg(ot
, cpu_T0
, R_EAX
);
5404 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
5408 case 0xd7: /* xlat */
5409 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EBX
]);
5410 tcg_gen_ext8u_tl(cpu_T0
, cpu_regs
[R_EAX
]);
5411 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_T0
);
5412 gen_extu(s
->aflag
, cpu_A0
);
5413 gen_add_A0_ds_seg(s
);
5414 gen_op_ld_v(s
, MO_8
, cpu_T0
, cpu_A0
);
5415 gen_op_mov_reg_v(MO_8
, R_EAX
, cpu_T0
);
5417 case 0xb0 ... 0xb7: /* mov R, Ib */
5418 val
= insn_get(env
, s
, MO_8
);
5419 tcg_gen_movi_tl(cpu_T0
, val
);
5420 gen_op_mov_reg_v(MO_8
, (b
& 7) | REX_B(s
), cpu_T0
);
5422 case 0xb8 ... 0xbf: /* mov R, Iv */
5423 #ifdef TARGET_X86_64
5424 if (dflag
== MO_64
) {
5427 tmp
= cpu_ldq_code(env
, s
->pc
);
5429 reg
= (b
& 7) | REX_B(s
);
5430 tcg_gen_movi_tl(cpu_T0
, tmp
);
5431 gen_op_mov_reg_v(MO_64
, reg
, cpu_T0
);
5436 val
= insn_get(env
, s
, ot
);
5437 reg
= (b
& 7) | REX_B(s
);
5438 tcg_gen_movi_tl(cpu_T0
, val
);
5439 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
5443 case 0x91 ... 0x97: /* xchg R, EAX */
5446 reg
= (b
& 7) | REX_B(s
);
5450 case 0x87: /* xchg Ev, Gv */
5451 ot
= mo_b_d(b
, dflag
);
5452 modrm
= cpu_ldub_code(env
, s
->pc
++);
5453 reg
= ((modrm
>> 3) & 7) | rex_r
;
5454 mod
= (modrm
>> 6) & 3;
5456 rm
= (modrm
& 7) | REX_B(s
);
5458 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
5459 gen_op_mov_v_reg(ot
, cpu_T1
, rm
);
5460 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
5461 gen_op_mov_reg_v(ot
, reg
, cpu_T1
);
5463 gen_lea_modrm(env
, s
, modrm
);
5464 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
5465 /* for xchg, lock is implicit */
5466 if (!(prefixes
& PREFIX_LOCK
))
5468 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
5469 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
5470 if (!(prefixes
& PREFIX_LOCK
))
5471 gen_helper_unlock();
5472 gen_op_mov_reg_v(ot
, reg
, cpu_T1
);
5475 case 0xc4: /* les Gv */
5476 /* In CODE64 this is VEX3; see above. */
5479 case 0xc5: /* lds Gv */
5480 /* In CODE64 this is VEX2; see above. */
5483 case 0x1b2: /* lss Gv */
5486 case 0x1b4: /* lfs Gv */
5489 case 0x1b5: /* lgs Gv */
5492 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
5493 modrm
= cpu_ldub_code(env
, s
->pc
++);
5494 reg
= ((modrm
>> 3) & 7) | rex_r
;
5495 mod
= (modrm
>> 6) & 3;
5498 gen_lea_modrm(env
, s
, modrm
);
5499 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
5500 gen_add_A0_im(s
, 1 << ot
);
5501 /* load the segment first to handle exceptions properly */
5502 gen_op_ld_v(s
, MO_16
, cpu_T0
, cpu_A0
);
5503 gen_movl_seg_T0(s
, op
);
5504 /* then put the data */
5505 gen_op_mov_reg_v(ot
, reg
, cpu_T1
);
5507 gen_jmp_im(s
->pc
- s
->cs_base
);
5512 /************************/
5520 ot
= mo_b_d(b
, dflag
);
5521 modrm
= cpu_ldub_code(env
, s
->pc
++);
5522 mod
= (modrm
>> 6) & 3;
5523 op
= (modrm
>> 3) & 7;
5529 gen_lea_modrm(env
, s
, modrm
);
5532 opreg
= (modrm
& 7) | REX_B(s
);
5537 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5540 shift
= cpu_ldub_code(env
, s
->pc
++);
5542 gen_shifti(s
, op
, ot
, opreg
, shift
);
5557 case 0x1a4: /* shld imm */
5561 case 0x1a5: /* shld cl */
5565 case 0x1ac: /* shrd imm */
5569 case 0x1ad: /* shrd cl */
5574 modrm
= cpu_ldub_code(env
, s
->pc
++);
5575 mod
= (modrm
>> 6) & 3;
5576 rm
= (modrm
& 7) | REX_B(s
);
5577 reg
= ((modrm
>> 3) & 7) | rex_r
;
5579 gen_lea_modrm(env
, s
, modrm
);
5584 gen_op_mov_v_reg(ot
, cpu_T1
, reg
);
5587 TCGv imm
= tcg_const_tl(cpu_ldub_code(env
, s
->pc
++));
5588 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
5591 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
5595 /************************/
5598 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5599 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5600 /* XXX: what to do if illegal op ? */
5601 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5604 modrm
= cpu_ldub_code(env
, s
->pc
++);
5605 mod
= (modrm
>> 6) & 3;
5607 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5610 gen_lea_modrm(env
, s
, modrm
);
5612 case 0x00 ... 0x07: /* fxxxs */
5613 case 0x10 ... 0x17: /* fixxxl */
5614 case 0x20 ... 0x27: /* fxxxl */
5615 case 0x30 ... 0x37: /* fixxx */
5622 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5623 s
->mem_index
, MO_LEUL
);
5624 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
5627 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5628 s
->mem_index
, MO_LEUL
);
5629 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5632 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
5633 s
->mem_index
, MO_LEQ
);
5634 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
5638 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5639 s
->mem_index
, MO_LESW
);
5640 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5644 gen_helper_fp_arith_ST0_FT0(op1
);
5646 /* fcomp needs pop */
5647 gen_helper_fpop(cpu_env
);
5651 case 0x08: /* flds */
5652 case 0x0a: /* fsts */
5653 case 0x0b: /* fstps */
5654 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5655 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5656 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5661 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5662 s
->mem_index
, MO_LEUL
);
5663 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
5666 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5667 s
->mem_index
, MO_LEUL
);
5668 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5671 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
5672 s
->mem_index
, MO_LEQ
);
5673 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
5677 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5678 s
->mem_index
, MO_LESW
);
5679 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5684 /* XXX: the corresponding CPUID bit must be tested ! */
5687 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
5688 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5689 s
->mem_index
, MO_LEUL
);
5692 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
5693 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
5694 s
->mem_index
, MO_LEQ
);
5698 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
5699 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5700 s
->mem_index
, MO_LEUW
);
5703 gen_helper_fpop(cpu_env
);
5708 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
5709 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5710 s
->mem_index
, MO_LEUL
);
5713 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
5714 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5715 s
->mem_index
, MO_LEUL
);
5718 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
5719 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
5720 s
->mem_index
, MO_LEQ
);
5724 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
5725 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5726 s
->mem_index
, MO_LEUW
);
5730 gen_helper_fpop(cpu_env
);
5734 case 0x0c: /* fldenv mem */
5735 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5737 case 0x0d: /* fldcw mem */
5738 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5739 s
->mem_index
, MO_LEUW
);
5740 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
5742 case 0x0e: /* fnstenv mem */
5743 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5745 case 0x0f: /* fnstcw mem */
5746 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
5747 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5748 s
->mem_index
, MO_LEUW
);
5750 case 0x1d: /* fldt mem */
5751 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
5753 case 0x1f: /* fstpt mem */
5754 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
5755 gen_helper_fpop(cpu_env
);
5757 case 0x2c: /* frstor mem */
5758 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5760 case 0x2e: /* fnsave mem */
5761 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5763 case 0x2f: /* fnstsw mem */
5764 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
5765 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5766 s
->mem_index
, MO_LEUW
);
5768 case 0x3c: /* fbld */
5769 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
5771 case 0x3e: /* fbstp */
5772 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
5773 gen_helper_fpop(cpu_env
);
5775 case 0x3d: /* fildll */
5776 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
5777 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
5779 case 0x3f: /* fistpll */
5780 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
5781 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
5782 gen_helper_fpop(cpu_env
);
5788 /* register float ops */
5792 case 0x08: /* fld sti */
5793 gen_helper_fpush(cpu_env
);
5794 gen_helper_fmov_ST0_STN(cpu_env
,
5795 tcg_const_i32((opreg
+ 1) & 7));
5797 case 0x09: /* fxchg sti */
5798 case 0x29: /* fxchg4 sti, undocumented op */
5799 case 0x39: /* fxchg7 sti, undocumented op */
5800 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
5802 case 0x0a: /* grp d9/2 */
5805 /* check exceptions (FreeBSD FPU probe) */
5806 gen_helper_fwait(cpu_env
);
5812 case 0x0c: /* grp d9/4 */
5815 gen_helper_fchs_ST0(cpu_env
);
5818 gen_helper_fabs_ST0(cpu_env
);
5821 gen_helper_fldz_FT0(cpu_env
);
5822 gen_helper_fcom_ST0_FT0(cpu_env
);
5825 gen_helper_fxam_ST0(cpu_env
);
5831 case 0x0d: /* grp d9/5 */
5835 gen_helper_fpush(cpu_env
);
5836 gen_helper_fld1_ST0(cpu_env
);
5839 gen_helper_fpush(cpu_env
);
5840 gen_helper_fldl2t_ST0(cpu_env
);
5843 gen_helper_fpush(cpu_env
);
5844 gen_helper_fldl2e_ST0(cpu_env
);
5847 gen_helper_fpush(cpu_env
);
5848 gen_helper_fldpi_ST0(cpu_env
);
5851 gen_helper_fpush(cpu_env
);
5852 gen_helper_fldlg2_ST0(cpu_env
);
5855 gen_helper_fpush(cpu_env
);
5856 gen_helper_fldln2_ST0(cpu_env
);
5859 gen_helper_fpush(cpu_env
);
5860 gen_helper_fldz_ST0(cpu_env
);
5867 case 0x0e: /* grp d9/6 */
5870 gen_helper_f2xm1(cpu_env
);
5873 gen_helper_fyl2x(cpu_env
);
5876 gen_helper_fptan(cpu_env
);
5878 case 3: /* fpatan */
5879 gen_helper_fpatan(cpu_env
);
5881 case 4: /* fxtract */
5882 gen_helper_fxtract(cpu_env
);
5884 case 5: /* fprem1 */
5885 gen_helper_fprem1(cpu_env
);
5887 case 6: /* fdecstp */
5888 gen_helper_fdecstp(cpu_env
);
5891 case 7: /* fincstp */
5892 gen_helper_fincstp(cpu_env
);
5896 case 0x0f: /* grp d9/7 */
5899 gen_helper_fprem(cpu_env
);
5901 case 1: /* fyl2xp1 */
5902 gen_helper_fyl2xp1(cpu_env
);
5905 gen_helper_fsqrt(cpu_env
);
5907 case 3: /* fsincos */
5908 gen_helper_fsincos(cpu_env
);
5910 case 5: /* fscale */
5911 gen_helper_fscale(cpu_env
);
5913 case 4: /* frndint */
5914 gen_helper_frndint(cpu_env
);
5917 gen_helper_fsin(cpu_env
);
5921 gen_helper_fcos(cpu_env
);
5925 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5926 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5927 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5933 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
5935 gen_helper_fpop(cpu_env
);
5937 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5938 gen_helper_fp_arith_ST0_FT0(op1
);
5942 case 0x02: /* fcom */
5943 case 0x22: /* fcom2, undocumented op */
5944 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5945 gen_helper_fcom_ST0_FT0(cpu_env
);
5947 case 0x03: /* fcomp */
5948 case 0x23: /* fcomp3, undocumented op */
5949 case 0x32: /* fcomp5, undocumented op */
5950 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5951 gen_helper_fcom_ST0_FT0(cpu_env
);
5952 gen_helper_fpop(cpu_env
);
5954 case 0x15: /* da/5 */
5956 case 1: /* fucompp */
5957 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
5958 gen_helper_fucom_ST0_FT0(cpu_env
);
5959 gen_helper_fpop(cpu_env
);
5960 gen_helper_fpop(cpu_env
);
5968 case 0: /* feni (287 only, just do nop here) */
5970 case 1: /* fdisi (287 only, just do nop here) */
5973 gen_helper_fclex(cpu_env
);
5975 case 3: /* fninit */
5976 gen_helper_fninit(cpu_env
);
5978 case 4: /* fsetpm (287 only, just do nop here) */
5984 case 0x1d: /* fucomi */
5985 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
5988 gen_update_cc_op(s
);
5989 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5990 gen_helper_fucomi_ST0_FT0(cpu_env
);
5991 set_cc_op(s
, CC_OP_EFLAGS
);
5993 case 0x1e: /* fcomi */
5994 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
5997 gen_update_cc_op(s
);
5998 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5999 gen_helper_fcomi_ST0_FT0(cpu_env
);
6000 set_cc_op(s
, CC_OP_EFLAGS
);
6002 case 0x28: /* ffree sti */
6003 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6005 case 0x2a: /* fst sti */
6006 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6008 case 0x2b: /* fstp sti */
6009 case 0x0b: /* fstp1 sti, undocumented op */
6010 case 0x3a: /* fstp8 sti, undocumented op */
6011 case 0x3b: /* fstp9 sti, undocumented op */
6012 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6013 gen_helper_fpop(cpu_env
);
6015 case 0x2c: /* fucom st(i) */
6016 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6017 gen_helper_fucom_ST0_FT0(cpu_env
);
6019 case 0x2d: /* fucomp st(i) */
6020 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6021 gen_helper_fucom_ST0_FT0(cpu_env
);
6022 gen_helper_fpop(cpu_env
);
6024 case 0x33: /* de/3 */
6026 case 1: /* fcompp */
6027 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6028 gen_helper_fcom_ST0_FT0(cpu_env
);
6029 gen_helper_fpop(cpu_env
);
6030 gen_helper_fpop(cpu_env
);
6036 case 0x38: /* ffreep sti, undocumented op */
6037 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6038 gen_helper_fpop(cpu_env
);
6040 case 0x3c: /* df/4 */
6043 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6044 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
6045 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
6051 case 0x3d: /* fucomip */
6052 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6055 gen_update_cc_op(s
);
6056 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6057 gen_helper_fucomi_ST0_FT0(cpu_env
);
6058 gen_helper_fpop(cpu_env
);
6059 set_cc_op(s
, CC_OP_EFLAGS
);
6061 case 0x3e: /* fcomip */
6062 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6065 gen_update_cc_op(s
);
6066 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6067 gen_helper_fcomi_ST0_FT0(cpu_env
);
6068 gen_helper_fpop(cpu_env
);
6069 set_cc_op(s
, CC_OP_EFLAGS
);
6071 case 0x10 ... 0x13: /* fcmovxx */
6076 static const uint8_t fcmov_cc
[8] = {
6083 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6086 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6087 l1
= gen_new_label();
6088 gen_jcc1_noeob(s
, op1
, l1
);
6089 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6098 /************************/
6101 case 0xa4: /* movsS */
6103 ot
= mo_b_d(b
, dflag
);
6104 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6105 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6111 case 0xaa: /* stosS */
6113 ot
= mo_b_d(b
, dflag
);
6114 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6115 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6120 case 0xac: /* lodsS */
6122 ot
= mo_b_d(b
, dflag
);
6123 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6124 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6129 case 0xae: /* scasS */
6131 ot
= mo_b_d(b
, dflag
);
6132 if (prefixes
& PREFIX_REPNZ
) {
6133 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6134 } else if (prefixes
& PREFIX_REPZ
) {
6135 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6141 case 0xa6: /* cmpsS */
6143 ot
= mo_b_d(b
, dflag
);
6144 if (prefixes
& PREFIX_REPNZ
) {
6145 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6146 } else if (prefixes
& PREFIX_REPZ
) {
6147 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6152 case 0x6c: /* insS */
6154 ot
= mo_b_d32(b
, dflag
);
6155 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6156 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6157 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6158 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6159 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6162 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6163 gen_jmp(s
, s
->pc
- s
->cs_base
);
6167 case 0x6e: /* outsS */
6169 ot
= mo_b_d32(b
, dflag
);
6170 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6171 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6172 svm_is_rep(prefixes
) | 4);
6173 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6174 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6177 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6178 gen_jmp(s
, s
->pc
- s
->cs_base
);
6183 /************************/
6188 ot
= mo_b_d32(b
, dflag
);
6189 val
= cpu_ldub_code(env
, s
->pc
++);
6190 tcg_gen_movi_tl(cpu_T0
, val
);
6191 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6192 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6193 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6196 tcg_gen_movi_i32(cpu_tmp2_i32
, val
);
6197 gen_helper_in_func(ot
, cpu_T1
, cpu_tmp2_i32
);
6198 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T1
);
6199 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6200 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6202 gen_jmp(s
, s
->pc
- s
->cs_base
);
6207 ot
= mo_b_d32(b
, dflag
);
6208 val
= cpu_ldub_code(env
, s
->pc
++);
6209 tcg_gen_movi_tl(cpu_T0
, val
);
6210 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6211 svm_is_rep(prefixes
));
6212 gen_op_mov_v_reg(ot
, cpu_T1
, R_EAX
);
6214 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6217 tcg_gen_movi_i32(cpu_tmp2_i32
, val
);
6218 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
6219 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6220 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6221 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6223 gen_jmp(s
, s
->pc
- s
->cs_base
);
6228 ot
= mo_b_d32(b
, dflag
);
6229 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6230 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6231 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6232 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6235 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
6236 gen_helper_in_func(ot
, cpu_T1
, cpu_tmp2_i32
);
6237 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T1
);
6238 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6239 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6241 gen_jmp(s
, s
->pc
- s
->cs_base
);
6246 ot
= mo_b_d32(b
, dflag
);
6247 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6248 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6249 svm_is_rep(prefixes
));
6250 gen_op_mov_v_reg(ot
, cpu_T1
, R_EAX
);
6252 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6255 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
6256 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
6257 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6258 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6259 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6261 gen_jmp(s
, s
->pc
- s
->cs_base
);
6265 /************************/
6267 case 0xc2: /* ret im */
6268 val
= cpu_ldsw_code(env
, s
->pc
);
6271 gen_stack_update(s
, val
+ (1 << ot
));
6272 /* Note that gen_pop_T0 uses a zero-extending load. */
6273 gen_op_jmp_v(cpu_T0
);
6277 case 0xc3: /* ret */
6279 gen_pop_update(s
, ot
);
6280 /* Note that gen_pop_T0 uses a zero-extending load. */
6281 gen_op_jmp_v(cpu_T0
);
6285 case 0xca: /* lret im */
6286 val
= cpu_ldsw_code(env
, s
->pc
);
6289 if (s
->pe
&& !s
->vm86
) {
6290 gen_update_cc_op(s
);
6291 gen_jmp_im(pc_start
- s
->cs_base
);
6292 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6293 tcg_const_i32(val
));
6297 gen_op_ld_v(s
, dflag
, cpu_T0
, cpu_A0
);
6298 /* NOTE: keeping EIP updated is not a problem in case of
6300 gen_op_jmp_v(cpu_T0
);
6302 gen_add_A0_im(s
, 1 << dflag
);
6303 gen_op_ld_v(s
, dflag
, cpu_T0
, cpu_A0
);
6304 gen_op_movl_seg_T0_vm(R_CS
);
6305 /* add stack offset */
6306 gen_stack_update(s
, val
+ (2 << dflag
));
6310 case 0xcb: /* lret */
6313 case 0xcf: /* iret */
6314 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6317 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6318 set_cc_op(s
, CC_OP_EFLAGS
);
6319 } else if (s
->vm86
) {
6321 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6323 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6324 set_cc_op(s
, CC_OP_EFLAGS
);
6327 gen_helper_iret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6328 tcg_const_i32(s
->pc
- s
->cs_base
));
6329 set_cc_op(s
, CC_OP_EFLAGS
);
6333 case 0xe8: /* call im */
6335 if (dflag
!= MO_16
) {
6336 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6338 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6340 next_eip
= s
->pc
- s
->cs_base
;
6342 if (dflag
== MO_16
) {
6344 } else if (!CODE64(s
)) {
6347 tcg_gen_movi_tl(cpu_T0
, next_eip
);
6348 gen_push_v(s
, cpu_T0
);
6353 case 0x9a: /* lcall im */
6355 unsigned int selector
, offset
;
6360 offset
= insn_get(env
, s
, ot
);
6361 selector
= insn_get(env
, s
, MO_16
);
6363 tcg_gen_movi_tl(cpu_T0
, selector
);
6364 tcg_gen_movi_tl(cpu_T1
, offset
);
6367 case 0xe9: /* jmp im */
6368 if (dflag
!= MO_16
) {
6369 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6371 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6373 tval
+= s
->pc
- s
->cs_base
;
6374 if (dflag
== MO_16
) {
6376 } else if (!CODE64(s
)) {
6382 case 0xea: /* ljmp im */
6384 unsigned int selector
, offset
;
6389 offset
= insn_get(env
, s
, ot
);
6390 selector
= insn_get(env
, s
, MO_16
);
6392 tcg_gen_movi_tl(cpu_T0
, selector
);
6393 tcg_gen_movi_tl(cpu_T1
, offset
);
6396 case 0xeb: /* jmp Jb */
6397 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6398 tval
+= s
->pc
- s
->cs_base
;
6399 if (dflag
== MO_16
) {
6404 case 0x70 ... 0x7f: /* jcc Jb */
6405 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6407 case 0x180 ... 0x18f: /* jcc Jv */
6408 if (dflag
!= MO_16
) {
6409 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6411 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6414 next_eip
= s
->pc
- s
->cs_base
;
6416 if (dflag
== MO_16
) {
6420 gen_jcc(s
, b
, tval
, next_eip
);
6423 case 0x190 ... 0x19f: /* setcc Gv */
6424 modrm
= cpu_ldub_code(env
, s
->pc
++);
6425 gen_setcc1(s
, b
, cpu_T0
);
6426 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
6428 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6429 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6433 modrm
= cpu_ldub_code(env
, s
->pc
++);
6434 reg
= ((modrm
>> 3) & 7) | rex_r
;
6435 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6438 /************************/
6440 case 0x9c: /* pushf */
6441 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6442 if (s
->vm86
&& s
->iopl
!= 3) {
6443 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6445 gen_update_cc_op(s
);
6446 gen_helper_read_eflags(cpu_T0
, cpu_env
);
6447 gen_push_v(s
, cpu_T0
);
6450 case 0x9d: /* popf */
6451 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6452 if (s
->vm86
&& s
->iopl
!= 3) {
6453 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6457 if (dflag
!= MO_16
) {
6458 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6459 tcg_const_i32((TF_MASK
| AC_MASK
|
6464 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6465 tcg_const_i32((TF_MASK
| AC_MASK
|
6467 IF_MASK
| IOPL_MASK
)
6471 if (s
->cpl
<= s
->iopl
) {
6472 if (dflag
!= MO_16
) {
6473 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6474 tcg_const_i32((TF_MASK
|
6480 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6481 tcg_const_i32((TF_MASK
|
6489 if (dflag
!= MO_16
) {
6490 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6491 tcg_const_i32((TF_MASK
| AC_MASK
|
6492 ID_MASK
| NT_MASK
)));
6494 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6495 tcg_const_i32((TF_MASK
| AC_MASK
|
6501 gen_pop_update(s
, ot
);
6502 set_cc_op(s
, CC_OP_EFLAGS
);
6503 /* abort translation because TF/AC flag may change */
6504 gen_jmp_im(s
->pc
- s
->cs_base
);
6508 case 0x9e: /* sahf */
6509 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6511 gen_op_mov_v_reg(MO_8
, cpu_T0
, R_AH
);
6512 gen_compute_eflags(s
);
6513 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6514 tcg_gen_andi_tl(cpu_T0
, cpu_T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6515 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_T0
);
6517 case 0x9f: /* lahf */
6518 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6520 gen_compute_eflags(s
);
6521 /* Note: gen_compute_eflags() only gives the condition codes */
6522 tcg_gen_ori_tl(cpu_T0
, cpu_cc_src
, 0x02);
6523 gen_op_mov_reg_v(MO_8
, R_AH
, cpu_T0
);
6525 case 0xf5: /* cmc */
6526 gen_compute_eflags(s
);
6527 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6529 case 0xf8: /* clc */
6530 gen_compute_eflags(s
);
6531 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6533 case 0xf9: /* stc */
6534 gen_compute_eflags(s
);
6535 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6537 case 0xfc: /* cld */
6538 tcg_gen_movi_i32(cpu_tmp2_i32
, 1);
6539 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6541 case 0xfd: /* std */
6542 tcg_gen_movi_i32(cpu_tmp2_i32
, -1);
6543 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6546 /************************/
6547 /* bit operations */
6548 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6550 modrm
= cpu_ldub_code(env
, s
->pc
++);
6551 op
= (modrm
>> 3) & 7;
6552 mod
= (modrm
>> 6) & 3;
6553 rm
= (modrm
& 7) | REX_B(s
);
6556 gen_lea_modrm(env
, s
, modrm
);
6557 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
6559 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
6562 val
= cpu_ldub_code(env
, s
->pc
++);
6563 tcg_gen_movi_tl(cpu_T1
, val
);
6568 case 0x1a3: /* bt Gv, Ev */
6571 case 0x1ab: /* bts */
6574 case 0x1b3: /* btr */
6577 case 0x1bb: /* btc */
6581 modrm
= cpu_ldub_code(env
, s
->pc
++);
6582 reg
= ((modrm
>> 3) & 7) | rex_r
;
6583 mod
= (modrm
>> 6) & 3;
6584 rm
= (modrm
& 7) | REX_B(s
);
6585 gen_op_mov_v_reg(MO_32
, cpu_T1
, reg
);
6587 gen_lea_modrm(env
, s
, modrm
);
6588 /* specific case: we need to add a displacement */
6589 gen_exts(ot
, cpu_T1
);
6590 tcg_gen_sari_tl(cpu_tmp0
, cpu_T1
, 3 + ot
);
6591 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
6592 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
6593 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
6595 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
6598 tcg_gen_andi_tl(cpu_T1
, cpu_T1
, (1 << (3 + ot
)) - 1);
6599 tcg_gen_shr_tl(cpu_tmp4
, cpu_T0
, cpu_T1
);
6604 tcg_gen_movi_tl(cpu_tmp0
, 1);
6605 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T1
);
6606 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
6609 tcg_gen_movi_tl(cpu_tmp0
, 1);
6610 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T1
);
6611 tcg_gen_andc_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
6615 tcg_gen_movi_tl(cpu_tmp0
, 1);
6616 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T1
);
6617 tcg_gen_xor_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
6622 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
6624 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
            /* Delay all CC updates until after the store above.  Note that
               C is the result of the test, Z is unchanged, and the others
               are all undefined. */
            case CC_OP_MULB ... CC_OP_MULQ:
            case CC_OP_ADDB ... CC_OP_ADDQ:
            case CC_OP_ADCB ... CC_OP_ADCQ:
            case CC_OP_SUBB ... CC_OP_SUBQ:
            case CC_OP_SBBB ... CC_OP_SBBQ:
            case CC_OP_LOGICB ... CC_OP_LOGICQ:
            case CC_OP_INCB ... CC_OP_INCQ:
            case CC_OP_DECB ... CC_OP_DECQ:
            case CC_OP_SHLB ... CC_OP_SHLQ:
            case CC_OP_SARB ... CC_OP_SARQ:
            case CC_OP_BMILGB ... CC_OP_BMILGQ:
                /* Z was going to be computed from the non-zero status of
                   CC_DST.  We can get that same Z value (and the new C
                   value) by leaving CC_DST alone, setting CC_SRC, and
                   using a CC_OP_SAR of the same width.  */
                tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
                set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
                /* Otherwise, generate EFLAGS and replace the C bit. */
                gen_compute_eflags(s);
                tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6658 case 0x1bc: /* bsf / tzcnt */
6659 case 0x1bd: /* bsr / lzcnt */
6661 modrm
= cpu_ldub_code(env
, s
->pc
++);
6662 reg
= ((modrm
>> 3) & 7) | rex_r
;
6663 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
6664 gen_extu(ot
, cpu_T0
);
6666 /* Note that lzcnt and tzcnt are in different extensions. */
6667 if ((prefixes
& PREFIX_REPZ
)
6669 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
6670 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
6672 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
6674 /* For lzcnt, reduce the target_ulong result by the
6675 number of zeros that we expect to find at the top. */
6676 gen_helper_clz(cpu_T0
, cpu_T0
);
6677 tcg_gen_subi_tl(cpu_T0
, cpu_T0
, TARGET_LONG_BITS
- size
);
6679 /* For tzcnt, a zero input must return the operand size:
6680 force all bits outside the operand size to 1. */
6681 target_ulong mask
= (target_ulong
)-2 << (size
- 1);
6682 tcg_gen_ori_tl(cpu_T0
, cpu_T0
, mask
);
6683 gen_helper_ctz(cpu_T0
, cpu_T0
);
6685 /* For lzcnt/tzcnt, C and Z bits are defined and are
6686 related to the result. */
6687 gen_op_update1_cc();
6688 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
6690 /* For bsr/bsf, only the Z bit is defined and it is related
6691 to the input and not the result. */
6692 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
6693 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
6695 /* For bsr, return the bit index of the first 1 bit,
6696 not the count of leading zeros. */
6697 gen_helper_clz(cpu_T0
, cpu_T0
);
6698 tcg_gen_xori_tl(cpu_T0
, cpu_T0
, TARGET_LONG_BITS
- 1);
6700 gen_helper_ctz(cpu_T0
, cpu_T0
);
6702 /* ??? The manual says that the output is undefined when the
6703 input is zero, but real hardware leaves it unchanged, and
6704 real programs appear to depend on that. */
6705 tcg_gen_movi_tl(cpu_tmp0
, 0);
6706 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T0
, cpu_cc_dst
, cpu_tmp0
,
6707 cpu_regs
[reg
], cpu_T0
);
6709 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
6711 /************************/
6713 case 0x27: /* daa */
6716 gen_update_cc_op(s
);
6717 gen_helper_daa(cpu_env
);
6718 set_cc_op(s
, CC_OP_EFLAGS
);
6720 case 0x2f: /* das */
6723 gen_update_cc_op(s
);
6724 gen_helper_das(cpu_env
);
6725 set_cc_op(s
, CC_OP_EFLAGS
);
6727 case 0x37: /* aaa */
6730 gen_update_cc_op(s
);
6731 gen_helper_aaa(cpu_env
);
6732 set_cc_op(s
, CC_OP_EFLAGS
);
6734 case 0x3f: /* aas */
6737 gen_update_cc_op(s
);
6738 gen_helper_aas(cpu_env
);
6739 set_cc_op(s
, CC_OP_EFLAGS
);
6741 case 0xd4: /* aam */
6744 val
= cpu_ldub_code(env
, s
->pc
++);
6746 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6748 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
6749 set_cc_op(s
, CC_OP_LOGICB
);
6752 case 0xd5: /* aad */
6755 val
= cpu_ldub_code(env
, s
->pc
++);
6756 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
6757 set_cc_op(s
, CC_OP_LOGICB
);
6759 /************************/
6761 case 0x90: /* nop */
6762 /* XXX: correct lock test for all insn */
6763 if (prefixes
& PREFIX_LOCK
) {
6766 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6768 goto do_xchg_reg_eax
;
6770 if (prefixes
& PREFIX_REPZ
) {
6771 gen_update_cc_op(s
);
6772 gen_jmp_im(pc_start
- s
->cs_base
);
6773 gen_helper_pause(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6774 s
->is_jmp
= DISAS_TB_JUMP
;
6777 case 0x9b: /* fwait */
6778 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6779 (HF_MP_MASK
| HF_TS_MASK
)) {
6780 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6782 gen_helper_fwait(cpu_env
);
6785 case 0xcc: /* int3 */
6786 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6788 case 0xcd: /* int N */
6789 val
= cpu_ldub_code(env
, s
->pc
++);
6790 if (s
->vm86
&& s
->iopl
!= 3) {
6791 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6793 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6796 case 0xce: /* into */
6799 gen_update_cc_op(s
);
6800 gen_jmp_im(pc_start
- s
->cs_base
);
6801 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6804 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6805 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6807 gen_debug(s
, pc_start
- s
->cs_base
);
6810 tb_flush(CPU(x86_env_get_cpu(env
)));
6811 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6815 case 0xfa: /* cli */
6817 if (s
->cpl
<= s
->iopl
) {
6818 gen_helper_cli(cpu_env
);
6820 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6824 gen_helper_cli(cpu_env
);
6826 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6830 case 0xfb: /* sti */
6831 if (s
->vm86
? s
->iopl
== 3 : s
->cpl
<= s
->iopl
) {
6832 gen_helper_sti(cpu_env
);
6833 /* interruptions are enabled only the first insn after sti */
6834 gen_jmp_im(s
->pc
- s
->cs_base
);
6835 gen_eob_inhibit_irq(s
, true);
6837 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6840 case 0x62: /* bound */
6844 modrm
= cpu_ldub_code(env
, s
->pc
++);
6845 reg
= (modrm
>> 3) & 7;
6846 mod
= (modrm
>> 6) & 3;
6849 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
6850 gen_lea_modrm(env
, s
, modrm
);
6851 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
6853 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6855 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6858 case 0x1c8 ... 0x1cf: /* bswap reg */
6859 reg
= (b
& 7) | REX_B(s
);
6860 #ifdef TARGET_X86_64
6861 if (dflag
== MO_64
) {
6862 gen_op_mov_v_reg(MO_64
, cpu_T0
, reg
);
6863 tcg_gen_bswap64_i64(cpu_T0
, cpu_T0
);
6864 gen_op_mov_reg_v(MO_64
, reg
, cpu_T0
);
6868 gen_op_mov_v_reg(MO_32
, cpu_T0
, reg
);
6869 tcg_gen_ext32u_tl(cpu_T0
, cpu_T0
);
6870 tcg_gen_bswap32_tl(cpu_T0
, cpu_T0
);
6871 gen_op_mov_reg_v(MO_32
, reg
, cpu_T0
);
6874 case 0xd6: /* salc */
6877 gen_compute_eflags_c(s
, cpu_T0
);
6878 tcg_gen_neg_tl(cpu_T0
, cpu_T0
);
6879 gen_op_mov_reg_v(MO_8
, R_EAX
, cpu_T0
);
6881 case 0xe0: /* loopnz */
6882 case 0xe1: /* loopz */
6883 case 0xe2: /* loop */
6884 case 0xe3: /* jecxz */
6886 TCGLabel
*l1
, *l2
, *l3
;
6888 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6889 next_eip
= s
->pc
- s
->cs_base
;
6891 if (dflag
== MO_16
) {
6895 l1
= gen_new_label();
6896 l2
= gen_new_label();
6897 l3
= gen_new_label();
6900 case 0: /* loopnz */
6902 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6903 gen_op_jz_ecx(s
->aflag
, l3
);
6904 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
6907 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6908 gen_op_jnz_ecx(s
->aflag
, l1
);
6912 gen_op_jz_ecx(s
->aflag
, l1
);
6917 gen_jmp_im(next_eip
);
6926 case 0x130: /* wrmsr */
6927 case 0x132: /* rdmsr */
6929 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6931 gen_update_cc_op(s
);
6932 gen_jmp_im(pc_start
- s
->cs_base
);
6934 gen_helper_rdmsr(cpu_env
);
6936 gen_helper_wrmsr(cpu_env
);
6940 case 0x131: /* rdtsc */
6941 gen_update_cc_op(s
);
6942 gen_jmp_im(pc_start
- s
->cs_base
);
6943 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6946 gen_helper_rdtsc(cpu_env
);
6947 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6949 gen_jmp(s
, s
->pc
- s
->cs_base
);
6952 case 0x133: /* rdpmc */
6953 gen_update_cc_op(s
);
6954 gen_jmp_im(pc_start
- s
->cs_base
);
6955 gen_helper_rdpmc(cpu_env
);
6957 case 0x134: /* sysenter */
6958 /* For Intel SYSENTER is valid on 64-bit */
6959 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6962 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6964 gen_helper_sysenter(cpu_env
);
6968 case 0x135: /* sysexit */
6969 /* For Intel SYSEXIT is valid on 64-bit */
6970 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6973 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6975 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
- 1));
6979 #ifdef TARGET_X86_64
6980 case 0x105: /* syscall */
6981 /* XXX: is it usable in real mode ? */
6982 gen_update_cc_op(s
);
6983 gen_jmp_im(pc_start
- s
->cs_base
);
6984 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6987 case 0x107: /* sysret */
6989 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6991 gen_helper_sysret(cpu_env
, tcg_const_i32(dflag
- 1));
6992 /* condition codes are modified only in long mode */
6994 set_cc_op(s
, CC_OP_EFLAGS
);
7000 case 0x1a2: /* cpuid */
7001 gen_update_cc_op(s
);
7002 gen_jmp_im(pc_start
- s
->cs_base
);
7003 gen_helper_cpuid(cpu_env
);
7005 case 0xf4: /* hlt */
7007 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7009 gen_update_cc_op(s
);
7010 gen_jmp_im(pc_start
- s
->cs_base
);
7011 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7012 s
->is_jmp
= DISAS_TB_JUMP
;
7016 modrm
= cpu_ldub_code(env
, s
->pc
++);
7017 mod
= (modrm
>> 6) & 3;
7018 op
= (modrm
>> 3) & 7;
7021 if (!s
->pe
|| s
->vm86
)
7023 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7024 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
7025 offsetof(CPUX86State
, ldt
.selector
));
7026 ot
= mod
== 3 ? dflag
: MO_16
;
7027 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7030 if (!s
->pe
|| s
->vm86
)
7033 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7035 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7036 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7037 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
7038 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7042 if (!s
->pe
|| s
->vm86
)
7044 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7045 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
7046 offsetof(CPUX86State
, tr
.selector
));
7047 ot
= mod
== 3 ? dflag
: MO_16
;
7048 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7051 if (!s
->pe
|| s
->vm86
)
7054 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7056 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7057 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7058 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
7059 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7064 if (!s
->pe
|| s
->vm86
)
7066 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7067 gen_update_cc_op(s
);
7069 gen_helper_verr(cpu_env
, cpu_T0
);
7071 gen_helper_verw(cpu_env
, cpu_T0
);
7073 set_cc_op(s
, CC_OP_EFLAGS
);
7081 modrm
= cpu_ldub_code(env
, s
->pc
++);
7083 CASE_MODRM_MEM_OP(0): /* sgdt */
7084 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7085 gen_lea_modrm(env
, s
, modrm
);
7086 tcg_gen_ld32u_tl(cpu_T0
,
7087 cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7088 gen_op_st_v(s
, MO_16
, cpu_T0
, cpu_A0
);
7089 gen_add_A0_im(s
, 2);
7090 tcg_gen_ld_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7091 if (dflag
== MO_16
) {
7092 tcg_gen_andi_tl(cpu_T0
, cpu_T0
, 0xffffff);
7094 gen_op_st_v(s
, CODE64(s
) + MO_32
, cpu_T0
, cpu_A0
);
7097 case 0xc8: /* monitor */
7098 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || s
->cpl
!= 0) {
7101 gen_update_cc_op(s
);
7102 gen_jmp_im(pc_start
- s
->cs_base
);
7103 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EAX
]);
7104 gen_extu(s
->aflag
, cpu_A0
);
7105 gen_add_A0_ds_seg(s
);
7106 gen_helper_monitor(cpu_env
, cpu_A0
);
7109 case 0xc9: /* mwait */
7110 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || s
->cpl
!= 0) {
7113 gen_update_cc_op(s
);
7114 gen_jmp_im(pc_start
- s
->cs_base
);
7115 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7119 case 0xca: /* clac */
7120 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7124 gen_helper_clac(cpu_env
);
7125 gen_jmp_im(s
->pc
- s
->cs_base
);
7129 case 0xcb: /* stac */
7130 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7134 gen_helper_stac(cpu_env
);
7135 gen_jmp_im(s
->pc
- s
->cs_base
);
7139 CASE_MODRM_MEM_OP(1): /* sidt */
7140 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7141 gen_lea_modrm(env
, s
, modrm
);
7142 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7143 gen_op_st_v(s
, MO_16
, cpu_T0
, cpu_A0
);
7144 gen_add_A0_im(s
, 2);
7145 tcg_gen_ld_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
7146 if (dflag
== MO_16
) {
7147 tcg_gen_andi_tl(cpu_T0
, cpu_T0
, 0xffffff);
7149 gen_op_st_v(s
, CODE64(s
) + MO_32
, cpu_T0
, cpu_A0
);
7152 case 0xd0: /* xgetbv */
7153 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7154 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7155 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7158 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_ECX
]);
7159 gen_helper_xgetbv(cpu_tmp1_i64
, cpu_env
, cpu_tmp2_i32
);
7160 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], cpu_tmp1_i64
);
7163 case 0xd1: /* xsetbv */
7164 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7165 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7166 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7170 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7173 tcg_gen_concat_tl_i64(cpu_tmp1_i64
, cpu_regs
[R_EAX
],
7175 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_ECX
]);
7176 gen_helper_xsetbv(cpu_env
, cpu_tmp2_i32
, cpu_tmp1_i64
);
7177 /* End TB because translation flags may change. */
7178 gen_jmp_im(s
->pc
- pc_start
);
7182 case 0xd8: /* VMRUN */
7183 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
) {
7187 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7190 gen_update_cc_op(s
);
7191 gen_jmp_im(pc_start
- s
->cs_base
);
7192 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
- 1),
7193 tcg_const_i32(s
->pc
- pc_start
));
7195 s
->is_jmp
= DISAS_TB_JUMP
;
7198 case 0xd9: /* VMMCALL */
7199 if (!(s
->flags
& HF_SVME_MASK
)) {
7202 gen_update_cc_op(s
);
7203 gen_jmp_im(pc_start
- s
->cs_base
);
7204 gen_helper_vmmcall(cpu_env
);
7207 case 0xda: /* VMLOAD */
7208 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
) {
7212 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7215 gen_update_cc_op(s
);
7216 gen_jmp_im(pc_start
- s
->cs_base
);
7217 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
- 1));
7220 case 0xdb: /* VMSAVE */
7221 if (!(s
->flags
& HF_SVME_MASK
) || !s
->pe
) {
7225 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7228 gen_update_cc_op(s
);
7229 gen_jmp_im(pc_start
- s
->cs_base
);
7230 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
- 1));
        case 0xdc: /* STGI */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_stgi(cpu_env);
            break;
        case 0xdd: /* CLGI */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_clgi(cpu_env);
            break;
        case 0xde: /* SKINIT */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_skinit(cpu_env);
            break;
        case 0xdf: /* INVLPGA */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
            break;
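        /* lgdt/lidt take a memory operand holding a 16-bit limit followed
         * by the table base; with a 16-bit operand size the base that is
         * stored is truncated to 24 bits.
         */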
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
            break;
        CASE_MODRM_MEM_OP(3): /* lidt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;
        CASE_MODRM_OP(4): /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
            if (CODE64(s)) {
                mod = (modrm >> 6) & 3;
                ot = (mod != 3 ? MO_16 : s->dflag);
            } else {
                ot = MO_16;
            }
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
            break;
        CASE_MODRM_OP(6): /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_helper_lmsw(cpu_env, cpu_T0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, cpu_A0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(cpu_T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;
        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            if (s->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_rdtscp(cpu_env);
            if (s->tb->cflags & CF_USE_ICOUNT) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;

        default:
            goto unknown_op;
        }
        break;
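    /* invd/wbinvd only need the privilege check and the SVM intercept;
     * TCG does not model the caches, so no other code is generated.
     */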
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(MO_32, cpu_T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
                }
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            }
        } else
#endif
        {
            int label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = MO_16;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
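    /* lar/lsl: the helpers leave CC_Z in cc_src; the result is written
     * back to the destination register only when the selector was valid.
     */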
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            int label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, cpu_T0);
            } else {
                gen_helper_lsl(t0, cpu_env, cpu_T0);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetchnt0 */
        case 2: /* prefetchnt0 */
        case 3: /* prefetchnt0 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
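    /* 0f 1a and 0f 1b are the MPX bound instructions when MPX is enabled;
     * the mandatory prefix selects between bndcl/bndcu/bndcn, bndmov and
     * bndldx/bndstx.  Without MPX they fall back to multi-byte nops.
     */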
    case 0x11a:
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
                tcg_temp_free_i64(notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
        break;
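    /* Moves to and from the control and debug registers are privileged and
     * always treat the modrm operand as a register, whatever the mod bits
     * say; writes end the TB since they can change translation state.
     */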
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch(reg) {
            case 0:
            case 2:
            case 3:
            case 4:
            case 8:
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    gen_op_mov_v_reg(ot, cpu_T0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                         cpu_T0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_v(ot, rm, cpu_T0);
                }
                break;
            default:
                goto unknown_op;
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(ot, cpu_T0, rm);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
                gen_op_mov_reg_v(ot, rm, cpu_T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = cpu_ldub_code(env, s->pc++);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, cpu_A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, cpu_A0);
            break;
        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
            break;
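        /* XSAVE/XRSTOR/XSAVEOPT take the EDX:EAX requested-feature bitmap;
         * it is packed into one 64-bit temporary before calling the helper.
         */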
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            gen_update_cc_op(s);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
            }
            break;
        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xc8: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xd8: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
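        /* The fence encodings below only validate the prefixes and CPUID
         * feature bits; no barrier op is emitted here.
         */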
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
        case 0xe8 ... 0xef: /* lfence */
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
        gen_op_mov_reg_v(ot, reg, cpu_T0);

        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto unknown_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_illegal_opcode(s);
    return s->pc;
 unknown_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_unknown_opcode(env, s);
    return s->pc;
}
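/* Register the fixed TCG globals that back the guest CPU state: the
 * condition-code scratch values, the general registers, the segment bases
 * and the MPX bound registers all live in CPUX86State and are exposed to
 * the code generator by name here.
 */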
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint32_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = cpu_mmu_index(env, false);
    }
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are execured with different paths
       in !repz_opt and repz_opt modes. The first one was used
       always except single step mode. And this setting
       disables jumps optimization and control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    for(;;) {
        tcg_gen_insn_start(pc_ptr, dc->cc_op);
        num_insns++;

        /* If RF is set, suppress an internally generated breakpoint.  */
        if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
                                         tb->flags & HF_RF_MASK
                                         ? BP_GDB : BP_ANY))) {
            gen_debug(dc, pc_ptr - dc->cs_base);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            pc_ptr += 1;
            goto done_generating;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           change to be happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception. Do it only when boundary is
           crossed by the first instruction in the block.
           If current instruction already crossed the bound - it's ok,
           because an exception hasn't stopped this code.
         */
        if ((tb->cflags & CF_USE_ICOUNT)
            && ((pc_ptr & TARGET_PAGE_MASK)
                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if too long translation, stop generation too */
        if (tcg_op_buf_full() ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
}
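/* Rebuild static translator state after an exception or interrupt: the
 * values recorded by tcg_gen_insn_start() hold the instruction's pc and
 * cc_op; eip is recovered by subtracting the TB's cs_base.
 */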
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}