 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
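/* A ModRM byte is laid out as mod[7:6] op-or-reg[5:3] rm[2:0]; mod values
   0..2 are the memory forms and mod == 3 is the register form, which is why
   the memory-only macro above stops at (2 << 6).  For example, with OP = 4,
   CASE_MODRM_MEM_OP(4) matches bytes 0x20..0x27, 0x60..0x67 and 0xa0..0xa7. */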
//#define MACRO_TEST   1
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

static int x86_64_hregs;
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc_start;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_xsave_features;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
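/* Example of the lazy-flags scheme: after a SUB, CC_DST holds the result,
   CC_SRC the second operand and CC_SRCT the original first operand, so the
   carry flag can later be recovered as CC_SRCT < CC_SRC without having
   materialized EFLAGS at translation time. */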
static void set_cc_op(DisasContext *s, CCOp op)
    if (s->cc_op == op) {

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        s->cc_op_dirty = true;

static void gen_update_cc_op(DisasContext *s)
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
    if (reg >= 8 || x86_64_hregs) {
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
        return ot == MO_16 ? MO_16 : MO_64;

/* Select the size of the stack pointer.  */
static inline TCGMemOp mo_stacksize(DisasContext *s)
    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
    return ot == MO_64 ? MO_64 : MO_32;

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
    return b & 1 ? ot : MO_8;

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
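/* Example: for the MOV r/m,reg opcode pair 0x88/0x89, mo_b_d(0x88, ot)
   yields MO_8 while mo_b_d(0x89, ot) yields the full operand size ot. */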
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        tcg_gen_mov_tl(cpu_regs[reg], t0);
static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
static void gen_add_A0_im(DisasContext *s, int val)
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
        tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
static inline void gen_op_jmp_v(TCGv dest)
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(idx, d, cpu_T0);

static inline void gen_jmp_im(target_ulong pc)
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
        tcg_gen_mov_tl(cpu_A0, a0);
        tcg_gen_ext32u_tl(cpu_A0, a0);
        tcg_gen_ext16u_tl(cpu_A0, a0);

        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(cpu_A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
            tcg_gen_add_tl(cpu_A0, a0, seg);
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
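/* Example: a data access to ds:[ebx + 4] reaches this point with a0 already
   holding ebx + 4; when a non-zero segment base is in play, A0 becomes the
   base taken from cpu_seg_base[] plus that offset, truncated to the current
   address size. */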
static inline void gen_string_movl_A0_ESI(DisasContext *s)
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);

static inline void gen_string_movl_A0_EDI(DisasContext *s)
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
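/* df is kept in the CPU state as 1 or -1, so shifting it left by the MO_*
   size gives the per-element stride (+/-1, +/-2, +/-4, +/-8) that the string
   helpers add to ESI/EDI after each iteration. */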
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
            tcg_gen_ext8s_tl(dst, src);
            tcg_gen_ext8u_tl(dst, src);
            tcg_gen_ext16s_tl(dst, src);
            tcg_gen_ext16u_tl(dst, src);
            tcg_gen_ext32s_tl(dst, src);
            tcg_gen_ext32u_tl(dst, src);

static void gen_extu(TCGMemOp ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, false);

static void gen_exts(TCGMemOp ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, true);
static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
        gen_helper_inb(v, cpu_env, n);
        gen_helper_inw(v, cpu_env, n);
        gen_helper_inl(v, cpu_env, n);

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
        gen_helper_outb(cpu_env, v, n);
        gen_helper_outw(cpu_env, v, n);
        gen_helper_outl(cpu_env, v, n);
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
    if (s->flags & HF_SVMI_MASK) {
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
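/* In protected mode with CPL > IOPL, or in vm86 mode, the check_io helpers
   consult the TSS I/O permission bitmap and raise an exception if the port
   range is not accessible; under SVM the access is additionally reported to
   svm_check_io for possible interception. */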
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
static void gen_op_update1_cc(void)
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);

static void gen_op_update2_cc(void)
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);

static void gen_op_update3_cc(TCGv reg)
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);

static inline void gen_op_testl_T0_T1_cc(void)
    tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);

static void gen_op_update_neg_cc(void)
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
    TCGv zero, dst, src1, src2;

    if (s->cc_op == CC_OP_EFLAGS) {
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
        if (dead & USES_CC_SRC) {
        if (dead & USES_CC_SRC2) {

    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
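/* After this returns, CC_SRC holds the assembled EFLAGS condition bits and
   cc_op is CC_OP_EFLAGS, so later readers need not know which instruction
   last produced the flags. */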
typedef struct CCPrepare {

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SARB ... CC_OP_SARQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
        TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
        TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
        TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
        TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
    int inv, jcc_op, cond;

    jcc_op = (b >> 1) & 7;

    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };

        /* This actually generates good code for JC, JZ and JS.  */
            cc = gen_prepare_eflags_o(s, reg);
            cc = gen_prepare_eflags_c(s, reg);
            cc = gen_prepare_eflags_z(s, reg);
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            cc = gen_prepare_eflags_s(s, reg);
            cc = gen_prepare_eflags_p(s, reg);
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };

        cc.cond = tcg_invert_cond(cc.cond);
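/* The low bit of the jcc opcode value encodes negation, so JNE, JNS, etc.
   reuse the preparation of their positive counterparts and simply invert the
   resulting TCG condition at the end. */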
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
    CCPrepare cc = gen_prepare_cc(s, b, reg);

        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
            tcg_gen_mov_tl(reg, cc.reg);

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);

        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
    gen_setcc1(s, JCC_B << 1, reg);
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
    set_cc_op(s, CC_OP_DYNAMIC);
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_jmp_tb(s, next_eip, 1);
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
    gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
    if (s->flags & HF_IOBPT_MASK) {
        TCGv_i32 t_size = tcg_const_i32(1 << ot);
        TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
        tcg_temp_free_i32(t_size);
        tcg_temp_free(t_next);
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
    if (s->tb->cflags & CF_USE_ICOUNT) {
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T0, 0);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
    if (s->tb->cflags & CF_USE_ICOUNT) {
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
        gen_op_jz_ecx(s->aflag, l2);                                          \
        gen_jmp(s, cur_eip);                                                  \

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
        gen_op_jz_ecx(s->aflag, l2);                                          \
        gen_jmp(s, cur_eip);                                                  \
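/* Each REP iteration therefore ends its translation block and jumps back to
   the instruction itself (cur_eip), re-checking ECX on entry; this keeps the
   string loop interruptible and restartable after a page fault. */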
static void gen_helper_fp_arith_ST0_FT0(int op)
        gen_helper_fadd_ST0_FT0(cpu_env);
        gen_helper_fmul_ST0_FT0(cpu_env);
        gen_helper_fcom_ST0_FT0(cpu_env);
        gen_helper_fcom_ST0_FT0(cpu_env);
        gen_helper_fsub_ST0_FT0(cpu_env);
        gen_helper_fsubr_ST0_FT0(cpu_env);
        gen_helper_fdiv_ST0_FT0(cpu_env);
        gen_helper_fdivr_ST0_FT0(cpu_env);

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
    TCGv_i32 tmp = tcg_const_i32(opreg);
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
        gen_op_mov_v_reg(ot, cpu_T0, d);
        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
        tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
        tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
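/* The CMP case shares the SUB flag bookkeeping above but writes only the
   CC_* globals, never the destination operand. */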
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
        gen_op_mov_v_reg(ot, cpu_T0, d);
        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
    gen_compute_eflags_c(s1, cpu_cc_src);
        tcg_gen_addi_tl(cpu_T0, cpu_T0, 1);
        set_cc_op(s1, CC_OP_INCB + ot);
        tcg_gen_addi_tl(cpu_T0, cpu_T0, -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
    TCGv_i32 z32, s32, oldop;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_dst, result);
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
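/* x86 leaves the flags untouched when the shift count is zero, hence the
   movcond updates above: CC_DST, CC_SRC and cc_op only change when the count
   is non-zero. */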
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);

            gen_exts(ot, cpu_T0);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_extu(ot, cpu_T0);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
        tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);

    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_v_reg(ot, cpu_T0, op1);

            gen_exts(ot, cpu_T0);
            tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
            tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
            gen_extu(ot, cpu_T0);
            tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
            tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
        tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
        tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);

    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
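/* The shift count here is a compile-time immediate, so unlike the T1 variant
   no movcond is needed: the flags are updated only when the masked count is
   non-zero. */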
1462 static void gen_rot_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
, int is_right
)
1464 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1468 if (op1
== OR_TMP0
) {
1469 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1471 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1474 tcg_gen_andi_tl(cpu_T1
, cpu_T1
, mask
);
1478 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1479 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
1480 tcg_gen_muli_tl(cpu_T0
, cpu_T0
, 0x01010101);
1483 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1484 tcg_gen_deposit_tl(cpu_T0
, cpu_T0
, cpu_T0
, 16, 16);
1487 #ifdef TARGET_X86_64
1489 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
1490 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
1492 tcg_gen_rotr_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1494 tcg_gen_rotl_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1496 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
1501 tcg_gen_rotr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1503 tcg_gen_rotl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1509 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1511 /* We'll need the flags computed into CC_SRC. */
1512 gen_compute_eflags(s
);
1514 /* The value that was "rotated out" is now present at the other end
1515 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1516 since we've computed the flags into CC_SRC, these variables are
1519 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
- 1);
1520 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T0
, mask
);
1521 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1523 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
);
1524 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T0
, 1);
1526 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1527 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1529 /* Now conditionally store the new CC_OP value. If the shift count
1530 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1531 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1532 exactly as we computed above. */
1533 t0
= tcg_const_i32(0);
1534 t1
= tcg_temp_new_i32();
1535 tcg_gen_trunc_tl_i32(t1
, cpu_T1
);
1536 tcg_gen_movi_i32(cpu_tmp2_i32
, CC_OP_ADCOX
);
1537 tcg_gen_movi_i32(cpu_tmp3_i32
, CC_OP_EFLAGS
);
1538 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1539 cpu_tmp2_i32
, cpu_tmp3_i32
);
1540 tcg_temp_free_i32(t0
);
1541 tcg_temp_free_i32(t1
);
1543 /* The CC_OP value is no longer predictable. */
1544 set_cc_op(s
, CC_OP_DYNAMIC
);
1547 static void gen_rot_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1550 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1554 if (op1
== OR_TMP0
) {
1555 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1557 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1563 #ifdef TARGET_X86_64
1565 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
1567 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1569 tcg_gen_rotli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1571 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
1576 tcg_gen_rotri_tl(cpu_T0
, cpu_T0
, op2
);
1578 tcg_gen_rotli_tl(cpu_T0
, cpu_T0
, op2
);
1589 shift
= mask
+ 1 - shift
;
1591 gen_extu(ot
, cpu_T0
);
1592 tcg_gen_shli_tl(cpu_tmp0
, cpu_T0
, shift
);
1593 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, mask
+ 1 - shift
);
1594 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
1600 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1603 /* Compute the flags into CC_SRC. */
1604 gen_compute_eflags(s
);
1606 /* The value that was "rotated out" is now present at the other end
1607 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1608 since we've computed the flags into CC_SRC, these variables are
1611 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
- 1);
1612 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T0
, mask
);
1613 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1615 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
);
1616 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T0
, 1);
1618 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1619 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1620 set_cc_op(s
, CC_OP_ADCOX
);
1624 /* XXX: add faster immediate = 1 case */
1625 static void gen_rotc_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1628 gen_compute_eflags(s
);
1629 assert(s
->cc_op
== CC_OP_EFLAGS
);
1633 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1635 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1640 gen_helper_rcrb(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1643 gen_helper_rcrw(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1646 gen_helper_rcrl(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1648 #ifdef TARGET_X86_64
1650 gen_helper_rcrq(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1659 gen_helper_rclb(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1662 gen_helper_rclw(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1665 gen_helper_rcll(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1667 #ifdef TARGET_X86_64
1669 gen_helper_rclq(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1677 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1680 /* XXX: add faster immediate case */
1681 static void gen_shiftd_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1682 bool is_right
, TCGv count_in
)
1684 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1688 if (op1
== OR_TMP0
) {
1689 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1691 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1694 count
= tcg_temp_new();
1695 tcg_gen_andi_tl(count
, count_in
, mask
);
1699 /* Note: we implement the Intel behaviour for shift count > 16.
1700 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1701 portion by constructing it as a 32-bit value. */
1703 tcg_gen_deposit_tl(cpu_tmp0
, cpu_T0
, cpu_T1
, 16, 16);
1704 tcg_gen_mov_tl(cpu_T1
, cpu_T0
);
1705 tcg_gen_mov_tl(cpu_T0
, cpu_tmp0
);
1707 tcg_gen_deposit_tl(cpu_T1
, cpu_T0
, cpu_T1
, 16, 16);
1710 #ifdef TARGET_X86_64
1712 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1713 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1715 tcg_gen_concat_tl_i64(cpu_T0
, cpu_T0
, cpu_T1
);
1716 tcg_gen_shr_i64(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1717 tcg_gen_shr_i64(cpu_T0
, cpu_T0
, count
);
1719 tcg_gen_concat_tl_i64(cpu_T0
, cpu_T1
, cpu_T0
);
1720 tcg_gen_shl_i64(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1721 tcg_gen_shl_i64(cpu_T0
, cpu_T0
, count
);
1722 tcg_gen_shri_i64(cpu_tmp0
, cpu_tmp0
, 32);
1723 tcg_gen_shri_i64(cpu_T0
, cpu_T0
, 32);
1728 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1730 tcg_gen_shr_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1732 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1733 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, count
);
1734 tcg_gen_shl_tl(cpu_T1
, cpu_T1
, cpu_tmp4
);
1736 tcg_gen_shl_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1738 /* Only needed if count > 16, for Intel behaviour. */
1739 tcg_gen_subfi_tl(cpu_tmp4
, 33, count
);
1740 tcg_gen_shr_tl(cpu_tmp4
, cpu_T1
, cpu_tmp4
);
1741 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, cpu_tmp4
);
1744 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1745 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, count
);
1746 tcg_gen_shr_tl(cpu_T1
, cpu_T1
, cpu_tmp4
);
1748 tcg_gen_movi_tl(cpu_tmp4
, 0);
1749 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T1
, count
, cpu_tmp4
,
1751 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1756 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1758 gen_shift_flags(s
, ot
, cpu_T0
, cpu_tmp0
, count
, is_right
);
1759 tcg_temp_free(count
);
1762 static void gen_shift(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int s
)
1765 gen_op_mov_v_reg(ot
, cpu_T1
, s
);
1768 gen_rot_rm_T1(s1
, ot
, d
, 0);
1771 gen_rot_rm_T1(s1
, ot
, d
, 1);
1775 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1778 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1781 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1784 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1787 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1792 static void gen_shifti(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int c
)
1796 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1799 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1803 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1806 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1809 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1812 /* currently not optimized */
1813 tcg_gen_movi_tl(cpu_T1
, c
);
1814 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1819 /* Decompose an address. */
1821 typedef struct AddressParts
{
1829 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
1832 int def_seg
, base
, index
, scale
, mod
, rm
;
1841 mod
= (modrm
>> 6) & 3;
1843 base
= rm
| REX_B(s
);
1846 /* Normally filtered out earlier, but including this path
1847 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1856 int code
= cpu_ldub_code(env
, s
->pc
++);
1857 scale
= (code
>> 6) & 3;
1858 index
= ((code
>> 3) & 7) | REX_X(s
);
1860 index
= -1; /* no index */
1862 base
= (code
& 7) | REX_B(s
);
1868 if ((base
& 7) == 5) {
1870 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1872 if (CODE64(s
) && !havesib
) {
1874 disp
+= s
->pc
+ s
->rip_offset
;
1879 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
1883 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1888 /* For correct popl handling with esp. */
1889 if (base
== R_ESP
&& s
->popl_esp_hack
) {
1890 disp
+= s
->popl_esp_hack
;
1892 if (base
== R_EBP
|| base
== R_ESP
) {
1901 disp
= cpu_lduw_code(env
, s
->pc
);
1905 } else if (mod
== 1) {
1906 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
1908 disp
= (int16_t)cpu_lduw_code(env
, s
->pc
);
1953 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
1956 /* Compute the address, with a minimum number of TCG ops. */
1957 static TCGv
gen_lea_modrm_1(AddressParts a
)
1964 ea
= cpu_regs
[a
.index
];
1966 tcg_gen_shli_tl(cpu_A0
, cpu_regs
[a
.index
], a
.scale
);
1970 tcg_gen_add_tl(cpu_A0
, ea
, cpu_regs
[a
.base
]);
1973 } else if (a
.base
>= 0) {
1974 ea
= cpu_regs
[a
.base
];
1976 if (TCGV_IS_UNUSED(ea
)) {
1977 tcg_gen_movi_tl(cpu_A0
, a
.disp
);
1979 } else if (a
.disp
!= 0) {
1980 tcg_gen_addi_tl(cpu_A0
, ea
, a
.disp
);
1987 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1989 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
1990 TCGv ea
= gen_lea_modrm_1(a
);
1991 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
1994 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1996 (void)gen_lea_modrm_0(env
, s
, modrm
);
1999 /* Used for BNDCL, BNDCU, BNDCN. */
2000 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2001 TCGCond cond
, TCGv_i64 bndv
)
2003 TCGv ea
= gen_lea_modrm_1(gen_lea_modrm_0(env
, s
, modrm
));
2005 tcg_gen_extu_tl_i64(cpu_tmp1_i64
, ea
);
2007 tcg_gen_ext32u_i64(cpu_tmp1_i64
, cpu_tmp1_i64
);
2009 tcg_gen_setcond_i64(cond
, cpu_tmp1_i64
, cpu_tmp1_i64
, bndv
);
2010 tcg_gen_extrl_i64_i32(cpu_tmp2_i32
, cpu_tmp1_i64
);
2011 gen_helper_bndck(cpu_env
, cpu_tmp2_i32
);
2014 /* used for LEA and MOV AX, mem */
2015 static void gen_add_A0_ds_seg(DisasContext
*s
)
2017 gen_lea_v_seg(s
, s
->aflag
, cpu_A0
, R_DS
, s
->override
);
2020 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2022 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2023 TCGMemOp ot
, int reg
, int is_store
)
2027 mod
= (modrm
>> 6) & 3;
2028 rm
= (modrm
& 7) | REX_B(s
);
2032 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
2033 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
2035 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
2037 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2040 gen_lea_modrm(env
, s
, modrm
);
2043 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
2044 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
2046 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
2048 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2053 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
)
2059 ret
= cpu_ldub_code(env
, s
->pc
);
2063 ret
= cpu_lduw_code(env
, s
->pc
);
2067 #ifdef TARGET_X86_64
2070 ret
= cpu_ldl_code(env
, s
->pc
);
2079 static inline int insn_const_size(TCGMemOp ot
)
2088 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2090 TranslationBlock
*tb
;
2093 pc
= s
->cs_base
+ eip
;
2095 /* NOTE: we handle the case where the TB spans two pages here */
2096 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) ||
2097 (pc
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
)) {
2098 /* jump to same page: we can use a direct jump */
2099 tcg_gen_goto_tb(tb_num
);
2101 tcg_gen_exit_tb((uintptr_t)tb
+ tb_num
);
2103 /* jump to another page: currently not optimized */
2109 static inline void gen_jcc(DisasContext
*s
, int b
,
2110 target_ulong val
, target_ulong next_eip
)
2115 l1
= gen_new_label();
2118 gen_goto_tb(s
, 0, next_eip
);
2121 gen_goto_tb(s
, 1, val
);
2122 s
->is_jmp
= DISAS_TB_JUMP
;
2124 l1
= gen_new_label();
2125 l2
= gen_new_label();
2128 gen_jmp_im(next_eip
);
2138 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
, int b
,
2143 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2145 cc
= gen_prepare_cc(s
, b
, cpu_T1
);
2146 if (cc
.mask
!= -1) {
2147 TCGv t0
= tcg_temp_new();
2148 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2152 cc
.reg2
= tcg_const_tl(cc
.imm
);
2155 tcg_gen_movcond_tl(cc
.cond
, cpu_T0
, cc
.reg
, cc
.reg2
,
2156 cpu_T0
, cpu_regs
[reg
]);
2157 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2159 if (cc
.mask
!= -1) {
2160 tcg_temp_free(cc
.reg
);
2163 tcg_temp_free(cc
.reg2
);
2167 static inline void gen_op_movl_T0_seg(int seg_reg
)
2169 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
2170 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2173 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2175 tcg_gen_ext16u_tl(cpu_T0
, cpu_T0
);
2176 tcg_gen_st32_tl(cpu_T0
, cpu_env
,
2177 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2178 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], cpu_T0
, 4);
2181 /* move T0 to seg_reg and compute if the CPU state may change. Never
2182 call this function with seg_reg == R_CS */
2183 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
)
2185 if (s
->pe
&& !s
->vm86
) {
2186 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
2187 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2188 /* abort translation because the addseg value may change or
2189 because ss32 may change. For R_SS, translation must always
2190 stop as a special handling must be done to disable hardware
2191 interrupts for the next instruction */
2192 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2193 s
->is_jmp
= DISAS_TB_JUMP
;
2195 gen_op_movl_seg_T0_vm(seg_reg
);
2196 if (seg_reg
== R_SS
)
2197 s
->is_jmp
= DISAS_TB_JUMP
;
2201 static inline int svm_is_rep(int prefixes
)
2203 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2207 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2208 uint32_t type
, uint64_t param
)
2210 /* no SVM activated; fast case */
2211 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2213 gen_update_cc_op(s
);
2214 gen_jmp_im(pc_start
- s
->cs_base
);
2215 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2216 tcg_const_i64(param
));
2220 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2222 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2225 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2227 gen_op_add_reg_im(mo_stacksize(s
), R_ESP
, addend
);
2230 /* Generate a push. It depends on ss32, addseg and dflag. */
2231 static void gen_push_v(DisasContext
*s
, TCGv val
)
2233 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2234 TCGMemOp a_ot
= mo_stacksize(s
);
2235 int size
= 1 << d_ot
;
2236 TCGv new_esp
= cpu_A0
;
2238 tcg_gen_subi_tl(cpu_A0
, cpu_regs
[R_ESP
], size
);
2243 tcg_gen_mov_tl(new_esp
, cpu_A0
);
2245 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2248 gen_op_st_v(s
, d_ot
, val
, cpu_A0
);
2249 gen_op_mov_reg_v(a_ot
, R_ESP
, new_esp
);
2252 /* two step pop is necessary for precise exceptions */
2253 static TCGMemOp
gen_pop_T0(DisasContext
*s
)
2255 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2257 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2258 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2263 static inline void gen_pop_update(DisasContext
*s
, TCGMemOp ot
)
2265 gen_stack_update(s
, 1 << ot
);
2268 static inline void gen_stack_A0(DisasContext
*s
)
2270 gen_lea_v_seg(s
, s
->ss32
? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2273 static void gen_pusha(DisasContext
*s
)
2275 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2276 TCGMemOp d_ot
= s
->dflag
;
2277 int size
= 1 << d_ot
;
2280 for (i
= 0; i
< 8; i
++) {
2281 tcg_gen_addi_tl(cpu_A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2282 gen_lea_v_seg(s
, s_ot
, cpu_A0
, R_SS
, -1);
2283 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], cpu_A0
);
2286 gen_stack_update(s
, -8 * size
);
2289 static void gen_popa(DisasContext
*s
)
2291 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2292 TCGMemOp d_ot
= s
->dflag
;
2293 int size
= 1 << d_ot
;
2296 for (i
= 0; i
< 8; i
++) {
2297 /* ESP is not reloaded */
2298 if (7 - i
== R_ESP
) {
2301 tcg_gen_addi_tl(cpu_A0
, cpu_regs
[R_ESP
], i
* size
);
2302 gen_lea_v_seg(s
, s_ot
, cpu_A0
, R_SS
, -1);
2303 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2304 gen_op_mov_reg_v(d_ot
, 7 - i
, cpu_T0
);
2307 gen_stack_update(s
, 8 * size
);
2310 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2312 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2313 TCGMemOp a_ot
= CODE64(s
) ? MO_64
: s
->ss32
? MO_32
: MO_16
;
2314 int size
= 1 << d_ot
;
2316 /* Push BP; compute FrameTemp into T1. */
2317 tcg_gen_subi_tl(cpu_T1
, cpu_regs
[R_ESP
], size
);
2318 gen_lea_v_seg(s
, a_ot
, cpu_T1
, R_SS
, -1);
2319 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], cpu_A0
);
2325 /* Copy level-1 pointers from the previous frame. */
2326 for (i
= 1; i
< level
; ++i
) {
2327 tcg_gen_subi_tl(cpu_A0
, cpu_regs
[R_EBP
], size
* i
);
2328 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2329 gen_op_ld_v(s
, d_ot
, cpu_tmp0
, cpu_A0
);
2331 tcg_gen_subi_tl(cpu_A0
, cpu_T1
, size
* i
);
2332 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2333 gen_op_st_v(s
, d_ot
, cpu_tmp0
, cpu_A0
);
2336 /* Push the current FrameTemp as the last level. */
2337 tcg_gen_subi_tl(cpu_A0
, cpu_T1
, size
* level
);
2338 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2339 gen_op_st_v(s
, d_ot
, cpu_T1
, cpu_A0
);
2342 /* Copy the FrameTemp value to EBP. */
2343 gen_op_mov_reg_v(a_ot
, R_EBP
, cpu_T1
);
2345 /* Compute the final value of ESP. */
2346 tcg_gen_subi_tl(cpu_T1
, cpu_T1
, esp_addend
+ size
* level
);
2347 gen_op_mov_reg_v(a_ot
, R_ESP
, cpu_T1
);
2350 static void gen_leave(DisasContext
*s
)
2352 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2353 TCGMemOp a_ot
= mo_stacksize(s
);
2355 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2356 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2358 tcg_gen_addi_tl(cpu_T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2360 gen_op_mov_reg_v(d_ot
, R_EBP
, cpu_T0
);
2361 gen_op_mov_reg_v(a_ot
, R_ESP
, cpu_T1
);
2364 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2366 gen_update_cc_op(s
);
2367 gen_jmp_im(cur_eip
);
2368 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2369 s
->is_jmp
= DISAS_TB_JUMP
;
2372 /* Generate #UD for the current instruction. The assumption here is that
2373 the instruction is known, but it isn't allowed in the current cpu mode. */
2374 static void gen_illegal_opcode(DisasContext
*s
)
2376 gen_exception(s
, EXCP06_ILLOP
, s
->pc_start
- s
->cs_base
);
2379 /* Similarly, except that the assumption here is that we don't decode
2380 the instruction at all -- either a missing opcode, an unimplemented
2381 feature, or just a bogus instruction stream. */
2382 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2384 gen_illegal_opcode(s
);
2386 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2387 target_ulong pc
= s
->pc_start
, end
= s
->pc
;
2388 qemu_log("ILLOPC: " TARGET_FMT_lx
":", pc
);
2389 for (; pc
< end
; ++pc
) {
2390 qemu_log(" %02x", cpu_ldub_code(env
, pc
));
2396 /* an interrupt is different from an exception because of the
2398 static void gen_interrupt(DisasContext
*s
, int intno
,
2399 target_ulong cur_eip
, target_ulong next_eip
)
2401 gen_update_cc_op(s
);
2402 gen_jmp_im(cur_eip
);
2403 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2404 tcg_const_i32(next_eip
- cur_eip
));
2405 s
->is_jmp
= DISAS_TB_JUMP
;
2408 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2410 gen_update_cc_op(s
);
2411 gen_jmp_im(cur_eip
);
2412 gen_helper_debug(cpu_env
);
2413 s
->is_jmp
= DISAS_TB_JUMP
;
2416 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2418 if ((s
->flags
& mask
) == 0) {
2419 TCGv_i32 t
= tcg_temp_new_i32();
2420 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2421 tcg_gen_ori_i32(t
, t
, mask
);
2422 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2423 tcg_temp_free_i32(t
);
2428 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2430 if (s
->flags
& mask
) {
2431 TCGv_i32 t
= tcg_temp_new_i32();
2432 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2433 tcg_gen_andi_i32(t
, t
, ~mask
);
2434 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2435 tcg_temp_free_i32(t
);
2440 /* Clear BND registers during legacy branches. */
2441 static void gen_bnd_jmp(DisasContext
*s
)
2443 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2444 and if the BNDREGs are known to be in use (non-zero) already.
2445 The helper itself will check BNDPRESERVE at runtime. */
2446 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2447 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2448 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2449 gen_helper_bnd_jmp(cpu_env
);
2453 /* Generate an end of block. Trace exception is also generated if needed.
2454 If IIM, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2455 static void gen_eob_inhibit_irq(DisasContext
*s
, bool inhibit
)
2457 gen_update_cc_op(s
);
2459 /* If several instructions disable interrupts, only the first does it. */
2460 if (inhibit
&& !(s
->flags
& HF_INHIBIT_IRQ_MASK
)) {
2461 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2463 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2466 if (s
->tb
->flags
& HF_RF_MASK
) {
2467 gen_helper_reset_rf(cpu_env
);
2469 if (s
->singlestep_enabled
) {
2470 gen_helper_debug(cpu_env
);
2472 gen_helper_single_step(cpu_env
);
2476 s
->is_jmp
= DISAS_TB_JUMP
;
2479 /* End of block, resetting the inhibit irq flag. */
2480 static void gen_eob(DisasContext
*s
)
2482 gen_eob_inhibit_irq(s
, false);
2485 /* generate a jump to eip. No segment change must happen before as a
2486 direct call to the next block may occur */
2487 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2489 gen_update_cc_op(s
);
2490 set_cc_op(s
, CC_OP_DYNAMIC
);
2492 gen_goto_tb(s
, tb_num
, eip
);
2493 s
->is_jmp
= DISAS_TB_JUMP
;
2500 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2502 gen_jmp_tb(s
, eip
, 0);
2505 static inline void gen_ldq_env_A0(DisasContext
*s
, int offset
)
2507 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
2508 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2511 static inline void gen_stq_env_A0(DisasContext
*s
, int offset
)
2513 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
);
2514 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
2517 static inline void gen_ldo_env_A0(DisasContext
*s
, int offset
)
2519 int mem_index
= s
->mem_index
;
2520 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, mem_index
, MO_LEQ
);
2521 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(0)));
2522 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2523 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
, MO_LEQ
);
2524 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(1)));
2527 static inline void gen_sto_env_A0(DisasContext
*s
, int offset
)
2529 int mem_index
= s
->mem_index
;
2530 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(0)));
2531 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, mem_index
, MO_LEQ
);
2532 tcg_gen_addi_tl(cpu_tmp0
, cpu_A0
, 8);
2533 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(1)));
2534 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_tmp0
, mem_index
, MO_LEQ
);
static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);
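/* Naming convention for these helper-pointer typedefs: the character after
   "SSEFunc_" describes the produced result (0 = none, i = 32-bit, l = 64-bit,
   delivered as the first TCG argument), and the trailing letters list the
   remaining operands in order (e = env pointer, p = register pointer,
   i = TCGv_i32, l = TCGv_i64, t = target-length TCGv).  */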
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
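/* For example, MMX_OP2(paddb) expands to the pair
   { gen_helper_paddb_mmx, gen_helper_paddb_xmm }, and SSE_FOP(add) to the
   four-entry row { gen_helper_addps, gen_helper_addpd, gen_helper_addss,
   gen_helper_addsd, }.  */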
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
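/* sse_op_table1 is indexed by the second opcode byte and then by b1, the
   mandatory-prefix selector computed in gen_sse() below (0: no prefix,
   1: 66, 2: F3, 3: F2).  SSE_SPECIAL and SSE_DUMMY entries are handled by
   dedicated code rather than by a generic two-operand helper call.  */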
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
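/* sse_op_table2 serves the shift-by-immediate groups 0F 71/72/73: the row is
   ((b - 1) & 3) * 8 (0 for 0x71, 8 for 0x72, 16 for 0x73) plus the /r field
   of the modrm byte, and the column is again the 66-prefix selector.  */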
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
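/* sse_op_table6 and sse_op_table7 are the 0F 38 and 0F 3A three-byte opcode
   maps; table7 entries additionally consume an immediate byte.  Each entry's
   ext_mask is checked against s->cpuid_ext_features before the helper (or the
   SSE_SPECIAL path) is used.  */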
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto unknown_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_illegal_opcode(s);
        return;
    }
    if (is_xmm
        && !(s->flags & HF_OSFXSR_MASK)
        && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
        goto unknown_op;
    }
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
            /* If we were fully decoding this we might use illegal_op.  */
            goto unknown_op;
        }
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = cpu_ldub_code(env, s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
2970 case 0x0e7: /* movntq */
2974 gen_lea_modrm(env
, s
, modrm
);
2975 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
2977 case 0x1e7: /* movntdq */
2978 case 0x02b: /* movntps */
2979 case 0x12b: /* movntps */
2982 gen_lea_modrm(env
, s
, modrm
);
2983 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
2985 case 0x3f0: /* lddqu */
2988 gen_lea_modrm(env
, s
, modrm
);
2989 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
2991 case 0x22b: /* movntss */
2992 case 0x32b: /* movntsd */
2995 gen_lea_modrm(env
, s
, modrm
);
2997 gen_stq_env_A0(s
, offsetof(CPUX86State
,
2998 xmm_regs
[reg
].ZMM_Q(0)));
3000 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
3001 xmm_regs
[reg
].ZMM_L(0)));
3002 gen_op_st_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3005 case 0x6e: /* movd mm, ea */
3006 #ifdef TARGET_X86_64
3007 if (s
->dflag
== MO_64
) {
3008 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3009 tcg_gen_st_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3013 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3014 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3015 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3016 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3017 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3020 case 0x16e: /* movd xmm, ea */
3021 #ifdef TARGET_X86_64
3022 if (s
->dflag
== MO_64
) {
3023 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3024 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3025 offsetof(CPUX86State
,xmm_regs
[reg
]));
3026 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T0
);
3030 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3031 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3032 offsetof(CPUX86State
,xmm_regs
[reg
]));
3033 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3034 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3037 case 0x6f: /* movq mm, ea */
3039 gen_lea_modrm(env
, s
, modrm
);
3040 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3043 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3044 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3045 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3046 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3049 case 0x010: /* movups */
3050 case 0x110: /* movupd */
3051 case 0x028: /* movaps */
3052 case 0x128: /* movapd */
3053 case 0x16f: /* movdqa xmm, ea */
3054 case 0x26f: /* movdqu xmm, ea */
3056 gen_lea_modrm(env
, s
, modrm
);
3057 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3059 rm
= (modrm
& 7) | REX_B(s
);
3060 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3061 offsetof(CPUX86State
,xmm_regs
[rm
]));
3064 case 0x210: /* movss xmm, ea */
3066 gen_lea_modrm(env
, s
, modrm
);
3067 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3068 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3069 tcg_gen_movi_tl(cpu_T0
, 0);
3070 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3071 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3072 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3074 rm
= (modrm
& 7) | REX_B(s
);
3075 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3076 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3079 case 0x310: /* movsd xmm, ea */
3081 gen_lea_modrm(env
, s
, modrm
);
3082 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3083 xmm_regs
[reg
].ZMM_Q(0)));
3084 tcg_gen_movi_tl(cpu_T0
, 0);
3085 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3086 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3088 rm
= (modrm
& 7) | REX_B(s
);
3089 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3090 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3093 case 0x012: /* movlps */
3094 case 0x112: /* movlpd */
3096 gen_lea_modrm(env
, s
, modrm
);
3097 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3098 xmm_regs
[reg
].ZMM_Q(0)));
3101 rm
= (modrm
& 7) | REX_B(s
);
3102 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3103 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3106 case 0x212: /* movsldup */
3108 gen_lea_modrm(env
, s
, modrm
);
3109 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3111 rm
= (modrm
& 7) | REX_B(s
);
3112 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3113 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3114 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)),
3115 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(2)));
3117 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)),
3118 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3119 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)),
3120 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3122 case 0x312: /* movddup */
3124 gen_lea_modrm(env
, s
, modrm
);
3125 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3126 xmm_regs
[reg
].ZMM_Q(0)));
3128 rm
= (modrm
& 7) | REX_B(s
);
3129 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3130 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3132 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)),
3133 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3135 case 0x016: /* movhps */
3136 case 0x116: /* movhpd */
3138 gen_lea_modrm(env
, s
, modrm
);
3139 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3140 xmm_regs
[reg
].ZMM_Q(1)));
3143 rm
= (modrm
& 7) | REX_B(s
);
3144 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)),
3145 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3148 case 0x216: /* movshdup */
3150 gen_lea_modrm(env
, s
, modrm
);
3151 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3153 rm
= (modrm
& 7) | REX_B(s
);
3154 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)),
3155 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(1)));
3156 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)),
3157 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(3)));
3159 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3160 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3161 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)),
3162 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3167 int bit_index
, field_length
;
3169 if (b1
== 1 && reg
!= 0)
3171 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3172 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3173 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3174 offsetof(CPUX86State
,xmm_regs
[reg
]));
3176 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3177 tcg_const_i32(bit_index
),
3178 tcg_const_i32(field_length
));
3180 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3181 tcg_const_i32(bit_index
),
3182 tcg_const_i32(field_length
));
3185 case 0x7e: /* movd ea, mm */
3186 #ifdef TARGET_X86_64
3187 if (s
->dflag
== MO_64
) {
3188 tcg_gen_ld_i64(cpu_T0
, cpu_env
,
3189 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3190 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3194 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
3195 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3196 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3199 case 0x17e: /* movd ea, xmm */
3200 #ifdef TARGET_X86_64
3201 if (s
->dflag
== MO_64
) {
3202 tcg_gen_ld_i64(cpu_T0
, cpu_env
,
3203 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3204 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3208 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
3209 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3210 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3213 case 0x27e: /* movq xmm, ea */
3215 gen_lea_modrm(env
, s
, modrm
);
3216 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3217 xmm_regs
[reg
].ZMM_Q(0)));
3219 rm
= (modrm
& 7) | REX_B(s
);
3220 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3221 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3223 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)));
3225 case 0x7f: /* movq ea, mm */
3227 gen_lea_modrm(env
, s
, modrm
);
3228 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3231 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3232 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3235 case 0x011: /* movups */
3236 case 0x111: /* movupd */
3237 case 0x029: /* movaps */
3238 case 0x129: /* movapd */
3239 case 0x17f: /* movdqa ea, xmm */
3240 case 0x27f: /* movdqu ea, xmm */
3242 gen_lea_modrm(env
, s
, modrm
);
3243 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3245 rm
= (modrm
& 7) | REX_B(s
);
3246 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3247 offsetof(CPUX86State
,xmm_regs
[reg
]));
3250 case 0x211: /* movss ea, xmm */
3252 gen_lea_modrm(env
, s
, modrm
);
3253 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3254 gen_op_st_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3256 rm
= (modrm
& 7) | REX_B(s
);
3257 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)),
3258 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3261 case 0x311: /* movsd ea, xmm */
3263 gen_lea_modrm(env
, s
, modrm
);
3264 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3265 xmm_regs
[reg
].ZMM_Q(0)));
3267 rm
= (modrm
& 7) | REX_B(s
);
3268 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)),
3269 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3272 case 0x013: /* movlps */
3273 case 0x113: /* movlpd */
3275 gen_lea_modrm(env
, s
, modrm
);
3276 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3277 xmm_regs
[reg
].ZMM_Q(0)));
3282 case 0x017: /* movhps */
3283 case 0x117: /* movhpd */
3285 gen_lea_modrm(env
, s
, modrm
);
3286 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3287 xmm_regs
[reg
].ZMM_Q(1)));
3292 case 0x71: /* shift mm, im */
3295 case 0x171: /* shift xmm, im */
3301 val
= cpu_ldub_code(env
, s
->pc
++);
3303 tcg_gen_movi_tl(cpu_T0
, val
);
3304 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
3305 tcg_gen_movi_tl(cpu_T0
, 0);
3306 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(1)));
3307 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3309 tcg_gen_movi_tl(cpu_T0
, val
);
3310 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3311 tcg_gen_movi_tl(cpu_T0
, 0);
3312 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3313 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3315 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3316 (((modrm
>> 3)) & 7)][b1
];
3321 rm
= (modrm
& 7) | REX_B(s
);
3322 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3325 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3327 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3328 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3329 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3331 case 0x050: /* movmskps */
3332 rm
= (modrm
& 7) | REX_B(s
);
3333 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3334 offsetof(CPUX86State
,xmm_regs
[rm
]));
3335 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3336 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3338 case 0x150: /* movmskpd */
3339 rm
= (modrm
& 7) | REX_B(s
);
3340 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3341 offsetof(CPUX86State
,xmm_regs
[rm
]));
3342 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3343 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3345 case 0x02a: /* cvtpi2ps */
3346 case 0x12a: /* cvtpi2pd */
3347 gen_helper_enter_mmx(cpu_env
);
3349 gen_lea_modrm(env
, s
, modrm
);
3350 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3351 gen_ldq_env_A0(s
, op2_offset
);
3354 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3356 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3357 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3358 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3361 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3365 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3369 case 0x22a: /* cvtsi2ss */
3370 case 0x32a: /* cvtsi2sd */
3371 ot
= mo_64_32(s
->dflag
);
3372 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3373 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3374 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3376 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3377 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3378 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3380 #ifdef TARGET_X86_64
3381 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3382 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T0
);
3388 case 0x02c: /* cvttps2pi */
3389 case 0x12c: /* cvttpd2pi */
3390 case 0x02d: /* cvtps2pi */
3391 case 0x12d: /* cvtpd2pi */
3392 gen_helper_enter_mmx(cpu_env
);
3394 gen_lea_modrm(env
, s
, modrm
);
3395 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3396 gen_ldo_env_A0(s
, op2_offset
);
3398 rm
= (modrm
& 7) | REX_B(s
);
3399 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3401 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3402 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3403 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3406 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3409 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3412 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3415 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3419 case 0x22c: /* cvttss2si */
3420 case 0x32c: /* cvttsd2si */
3421 case 0x22d: /* cvtss2si */
3422 case 0x32d: /* cvtsd2si */
3423 ot
= mo_64_32(s
->dflag
);
3425 gen_lea_modrm(env
, s
, modrm
);
3427 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_Q(0)));
3429 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3430 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
3432 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3434 rm
= (modrm
& 7) | REX_B(s
);
3435 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3437 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3439 SSEFunc_i_ep sse_fn_i_ep
=
3440 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3441 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3442 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
3444 #ifdef TARGET_X86_64
3445 SSEFunc_l_ep sse_fn_l_ep
=
3446 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3447 sse_fn_l_ep(cpu_T0
, cpu_env
, cpu_ptr0
);
3452 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3454 case 0xc4: /* pinsrw */
3457 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3458 val
= cpu_ldub_code(env
, s
->pc
++);
3461 tcg_gen_st16_tl(cpu_T0
, cpu_env
,
3462 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_W(val
)));
3465 tcg_gen_st16_tl(cpu_T0
, cpu_env
,
3466 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3469 case 0xc5: /* pextrw */
3473 ot
= mo_64_32(s
->dflag
);
3474 val
= cpu_ldub_code(env
, s
->pc
++);
3477 rm
= (modrm
& 7) | REX_B(s
);
3478 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
,
3479 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_W(val
)));
3483 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
,
3484 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3486 reg
= ((modrm
>> 3) & 7) | rex_r
;
3487 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3489 case 0x1d6: /* movq ea, xmm */
3491 gen_lea_modrm(env
, s
, modrm
);
3492 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3493 xmm_regs
[reg
].ZMM_Q(0)));
3495 rm
= (modrm
& 7) | REX_B(s
);
3496 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)),
3497 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3498 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3501 case 0x2d6: /* movq2dq */
3502 gen_helper_enter_mmx(cpu_env
);
3504 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3505 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3506 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)));
3508 case 0x3d6: /* movdq2q */
3509 gen_helper_enter_mmx(cpu_env
);
3510 rm
= (modrm
& 7) | REX_B(s
);
3511 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3512 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3514 case 0xd7: /* pmovmskb */
3519 rm
= (modrm
& 7) | REX_B(s
);
3520 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3521 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3524 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3525 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3527 reg
= ((modrm
>> 3) & 7) | rex_r
;
3528 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3534 if ((b
& 0xf0) == 0xf0) {
3537 modrm
= cpu_ldub_code(env
, s
->pc
++);
3539 reg
= ((modrm
>> 3) & 7) | rex_r
;
3540 mod
= (modrm
>> 6) & 3;
3545 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3549 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3553 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3555 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3557 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3558 gen_lea_modrm(env
, s
, modrm
);
3560 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3561 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3562 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3563 gen_ldq_env_A0(s
, op2_offset
+
3564 offsetof(ZMMReg
, ZMM_Q(0)));
3566 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3567 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3568 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
3569 s
->mem_index
, MO_LEUL
);
3570 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3571 offsetof(ZMMReg
, ZMM_L(0)));
3573 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3574 tcg_gen_qemu_ld_tl(cpu_tmp0
, cpu_A0
,
3575 s
->mem_index
, MO_LEUW
);
3576 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3577 offsetof(ZMMReg
, ZMM_W(0)));
3579 case 0x2a: /* movntqda */
3580 gen_ldo_env_A0(s
, op1_offset
);
3583 gen_ldo_env_A0(s
, op2_offset
);
3587 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3589 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3591 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3592 gen_lea_modrm(env
, s
, modrm
);
3593 gen_ldq_env_A0(s
, op2_offset
);
3596 if (sse_fn_epp
== SSE_SPECIAL
) {
3600 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3601 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3602 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3605 set_cc_op(s
, CC_OP_EFLAGS
);
3612 /* Various integer extensions at 0f 38 f[0-f]. */
3613 b
= modrm
| (b1
<< 8);
3614 modrm
= cpu_ldub_code(env
, s
->pc
++);
3615 reg
= ((modrm
>> 3) & 7) | rex_r
;
3618 case 0x3f0: /* crc32 Gd,Eb */
3619 case 0x3f1: /* crc32 Gd,Ey */
3621 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3624 if ((b
& 0xff) == 0xf0) {
3626 } else if (s
->dflag
!= MO_64
) {
3627 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3632 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[reg
]);
3633 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3634 gen_helper_crc32(cpu_T0
, cpu_tmp2_i32
,
3635 cpu_T0
, tcg_const_i32(8 << ot
));
3637 ot
= mo_64_32(s
->dflag
);
3638 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3641 case 0x1f0: /* crc32 or movbe */
3643 /* For these insns, the f3 prefix is supposed to have priority
3644 over the 66 prefix, but that's not what we implement above
3646 if (s
->prefix
& PREFIX_REPNZ
) {
3650 case 0x0f0: /* movbe Gy,My */
3651 case 0x0f1: /* movbe My,Gy */
3652 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3655 if (s
->dflag
!= MO_64
) {
3656 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3661 gen_lea_modrm(env
, s
, modrm
);
3663 tcg_gen_qemu_ld_tl(cpu_T0
, cpu_A0
,
3664 s
->mem_index
, ot
| MO_BE
);
3665 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3667 tcg_gen_qemu_st_tl(cpu_regs
[reg
], cpu_A0
,
3668 s
->mem_index
, ot
| MO_BE
);
3672 case 0x0f2: /* andn Gy, By, Ey */
3673 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3674 || !(s
->prefix
& PREFIX_VEX
)
3678 ot
= mo_64_32(s
->dflag
);
3679 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3680 tcg_gen_andc_tl(cpu_T0
, cpu_regs
[s
->vex_v
], cpu_T0
);
3681 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3682 gen_op_update1_cc();
3683 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3686 case 0x0f7: /* bextr Gy, Ey, By */
3687 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3688 || !(s
->prefix
& PREFIX_VEX
)
3692 ot
= mo_64_32(s
->dflag
);
3696 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3697 /* Extract START, and shift the operand.
3698 Shifts larger than operand size get zeros. */
3699 tcg_gen_ext8u_tl(cpu_A0
, cpu_regs
[s
->vex_v
]);
3700 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_A0
);
3702 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3703 zero
= tcg_const_tl(0);
3704 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_T0
, cpu_A0
, bound
,
3706 tcg_temp_free(zero
);
3708 /* Extract the LEN into a mask. Lengths larger than
3709 operand size get all ones. */
3710 tcg_gen_shri_tl(cpu_A0
, cpu_regs
[s
->vex_v
], 8);
3711 tcg_gen_ext8u_tl(cpu_A0
, cpu_A0
);
3712 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_A0
, cpu_A0
, bound
,
3714 tcg_temp_free(bound
);
3715 tcg_gen_movi_tl(cpu_T1
, 1);
3716 tcg_gen_shl_tl(cpu_T1
, cpu_T1
, cpu_A0
);
3717 tcg_gen_subi_tl(cpu_T1
, cpu_T1
, 1);
3718 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3720 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3721 gen_op_update1_cc();
3722 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
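            /* BEXTR example: with By = 0x0410 the START field (bits 7:0) is
               0x10 and the LEN field (bits 15:8) is 0x04, so the result is
               (Ey >> 16) & 0xf; shift counts and lengths larger than the
               operand size are clamped as described in the comments above.  */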
3726 case 0x0f5: /* bzhi Gy, Ey, By */
3727 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3728 || !(s
->prefix
& PREFIX_VEX
)
3732 ot
= mo_64_32(s
->dflag
);
3733 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3734 tcg_gen_ext8u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3736 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3737 /* Note that since we're using BMILG (in order to get O
3738 cleared) we need to store the inverse into C. */
3739 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
3741 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_T1
, cpu_T1
,
3742 bound
, bound
, cpu_T1
);
3743 tcg_temp_free(bound
);
3745 tcg_gen_movi_tl(cpu_A0
, -1);
3746 tcg_gen_shl_tl(cpu_A0
, cpu_A0
, cpu_T1
);
3747 tcg_gen_andc_tl(cpu_T0
, cpu_T0
, cpu_A0
);
3748 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3749 gen_op_update1_cc();
3750 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3753 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3754 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3755 || !(s
->prefix
& PREFIX_VEX
)
3759 ot
= mo_64_32(s
->dflag
);
3760 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3763 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3764 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EDX
]);
3765 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
3766 cpu_tmp2_i32
, cpu_tmp3_i32
);
3767 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], cpu_tmp2_i32
);
3768 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp3_i32
);
3770 #ifdef TARGET_X86_64
3772 tcg_gen_mulu2_i64(cpu_T0
, cpu_T1
,
3773 cpu_T0
, cpu_regs
[R_EDX
]);
3774 tcg_gen_mov_i64(cpu_regs
[s
->vex_v
], cpu_T0
);
3775 tcg_gen_mov_i64(cpu_regs
[reg
], cpu_T1
);
3781 case 0x3f5: /* pdep Gy, By, Ey */
3782 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3783 || !(s
->prefix
& PREFIX_VEX
)
3787 ot
= mo_64_32(s
->dflag
);
3788 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3789 /* Note that by zero-extending the mask operand, we
3790 automatically handle zero-extending the result. */
3792 tcg_gen_mov_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3794 tcg_gen_ext32u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3796 gen_helper_pdep(cpu_regs
[reg
], cpu_T0
, cpu_T1
);
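            /* PDEP example: pdep(src = 0b101, mask = 0b11010) deposits the low
               bits of src into the set bit positions of the mask, giving
               0b10010.  PEXT below is the inverse gather operation.  */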
3799 case 0x2f5: /* pext Gy, By, Ey */
3800 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3801 || !(s
->prefix
& PREFIX_VEX
)
3805 ot
= mo_64_32(s
->dflag
);
3806 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3807 /* Note that by zero-extending the mask operand, we
3808 automatically handle zero-extending the result. */
3810 tcg_gen_mov_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3812 tcg_gen_ext32u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3814 gen_helper_pext(cpu_regs
[reg
], cpu_T0
, cpu_T1
);
3817 case 0x1f6: /* adcx Gy, Ey */
3818 case 0x2f6: /* adox Gy, Ey */
3819 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
3822 TCGv carry_in
, carry_out
, zero
;
3825 ot
= mo_64_32(s
->dflag
);
3826 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3828 /* Re-use the carry-out from a previous round. */
3829 TCGV_UNUSED(carry_in
);
3830 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
3834 carry_in
= cpu_cc_dst
;
3835 end_op
= CC_OP_ADCX
;
3837 end_op
= CC_OP_ADCOX
;
3842 end_op
= CC_OP_ADCOX
;
3844 carry_in
= cpu_cc_src2
;
3845 end_op
= CC_OP_ADOX
;
3849 end_op
= CC_OP_ADCOX
;
3850 carry_in
= carry_out
;
3853 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
3856 /* If we can't reuse carry-out, get it out of EFLAGS. */
3857 if (TCGV_IS_UNUSED(carry_in
)) {
3858 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
3859 gen_compute_eflags(s
);
3861 carry_in
= cpu_tmp0
;
3862 tcg_gen_shri_tl(carry_in
, cpu_cc_src
,
3863 ctz32(b
== 0x1f6 ? CC_C
: CC_O
));
3864 tcg_gen_andi_tl(carry_in
, carry_in
, 1);
3868 #ifdef TARGET_X86_64
3870 /* If we know TL is 64-bit, and we want a 32-bit
3871 result, just do everything in 64-bit arithmetic. */
3872 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
3873 tcg_gen_ext32u_i64(cpu_T0
, cpu_T0
);
3874 tcg_gen_add_i64(cpu_T0
, cpu_T0
, cpu_regs
[reg
]);
3875 tcg_gen_add_i64(cpu_T0
, cpu_T0
, carry_in
);
3876 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_T0
);
3877 tcg_gen_shri_i64(carry_out
, cpu_T0
, 32);
3881 /* Otherwise compute the carry-out in two steps. */
3882 zero
= tcg_const_tl(0);
3883 tcg_gen_add2_tl(cpu_T0
, carry_out
,
3886 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
3887 cpu_regs
[reg
], carry_out
,
3889 tcg_temp_free(zero
);
3892 set_cc_op(s
, end_op
);
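            /* ADCX chains only through CF and ADOX only through OF, so two
               independent carry chains can be interleaved (e.g. in a
               multi-precision multiply); here the CF chain lives in cc_dst,
               the OF chain in cc_src2, and CC_OP_ADCOX marks that both are
               live at once.  */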
3896 case 0x1f7: /* shlx Gy, Ey, By */
3897 case 0x2f7: /* sarx Gy, Ey, By */
3898 case 0x3f7: /* shrx Gy, Ey, By */
3899 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3900 || !(s
->prefix
& PREFIX_VEX
)
3904 ot
= mo_64_32(s
->dflag
);
3905 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3907 tcg_gen_andi_tl(cpu_T1
, cpu_regs
[s
->vex_v
], 63);
3909 tcg_gen_andi_tl(cpu_T1
, cpu_regs
[s
->vex_v
], 31);
3912 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3913 } else if (b
== 0x2f7) {
3915 tcg_gen_ext32s_tl(cpu_T0
, cpu_T0
);
3917 tcg_gen_sar_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3920 tcg_gen_ext32u_tl(cpu_T0
, cpu_T0
);
3922 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3924 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3930 case 0x3f3: /* Group 17 */
3931 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3932 || !(s
->prefix
& PREFIX_VEX
)
3936 ot
= mo_64_32(s
->dflag
);
3937 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3940 case 1: /* blsr By,Ey */
3941 tcg_gen_neg_tl(cpu_T1
, cpu_T0
);
3942 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3943 gen_op_mov_reg_v(ot
, s
->vex_v
, cpu_T0
);
3944 gen_op_update2_cc();
3945 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3948 case 2: /* blsmsk By,Ey */
3949 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
3950 tcg_gen_subi_tl(cpu_T0
, cpu_T0
, 1);
3951 tcg_gen_xor_tl(cpu_T0
, cpu_T0
, cpu_cc_src
);
3952 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
3953 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3956 case 3: /* blsi By, Ey */
3957 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
3958 tcg_gen_subi_tl(cpu_T0
, cpu_T0
, 1);
3959 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_cc_src
);
3960 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
3961 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3977 modrm
= cpu_ldub_code(env
, s
->pc
++);
3979 reg
= ((modrm
>> 3) & 7) | rex_r
;
3980 mod
= (modrm
>> 6) & 3;
3985 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
3989 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
3992 if (sse_fn_eppi
== SSE_SPECIAL
) {
3993 ot
= mo_64_32(s
->dflag
);
3994 rm
= (modrm
& 7) | REX_B(s
);
3996 gen_lea_modrm(env
, s
, modrm
);
3997 reg
= ((modrm
>> 3) & 7) | rex_r
;
3998 val
= cpu_ldub_code(env
, s
->pc
++);
4000 case 0x14: /* pextrb */
4001 tcg_gen_ld8u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4002 xmm_regs
[reg
].ZMM_B(val
& 15)));
4004 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4006 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4007 s
->mem_index
, MO_UB
);
4010 case 0x15: /* pextrw */
4011 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4012 xmm_regs
[reg
].ZMM_W(val
& 7)));
4014 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4016 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4017 s
->mem_index
, MO_LEUW
);
4021 if (ot
== MO_32
) { /* pextrd */
4022 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4023 offsetof(CPUX86State
,
4024 xmm_regs
[reg
].ZMM_L(val
& 3)));
4026 tcg_gen_extu_i32_tl(cpu_regs
[rm
], cpu_tmp2_i32
);
4028 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
4029 s
->mem_index
, MO_LEUL
);
4031 } else { /* pextrq */
4032 #ifdef TARGET_X86_64
4033 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4034 offsetof(CPUX86State
,
4035 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4037 tcg_gen_mov_i64(cpu_regs
[rm
], cpu_tmp1_i64
);
4039 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
4040 s
->mem_index
, MO_LEQ
);
4047 case 0x17: /* extractps */
4048 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4049 xmm_regs
[reg
].ZMM_L(val
& 3)));
4051 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4053 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4054 s
->mem_index
, MO_LEUL
);
4057 case 0x20: /* pinsrb */
4059 gen_op_mov_v_reg(MO_32
, cpu_T0
, rm
);
4061 tcg_gen_qemu_ld_tl(cpu_T0
, cpu_A0
,
4062 s
->mem_index
, MO_UB
);
4064 tcg_gen_st8_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4065 xmm_regs
[reg
].ZMM_B(val
& 15)));
4067 case 0x21: /* insertps */
4069 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4070 offsetof(CPUX86State
,xmm_regs
[rm
]
4071 .ZMM_L((val
>> 6) & 3)));
4073 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4074 s
->mem_index
, MO_LEUL
);
4076 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4077 offsetof(CPUX86State
,xmm_regs
[reg
]
4078 .ZMM_L((val
>> 4) & 3)));
4080 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4081 cpu_env
, offsetof(CPUX86State
,
4082 xmm_regs
[reg
].ZMM_L(0)));
4084 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4085 cpu_env
, offsetof(CPUX86State
,
4086 xmm_regs
[reg
].ZMM_L(1)));
4088 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4089 cpu_env
, offsetof(CPUX86State
,
4090 xmm_regs
[reg
].ZMM_L(2)));
4092 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4093 cpu_env
, offsetof(CPUX86State
,
4094 xmm_regs
[reg
].ZMM_L(3)));
4097 if (ot
== MO_32
) { /* pinsrd */
4099 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[rm
]);
4101 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4102 s
->mem_index
, MO_LEUL
);
4104 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4105 offsetof(CPUX86State
,
4106 xmm_regs
[reg
].ZMM_L(val
& 3)));
4107 } else { /* pinsrq */
4108 #ifdef TARGET_X86_64
4110 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4112 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
4113 s
->mem_index
, MO_LEQ
);
4115 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4116 offsetof(CPUX86State
,
4117 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4128 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4130 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4132 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4133 gen_lea_modrm(env
, s
, modrm
);
4134 gen_ldo_env_A0(s
, op2_offset
);
4137 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4139 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4141 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4142 gen_lea_modrm(env
, s
, modrm
);
4143 gen_ldq_env_A0(s
, op2_offset
);
4146 val
= cpu_ldub_code(env
, s
->pc
++);
4148 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4149 set_cc_op(s
, CC_OP_EFLAGS
);
4151 if (s
->dflag
== MO_64
) {
4152 /* The helper must use entire 64-bit gp registers */
4157 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4158 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4159 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4163 /* Various integer extensions at 0f 3a f[0-f]. */
4164 b
= modrm
| (b1
<< 8);
4165 modrm
= cpu_ldub_code(env
, s
->pc
++);
4166 reg
= ((modrm
>> 3) & 7) | rex_r
;
4169 case 0x3f0: /* rorx Gy,Ey, Ib */
4170 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4171 || !(s
->prefix
& PREFIX_VEX
)
4175 ot
= mo_64_32(s
->dflag
);
4176 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4177 b
= cpu_ldub_code(env
, s
->pc
++);
4179 tcg_gen_rotri_tl(cpu_T0
, cpu_T0
, b
& 63);
4181 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
4182 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, b
& 31);
4183 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
4185 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
4195 gen_unknown_opcode(env
, s
);
4199 /* generic MMX or SSE operation */
4201 case 0x70: /* pshufx insn */
4202 case 0xc6: /* pshufx insn */
4203 case 0xc2: /* compare insns */
4210 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4214 gen_lea_modrm(env
, s
, modrm
);
4215 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4221 /* Most sse scalar operations. */
4224 } else if (b1
== 3) {
4229 case 0x2e: /* ucomis[sd] */
4230 case 0x2f: /* comis[sd] */
4242 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
4243 tcg_gen_st32_tl(cpu_T0
, cpu_env
,
4244 offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
4248 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
4251 /* 128 bit access */
4252 gen_ldo_env_A0(s
, op2_offset
);
4256 rm
= (modrm
& 7) | REX_B(s
);
4257 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4260 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4262 gen_lea_modrm(env
, s
, modrm
);
4263 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4264 gen_ldq_env_A0(s
, op2_offset
);
4267 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4271 case 0x0f: /* 3DNow! data insns */
4272 val
= cpu_ldub_code(env
, s
->pc
++);
4273 sse_fn_epp
= sse_op_table5
[val
];
4277 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
4280 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4281 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4282 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4284 case 0x70: /* pshufx insn */
4285 case 0xc6: /* pshufx insn */
4286 val
= cpu_ldub_code(env
, s
->pc
++);
4287 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4288 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4289 /* XXX: introduce a new table? */
4290 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4291 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4295 val
= cpu_ldub_code(env
, s
->pc
++);
4298 sse_fn_epp
= sse_op_table4
[val
][b1
];
4300 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4301 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4302 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4305 /* maskmov : we must prepare A0 */
4308 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EDI
]);
4309 gen_extu(s
->aflag
, cpu_A0
);
4310 gen_add_A0_ds_seg(s
);
4312 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4313 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4314 /* XXX: introduce a new table? */
4315 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4316 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4319 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4320 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4321 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4324 if (b
== 0x2e || b
== 0x2f) {
4325 set_cc_op(s
, CC_OP_EFLAGS
);
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                               target_ulong pc_start)
{
    int b, prefixes;
    int shift;
    TCGMemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    s->pc_start = s->pc = pc_start;
    prefixes = 0;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
 next_byte:
    b = cpu_ldub_code(env, s->pc);
    s->pc++;
4358 /* Collect prefixes. */
4361 prefixes
|= PREFIX_REPZ
;
4364 prefixes
|= PREFIX_REPNZ
;
4367 prefixes
|= PREFIX_LOCK
;
4388 prefixes
|= PREFIX_DATA
;
4391 prefixes
|= PREFIX_ADR
;
4393 #ifdef TARGET_X86_64
4397 rex_w
= (b
>> 3) & 1;
4398 rex_r
= (b
& 0x4) << 1;
4399 s
->rex_x
= (b
& 0x2) << 2;
4400 REX_B(s
) = (b
& 0x1) << 3;
4401 x86_64_hregs
= 1; /* select uniform byte register addressing */
4406 case 0xc5: /* 2-byte VEX */
4407 case 0xc4: /* 3-byte VEX */
4408 /* VEX prefixes cannot be used except in 32-bit mode.
4409 Otherwise the instruction is LES or LDS. */
4410 if (s
->code32
&& !s
->vm86
) {
4411 static const int pp_prefix
[4] = {
4412 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4414 int vex3
, vex2
= cpu_ldub_code(env
, s
->pc
);
4416 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4417 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4418 otherwise the instruction is LES or LDS. */
4423 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4424 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4425 | PREFIX_LOCK
| PREFIX_DATA
)) {
4428 #ifdef TARGET_X86_64
4433 rex_r
= (~vex2
>> 4) & 8;
4436 b
= cpu_ldub_code(env
, s
->pc
++);
4438 #ifdef TARGET_X86_64
4439 s
->rex_x
= (~vex2
>> 3) & 8;
4440 s
->rex_b
= (~vex2
>> 2) & 8;
4442 vex3
= cpu_ldub_code(env
, s
->pc
++);
4443 rex_w
= (vex3
>> 7) & 1;
4444 switch (vex2
& 0x1f) {
4445 case 0x01: /* Implied 0f leading opcode bytes. */
4446 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4448 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4451 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4454 default: /* Reserved for future use. */
4458 s
->vex_v
= (~vex3
>> 3) & 0xf;
4459 s
->vex_l
= (vex3
>> 2) & 1;
4460 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
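        /* A 2-byte VEX prefix (c5) carries only R, vvvv, L and pp, so its
           single payload byte is decoded directly; a 3-byte VEX prefix (c4)
           adds X, B, W and the implied-leading-opcode-map field handled by
           the switch above.  */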
4465 /* Post-process prefixes. */
4467 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4468 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4469 over 0x66 if both are present. */
4470 dflag
= (rex_w
> 0 ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
4471 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4472 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
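        /* E.g. in 64-bit mode "66 48 01 d8" (66-prefixed, REX.W add) still
           selects MO_64 because rex_w wins over the 0x66 prefix, while a
           67-prefixed memory operand uses 32-bit addressing (MO_32) instead
           of the default 64-bit.  */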
4474 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4475 if (s
->code32
^ ((prefixes
& PREFIX_DATA
) != 0)) {
4480 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4481 if (s
->code32
^ ((prefixes
& PREFIX_ADR
) != 0)) {
4488 s
->prefix
= prefixes
;
4492 /* lock generation */
4493 if (prefixes
& PREFIX_LOCK
)
4496 /* now check op code */
4500 /**************************/
4501 /* extended op code */
4502 b
= cpu_ldub_code(env
, s
->pc
++) | 0x100;
4505 /**************************/
4520 ot
= mo_b_d(b
, dflag
);
4523 case 0: /* OP Ev, Gv */
4524 modrm
= cpu_ldub_code(env
, s
->pc
++);
4525 reg
= ((modrm
>> 3) & 7) | rex_r
;
4526 mod
= (modrm
>> 6) & 3;
4527 rm
= (modrm
& 7) | REX_B(s
);
4529 gen_lea_modrm(env
, s
, modrm
);
4531 } else if (op
== OP_XORL
&& rm
== reg
) {
4533 /* xor reg, reg optimisation */
4534 set_cc_op(s
, CC_OP_CLR
);
4535 tcg_gen_movi_tl(cpu_T0
, 0);
4536 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
4541 gen_op_mov_v_reg(ot
, cpu_T1
, reg
);
4542 gen_op(s
, op
, ot
, opreg
);
4544 case 1: /* OP Gv, Ev */
4545 modrm
= cpu_ldub_code(env
, s
->pc
++);
4546 mod
= (modrm
>> 6) & 3;
4547 reg
= ((modrm
>> 3) & 7) | rex_r
;
4548 rm
= (modrm
& 7) | REX_B(s
);
4550 gen_lea_modrm(env
, s
, modrm
);
4551 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
4552 } else if (op
== OP_XORL
&& rm
== reg
) {
4555 gen_op_mov_v_reg(ot
, cpu_T1
, rm
);
4557 gen_op(s
, op
, ot
, reg
);
4559 case 2: /* OP A, Iv */
4560 val
= insn_get(env
, s
, ot
);
4561 tcg_gen_movi_tl(cpu_T1
, val
);
4562 gen_op(s
, op
, ot
, OR_EAX
);
4571 case 0x80: /* GRP1 */
4577 ot
= mo_b_d(b
, dflag
);
4579 modrm
= cpu_ldub_code(env
, s
->pc
++);
4580 mod
= (modrm
>> 6) & 3;
4581 rm
= (modrm
& 7) | REX_B(s
);
4582 op
= (modrm
>> 3) & 7;
4588 s
->rip_offset
= insn_const_size(ot
);
4589 gen_lea_modrm(env
, s
, modrm
);
4600 val
= insn_get(env
, s
, ot
);
4603 val
= (int8_t)insn_get(env
, s
, MO_8
);
4606 tcg_gen_movi_tl(cpu_T1
, val
);
4607 gen_op(s
, op
, ot
, opreg
);
4611 /**************************/
4612 /* inc, dec, and other misc arith */
4613 case 0x40 ... 0x47: /* inc Gv */
4615 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4617 case 0x48 ... 0x4f: /* dec Gv */
4619 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4621 case 0xf6: /* GRP3 */
4623 ot
= mo_b_d(b
, dflag
);
4625 modrm
= cpu_ldub_code(env
, s
->pc
++);
4626 mod
= (modrm
>> 6) & 3;
4627 rm
= (modrm
& 7) | REX_B(s
);
4628 op
= (modrm
>> 3) & 7;
4631 s
->rip_offset
= insn_const_size(ot
);
4632 gen_lea_modrm(env
, s
, modrm
);
4633 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
4635 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
4640 val
= insn_get(env
, s
, ot
);
4641 tcg_gen_movi_tl(cpu_T1
, val
);
4642 gen_op_testl_T0_T1_cc();
4643 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4646 tcg_gen_not_tl(cpu_T0
, cpu_T0
);
4648 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
4650 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4654 tcg_gen_neg_tl(cpu_T0
, cpu_T0
);
4656 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
4658 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4660 gen_op_update_neg_cc();
4661 set_cc_op(s
, CC_OP_SUBB
+ ot
);
4666 gen_op_mov_v_reg(MO_8
, cpu_T1
, R_EAX
);
4667 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
4668 tcg_gen_ext8u_tl(cpu_T1
, cpu_T1
);
4669 /* XXX: use 32 bit mul which could be faster */
4670 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
4671 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4672 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
4673 tcg_gen_andi_tl(cpu_cc_src
, cpu_T0
, 0xff00);
4674 set_cc_op(s
, CC_OP_MULB
);
        gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
        tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
        tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
        /* XXX: use 32 bit mul which could be faster */
        tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
        gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
        set_cc_op(s, CC_OP_MULW);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
        tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                          cpu_tmp2_i32, cpu_tmp3_i32);
        tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
        tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
        tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
        set_cc_op(s, CC_OP_MULL);
#ifdef TARGET_X86_64
        tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                          cpu_T0, cpu_regs[R_EAX]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
        tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
        set_cc_op(s, CC_OP_MULQ);
        gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
        tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
        tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
        /* XXX: use 32 bit mul which could be faster */
        tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
        tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
        set_cc_op(s, CC_OP_MULB);
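        /* Worked example (illustrative): the ext8s/sub pair above detects the
           IMUL overflow condition.  CF/OF must be set when the product is not
           the sign extension of its low half, e.g. (int8_t)-2 * 100 = -200 =
           0xff38; sign-extending the low byte 0x38 gives 0x0038 != 0xff38, so
           cc_src ends up non-zero and CF = OF = 1. */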
        gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
        tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
        tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
        /* XXX: use 32 bit mul which could be faster */
        tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
        tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
        tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
        gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
        set_cc_op(s, CC_OP_MULW);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
        tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                          cpu_tmp2_i32, cpu_tmp3_i32);
        tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
        tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
        tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
        tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
        set_cc_op(s, CC_OP_MULL);
#ifdef TARGET_X86_64
        tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                          cpu_T0, cpu_regs[R_EAX]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
        tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
        tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
        set_cc_op(s, CC_OP_MULQ);
        gen_helper_divb_AL(cpu_env, cpu_T0);
        gen_helper_divw_AX(cpu_env, cpu_T0);
        gen_helper_divl_EAX(cpu_env, cpu_T0);
#ifdef TARGET_X86_64
        gen_helper_divq_EAX(cpu_env, cpu_T0);
        gen_helper_idivb_AL(cpu_env, cpu_T0);
        gen_helper_idivw_AX(cpu_env, cpu_T0);
        gen_helper_idivl_EAX(cpu_env, cpu_T0);
#ifdef TARGET_X86_64
        gen_helper_idivq_EAX(cpu_env, cpu_T0);
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
        if (op == 2 || op == 4) {
            /* operand size for jumps is 64 bit */
        } else if (op == 3 || op == 5) {
            ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
        } else if (op == 6) {
            /* default push size is 64 bit */
            ot = mo_pushpop(s, dflag);
        gen_lea_modrm(env, s, modrm);
        if (op >= 2 && op != 3 && op != 5)
            gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_v_reg(ot, cpu_T0, rm);
    case 0: /* inc Ev */
        gen_inc(s, ot, opreg, 1);
    case 1: /* dec Ev */
        gen_inc(s, ot, opreg, -1);
    case 2: /* call Ev */
        /* XXX: optimize if memory (no 'and' is necessary) */
        if (dflag == MO_16) {
            tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
        next_eip = s->pc - s->cs_base;
        tcg_gen_movi_tl(cpu_T1, next_eip);
        gen_push_v(s, cpu_T1);
        gen_op_jmp_v(cpu_T0);
    case 3: /* lcall Ev */
        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
        gen_add_A0_im(s, 1 << ot);
        gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
        if (s->pe && !s->vm86) {
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                       tcg_const_i32(dflag - 1),
                                       tcg_const_tl(s->pc - s->cs_base));
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
                                  tcg_const_i32(dflag - 1),
                                  tcg_const_i32(s->pc - s->cs_base));
    case 4: /* jmp Ev */
        if (dflag == MO_16) {
            tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
        gen_op_jmp_v(cpu_T0);
    case 5: /* ljmp Ev */
        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
        gen_add_A0_im(s, 1 << ot);
        gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
        if (s->pe && !s->vm86) {
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                      tcg_const_tl(s->pc - s->cs_base));
            gen_op_movl_seg_T0_vm(R_CS);
            gen_op_jmp_v(cpu_T1);
    case 6: /* push Ev */
        gen_push_v(s, cpu_T0);
    case 0x84: /* test Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(ot, cpu_T1, reg);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
    case 0xa8: /* test eAX, Iv */
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);
        gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
        tcg_gen_movi_tl(cpu_T1, val);
        gen_op_testl_T0_T1_cc();
        set_cc_op(s, CC_OP_LOGICB + ot);
    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
        gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
        tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
        gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
        tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
        gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
        tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
        gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
        tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
        gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
        gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
        tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
        tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
        gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
        gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
        tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
        tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
        gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        s->rip_offset = insn_const_size(ot);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(cpu_T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(cpu_T1, val);
            gen_op_mov_v_reg(ot, cpu_T1, reg);
#ifdef TARGET_X86_64
        tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
        tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
        tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                          cpu_tmp2_i32, cpu_tmp3_i32);
        tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
        tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
        tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
        tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
        tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
        /* XXX: use 32 bit mul which could be faster */
        tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
        tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);
        set_cc_op(s, CC_OP_MULB + ot);
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(ot, cpu_T0, reg);
        gen_op_mov_v_reg(ot, cpu_T1, rm);
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_mov_reg_v(ot, reg, cpu_T1);
        gen_op_mov_reg_v(ot, rm, cpu_T0);
        gen_lea_modrm(env, s, modrm);
        gen_op_mov_v_reg(ot, cpu_T0, reg);
        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
        tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
        gen_op_st_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(ot, reg, cpu_T1);
        gen_op_update2_cc();
        set_cc_op(s, CC_OP_ADDB + ot);
    case 0x1b1: /* cmpxchg Ev, Gv */
        TCGLabel *label1, *label2;
        TCGv t0, t1, t2, a0;
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        t0 = tcg_temp_local_new();
        t1 = tcg_temp_local_new();
        t2 = tcg_temp_local_new();
        a0 = tcg_temp_local_new();
        gen_op_mov_v_reg(ot, t1, reg);
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(ot, t0, rm);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(s, ot, t0, a0);
        rm = 0; /* avoid warning */
        label1 = gen_new_label();
        tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
        tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
        label2 = gen_new_label();
        gen_op_mov_reg_v(ot, R_EAX, t0);
        gen_set_label(label1);
        gen_op_mov_reg_v(ot, rm, t1);
        /* perform no-op store cycle like physical cpu; must be
           before changing accumulator to ensure idempotency if
           the store faults and the instruction is restarted */
        gen_op_st_v(s, ot, t0, a0);
        gen_op_mov_reg_v(ot, R_EAX, t0);
        gen_set_label(label1);
        gen_op_st_v(s, ot, t1, a0);
        gen_set_label(label2);
        tcg_gen_mov_tl(cpu_cc_src, t0);
        tcg_gen_mov_tl(cpu_cc_srcT, t2);
        tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
        set_cc_op(s, CC_OP_SUBB + ot);
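        /* Illustrative summary: on either path the flags end up as if a CMP of
           the accumulator against the destination had executed: cc_srcT holds
           EAX (t2), cc_src the destination value (t0), cc_dst their
           difference, with CC_OP_SUBB + ot selecting the width. */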
    case 0x1c7: /* cmpxchg8b */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
            gen_lea_modrm(env, s, modrm);
            gen_helper_cmpxchg16b(cpu_env, cpu_A0);
        if (!(s->cpuid_features & CPUID_CX8))
            gen_lea_modrm(env, s, modrm);
            gen_helper_cmpxchg8b(cpu_env, cpu_A0);
        set_cc_op(s, CC_OP_EFLAGS);
    /**************************/
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
        gen_push_v(s, cpu_T0);
    case 0x58 ... 0x5f: /* pop */
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
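        /* Illustrative note on the ordering above: the stack pointer is
           adjusted by gen_pop_update() before the destination register is
           written, so a 16-bit "pop %sp" leaves SP holding the popped value
           rather than the incremented address, matching hardware. */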
    case 0x60: /* pusha */
    case 0x61: /* popa */
    case 0x68: /* push Iv */
        ot = mo_pushpop(s, dflag);
        val = insn_get(env, s, ot);
        val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_push_v(s, cpu_T0);
    case 0x8f: /* pop Ev */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_reg_v(ot, rm, cpu_T0);
        /* NOTE: order is important too for MMU exceptions */
        s->popl_esp_hack = 1 << ot;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        s->popl_esp_hack = 0;
        gen_pop_update(s, ot);
    case 0xc8: /* enter */
        val = cpu_lduw_code(env, s->pc);
        level = cpu_ldub_code(env, s->pc++);
        gen_enter(s, val, level);
    case 0xc9: /* leave */
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        gen_op_movl_T0_seg(b >> 3);
        gen_push_v(s, cpu_T0);
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg((b >> 3) & 7);
        gen_push_v(s, cpu_T0);
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
        gen_jmp_im(s->pc - s->cs_base);
        gen_eob_inhibit_irq(s, true);
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        gen_jmp_im(s->pc - s->cs_base);
    /**************************/
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        s->rip_offset = insn_const_size(ot);
        gen_lea_modrm(env, s, modrm);
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_op_st_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);
    case 0x8e: /* mov seg, Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
        gen_jmp_im(s->pc - s->cs_base);
        gen_eob_inhibit_irq(s, true);
    case 0x8c: /* mov Gv, seg */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_op_movl_T0_seg(reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        /* d_ot is the size of destination */
        /* ot is the size of source */
        ot = (b & 1) + MO_8;
        /* s_ot is the sign+size of source */
        s_ot = b & 8 ? MO_SIGN | ot : ot;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(ot, cpu_T0, rm);
        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
        tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
        tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
        tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(d_ot, reg, cpu_T0);
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(d_ot, reg, cpu_T0);
    case 0x8d: /* lea */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
        AddressParts a = gen_lea_modrm_0(env, s, modrm);
        TCGv ea = gen_lea_modrm_1(a);
        gen_op_mov_reg_v(dflag, reg, ea);
    case 0xa0: /* mov EAX, Ov */
    case 0xa2: /* mov Ov, EAX */
        target_ulong offset_addr;
        ot = mo_b_d(b, dflag);
#ifdef TARGET_X86_64
        offset_addr = cpu_ldq_code(env, s->pc);
        offset_addr = insn_get(env, s, s->aflag);
        tcg_gen_movi_tl(cpu_A0, offset_addr);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
        gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
        gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
        gen_extu(s->aflag, cpu_A0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
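        /* Illustrative recap: XLAT replaces AL with the byte at
           DS:[rBX + zero-extended AL], truncated to the address size; e.g.
           with BX = 0x1000 and AL = 5 the byte at DS:0x1005 is loaded. */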
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tmp = cpu_ldq_code(env, s->pc);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(cpu_T0, tmp);
            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
        val = insn_get(env, s, ot);
        reg = (b & 7) | REX_B(s);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_op_mov_reg_v(ot, reg, cpu_T0);
    case 0x91 ... 0x97: /* xchg R, EAX */
        reg = (b & 7) | REX_B(s);
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(ot, cpu_T0, reg);
        gen_op_mov_v_reg(ot, cpu_T1, rm);
        gen_op_mov_reg_v(ot, rm, cpu_T0);
        gen_op_mov_reg_v(ot, reg, cpu_T1);
        gen_lea_modrm(env, s, modrm);
        gen_op_mov_v_reg(ot, cpu_T0, reg);
        /* for xchg, lock is implicit */
        if (!(prefixes & PREFIX_LOCK))
        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
        gen_op_st_v(s, ot, cpu_T0, cpu_A0);
        if (!(prefixes & PREFIX_LOCK))
            gen_helper_unlock();
        gen_op_mov_reg_v(ot, reg, cpu_T1);
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above. */
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above. */
    case 0x1b2: /* lss Gv */
    case 0x1b4: /* lfs Gv */
    case 0x1b5: /* lgs Gv */
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(ot, reg, cpu_T1);
        gen_jmp_im(s->pc - s->cs_base);
    /************************/
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        gen_lea_modrm(env, s, modrm);
        opreg = (modrm & 7) | REX_B(s);
        gen_shift(s, op, ot, opreg, OR_ECX);
        shift = cpu_ldub_code(env, s->pc++);
        gen_shifti(s, op, ot, opreg, shift);
    case 0x1a4: /* shld imm */
    case 0x1a5: /* shld cl */
    case 0x1ac: /* shrd imm */
    case 0x1ad: /* shrd cl */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_lea_modrm(env, s, modrm);
        gen_op_mov_v_reg(ot, cpu_T1, reg);
        TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
        gen_shiftd_rm_T1(s, ot, opreg, op, imm);
        gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
    /************************/
        if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
            /* if CR0.EM or CR0.TS are set, generate an FPU exception */
            /* XXX: what to do if illegal op ? */
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = ((b & 7) << 3) | ((modrm >> 3) & 7);
        gen_lea_modrm(env, s, modrm);
    case 0x00 ... 0x07: /* fxxxs */
    case 0x10 ... 0x17: /* fixxxl */
    case 0x20 ... 0x27: /* fxxxl */
    case 0x30 ... 0x37: /* fixxx */
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
                            s->mem_index, MO_LEQ);
        gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LESW);
        gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
        gen_helper_fp_arith_ST0_FT0(op1);
        /* fcomp needs pop */
        gen_helper_fpop(cpu_env);
    case 0x08: /* flds */
    case 0x0a: /* fsts */
    case 0x0b: /* fstps */
    case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
    case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
    case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
                            s->mem_index, MO_LEQ);
        gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LESW);
        gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
        /* XXX: the corresponding CPUID bit must be tested ! */
        gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
                            s->mem_index, MO_LEQ);
        gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUW);
        gen_helper_fpop(cpu_env);
        gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUL);
        gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
                            s->mem_index, MO_LEQ);
        gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUW);
        gen_helper_fpop(cpu_env);
    case 0x0c: /* fldenv mem */
        gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
    case 0x0d: /* fldcw mem */
        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUW);
        gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
    case 0x0e: /* fnstenv mem */
        gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
    case 0x0f: /* fnstcw mem */
        gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUW);
    case 0x1d: /* fldt mem */
        gen_helper_fldt_ST0(cpu_env, cpu_A0);
    case 0x1f: /* fstpt mem */
        gen_helper_fstt_ST0(cpu_env, cpu_A0);
        gen_helper_fpop(cpu_env);
    case 0x2c: /* frstor mem */
        gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
    case 0x2e: /* fnsave mem */
        gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
    case 0x2f: /* fnstsw mem */
        gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                            s->mem_index, MO_LEUW);
    case 0x3c: /* fbld */
        gen_helper_fbld_ST0(cpu_env, cpu_A0);
    case 0x3e: /* fbstp */
        gen_helper_fbst_ST0(cpu_env, cpu_A0);
        gen_helper_fpop(cpu_env);
    case 0x3d: /* fildll */
        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
        gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
    case 0x3f: /* fistpll */
        gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
        gen_helper_fpop(cpu_env);
    /* register float ops */
    case 0x08: /* fld sti */
        gen_helper_fpush(cpu_env);
        gen_helper_fmov_ST0_STN(cpu_env,
                                tcg_const_i32((opreg + 1) & 7));
    case 0x09: /* fxchg sti */
    case 0x29: /* fxchg4 sti, undocumented op */
    case 0x39: /* fxchg7 sti, undocumented op */
        gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
    case 0x0a: /* grp d9/2 */
        /* check exceptions (FreeBSD FPU probe) */
        gen_helper_fwait(cpu_env);
    case 0x0c: /* grp d9/4 */
        gen_helper_fchs_ST0(cpu_env);
        gen_helper_fabs_ST0(cpu_env);
        gen_helper_fldz_FT0(cpu_env);
        gen_helper_fcom_ST0_FT0(cpu_env);
        gen_helper_fxam_ST0(cpu_env);
    case 0x0d: /* grp d9/5 */
        gen_helper_fpush(cpu_env);
        gen_helper_fld1_ST0(cpu_env);
        gen_helper_fpush(cpu_env);
        gen_helper_fldl2t_ST0(cpu_env);
        gen_helper_fpush(cpu_env);
        gen_helper_fldl2e_ST0(cpu_env);
        gen_helper_fpush(cpu_env);
        gen_helper_fldpi_ST0(cpu_env);
        gen_helper_fpush(cpu_env);
        gen_helper_fldlg2_ST0(cpu_env);
        gen_helper_fpush(cpu_env);
        gen_helper_fldln2_ST0(cpu_env);
        gen_helper_fpush(cpu_env);
        gen_helper_fldz_ST0(cpu_env);
    case 0x0e: /* grp d9/6 */
        gen_helper_f2xm1(cpu_env);
        gen_helper_fyl2x(cpu_env);
        gen_helper_fptan(cpu_env);
    case 3: /* fpatan */
        gen_helper_fpatan(cpu_env);
    case 4: /* fxtract */
        gen_helper_fxtract(cpu_env);
    case 5: /* fprem1 */
        gen_helper_fprem1(cpu_env);
    case 6: /* fdecstp */
        gen_helper_fdecstp(cpu_env);
    case 7: /* fincstp */
        gen_helper_fincstp(cpu_env);
    case 0x0f: /* grp d9/7 */
        gen_helper_fprem(cpu_env);
    case 1: /* fyl2xp1 */
        gen_helper_fyl2xp1(cpu_env);
        gen_helper_fsqrt(cpu_env);
    case 3: /* fsincos */
        gen_helper_fsincos(cpu_env);
    case 5: /* fscale */
        gen_helper_fscale(cpu_env);
    case 4: /* frndint */
        gen_helper_frndint(cpu_env);
        gen_helper_fsin(cpu_env);
        gen_helper_fcos(cpu_env);
    case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
    case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
    case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
        gen_helper_fp_arith_STN_ST0(op1, opreg);
        gen_helper_fpop(cpu_env);
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fp_arith_ST0_FT0(op1);
    case 0x02: /* fcom */
    case 0x22: /* fcom2, undocumented op */
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fcom_ST0_FT0(cpu_env);
    case 0x03: /* fcomp */
    case 0x23: /* fcomp3, undocumented op */
    case 0x32: /* fcomp5, undocumented op */
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fcom_ST0_FT0(cpu_env);
        gen_helper_fpop(cpu_env);
    case 0x15: /* da/5 */
    case 1: /* fucompp */
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
        gen_helper_fucom_ST0_FT0(cpu_env);
        gen_helper_fpop(cpu_env);
        gen_helper_fpop(cpu_env);
    case 0: /* feni (287 only, just do nop here) */
    case 1: /* fdisi (287 only, just do nop here) */
        gen_helper_fclex(cpu_env);
    case 3: /* fninit */
        gen_helper_fninit(cpu_env);
    case 4: /* fsetpm (287 only, just do nop here) */
    case 0x1d: /* fucomi */
        if (!(s->cpuid_features & CPUID_CMOV)) {
        gen_update_cc_op(s);
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fucomi_ST0_FT0(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x1e: /* fcomi */
        if (!(s->cpuid_features & CPUID_CMOV)) {
        gen_update_cc_op(s);
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fcomi_ST0_FT0(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x28: /* ffree sti */
        gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
    case 0x2a: /* fst sti */
        gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
    case 0x2b: /* fstp sti */
    case 0x0b: /* fstp1 sti, undocumented op */
    case 0x3a: /* fstp8 sti, undocumented op */
    case 0x3b: /* fstp9 sti, undocumented op */
        gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
        gen_helper_fpop(cpu_env);
    case 0x2c: /* fucom st(i) */
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fucom_ST0_FT0(cpu_env);
    case 0x2d: /* fucomp st(i) */
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fucom_ST0_FT0(cpu_env);
        gen_helper_fpop(cpu_env);
    case 0x33: /* de/3 */
    case 1: /* fcompp */
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
        gen_helper_fcom_ST0_FT0(cpu_env);
        gen_helper_fpop(cpu_env);
        gen_helper_fpop(cpu_env);
    case 0x38: /* ffreep sti, undocumented op */
        gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fpop(cpu_env);
    case 0x3c: /* df/4 */
        gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
        tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
        gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
    case 0x3d: /* fucomip */
        if (!(s->cpuid_features & CPUID_CMOV)) {
        gen_update_cc_op(s);
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fucomi_ST0_FT0(cpu_env);
        gen_helper_fpop(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x3e: /* fcomip */
        if (!(s->cpuid_features & CPUID_CMOV)) {
        gen_update_cc_op(s);
        gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
        gen_helper_fcomi_ST0_FT0(cpu_env);
        gen_helper_fpop(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x10 ... 0x13: /* fcmovxx */
        static const uint8_t fcmov_cc[8] = {
        if (!(s->cpuid_features & CPUID_CMOV)) {
        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
        l1 = gen_new_label();
        gen_jcc1_noeob(s, op1, l1);
        gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
    /************************/
    case 0xa4: /* movsS */
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xaa: /* stosS */
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xac: /* lodsS */
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xae: /* scasS */
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
    case 0xa6: /* cmpsS */
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
    case 0x6c: /* insS */
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
    case 0x6e: /* outsS */
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
    /************************/
        ot = mo_b_d32(b, dflag);
        val = cpu_ldub_code(env, s->pc++);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (s->tb->cflags & CF_USE_ICOUNT) {
        tcg_gen_movi_i32(cpu_tmp2_i32, val);
        gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
        gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
        ot = mo_b_d32(b, dflag);
        val = cpu_ldub_code(env, s->pc++);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
        if (s->tb->cflags & CF_USE_ICOUNT) {
        tcg_gen_movi_i32(cpu_tmp2_i32, val);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
        if (s->tb->cflags & CF_USE_ICOUNT) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
        gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
        ot = mo_b_d32(b, dflag);
        tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
        gen_check_io(s, ot, pc_start - s->cs_base,
                     svm_is_rep(prefixes));
        gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
        if (s->tb->cflags & CF_USE_ICOUNT) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        gen_bpt_io(s, cpu_tmp2_i32, ot);
        if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
    /************************/
    case 0xc2: /* ret im */
        val = cpu_ldsw_code(env, s->pc);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(cpu_T0);
    case 0xc3: /* ret */
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(cpu_T0);
    case 0xca: /* lret im */
        val = cpu_ldsw_code(env, s->pc);
        if (s->pe && !s->vm86) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
                                      tcg_const_i32(val));
            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_v(cpu_T0);
            gen_add_A0_im(s, 1 << dflag);
            gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
            gen_op_movl_seg_T0_vm(R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (2 << dflag));
    case 0xcb: /* lret */
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
        gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
        set_cc_op(s, CC_OP_EFLAGS);
        } else if (s->vm86) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
            set_cc_op(s, CC_OP_EFLAGS);
            gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
                                      tcg_const_i32(s->pc - s->cs_base));
            set_cc_op(s, CC_OP_EFLAGS);
    case 0xe8: /* call im */
        if (dflag != MO_16) {
            tval = (int32_t)insn_get(env, s, MO_32);
            tval = (int16_t)insn_get(env, s, MO_16);
        next_eip = s->pc - s->cs_base;
        if (dflag == MO_16) {
        } else if (!CODE64(s)) {
        tcg_gen_movi_tl(cpu_T0, next_eip);
        gen_push_v(s, cpu_T0);
    case 0x9a: /* lcall im */
        unsigned int selector, offset;
        offset = insn_get(env, s, ot);
        selector = insn_get(env, s, MO_16);
        tcg_gen_movi_tl(cpu_T0, selector);
        tcg_gen_movi_tl(cpu_T1, offset);
    case 0xe9: /* jmp im */
        if (dflag != MO_16) {
            tval = (int32_t)insn_get(env, s, MO_32);
            tval = (int16_t)insn_get(env, s, MO_16);
        tval += s->pc - s->cs_base;
        if (dflag == MO_16) {
        } else if (!CODE64(s)) {
    case 0xea: /* ljmp im */
        unsigned int selector, offset;
        offset = insn_get(env, s, ot);
        selector = insn_get(env, s, MO_16);
        tcg_gen_movi_tl(cpu_T0, selector);
        tcg_gen_movi_tl(cpu_T1, offset);
    case 0xeb: /* jmp Jb */
        tval = (int8_t)insn_get(env, s, MO_8);
        tval += s->pc - s->cs_base;
        if (dflag == MO_16) {
    case 0x70 ... 0x7f: /* jcc Jb */
        tval = (int8_t)insn_get(env, s, MO_8);
    case 0x180 ... 0x18f: /* jcc Jv */
        if (dflag != MO_16) {
            tval = (int32_t)insn_get(env, s, MO_32);
            tval = (int16_t)insn_get(env, s, MO_16);
        next_eip = s->pc - s->cs_base;
        if (dflag == MO_16) {
        gen_jcc(s, b, tval, next_eip);
    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_setcc1(s, b, cpu_T0);
        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        if (!(s->cpuid_features & CPUID_CMOV)) {
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_cmovcc1(env, s, ot, b, modrm, reg);
    /************************/
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_update_cc_op(s);
        gen_helper_read_eflags(cpu_T0, cpu_env);
        gen_push_v(s, cpu_T0);
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        if (dflag != MO_16) {
            gen_helper_write_eflags(cpu_env, cpu_T0,
                                    tcg_const_i32((TF_MASK | AC_MASK |
            gen_helper_write_eflags(cpu_env, cpu_T0,
                                    tcg_const_i32((TF_MASK | AC_MASK |
                                                   IF_MASK | IOPL_MASK)
        if (s->cpl <= s->iopl) {
            if (dflag != MO_16) {
                gen_helper_write_eflags(cpu_env, cpu_T0,
                                        tcg_const_i32((TF_MASK |
                gen_helper_write_eflags(cpu_env, cpu_T0,
                                        tcg_const_i32((TF_MASK |
        if (dflag != MO_16) {
            gen_helper_write_eflags(cpu_env, cpu_T0,
                                    tcg_const_i32((TF_MASK | AC_MASK |
                                                   ID_MASK | NT_MASK)));
            gen_helper_write_eflags(cpu_env, cpu_T0,
                                    tcg_const_i32((TF_MASK | AC_MASK |
        gen_pop_update(s, ot);
        set_cc_op(s, CC_OP_EFLAGS);
        /* abort translation because TF/AC flag may change */
        gen_jmp_im(s->pc - s->cs_base);
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
        gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
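        /* Illustrative note: gen_compute_eflags() leaves the condition codes
           in cc_src, and bit 1 of EFLAGS always reads as 1, hence the ori
           with 0x02 before AH is written; SAHF above does the converse,
           keeping only O from the old flags and taking S/Z/A/P/C from AH. */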
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
    /************************/
    /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        modrm = cpu_ldub_code(env, s->pc++);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_v_reg(ot, cpu_T0, rm);
        val = cpu_ldub_code(env, s->pc++);
        tcg_gen_movi_tl(cpu_T1, val);
    case 0x1a3: /* bt Gv, Ev */
    case 0x1ab: /* bts */
    case 0x1b3: /* btr */
    case 0x1bb: /* btc */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(MO_32, cpu_T1, reg);
        gen_lea_modrm(env, s, modrm);
        /* specific case: we need to add a displacement */
        gen_exts(ot, cpu_T1);
        tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_v_reg(ot, cpu_T0, rm);
        tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
        tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
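        /* Worked example (illustrative): for a memory operand the bit offset
           may address beyond the ModRM location.  With ot = MO_32 and a bit
           offset of 100, the sari/shli pair above adds (100 >> 5) << 2 = 12
           bytes to A0, and after the andi the bit tested within that dword is
           100 & 31 = 4. */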
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
        tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
        tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
        tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
        gen_op_st_v(s, ot, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(ot, rm, cpu_T0);
        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined. */
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width.  */
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
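            /* Illustrative example: if the live cc_op was CC_OP_ADDL, Z must
               still reflect the earlier ADD result kept in cc_dst.  Switching
               to the same-width CC_OP_SARL keeps Z = (cc_dst == 0) while C is
               now taken from the freshly written cc_src, i.e. the tested
               bit. */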
            /* Otherwise, generate EFLAGS and replace the C bit. */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
                               ctz32(CC_C), 1);
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T0);
        /* Note that lzcnt and tzcnt are in different extensions. */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
            /* For lzcnt, reduce the target_ulong result by the
               number of zeros that we expect to find at the top. */
            gen_helper_clz(cpu_T0, cpu_T0);
            tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
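            /* Worked example (illustrative): for a 16-bit LZCNT of 0x0010 the
               helper sees the zero-extended 64-bit value, clz = 59, and
               subtracting TARGET_LONG_BITS - size = 48 yields 11, the
               expected 16-bit count. */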
            /* For tzcnt, a zero input must return the operand size:
               force all bits outside the operand size to 1. */
            target_ulong mask = (target_ulong)-2 << (size - 1);
            tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
            gen_helper_ctz(cpu_T0, cpu_T0);
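            /* Worked example (illustrative): with size = 16 the mask is
               (target_ulong)-2 << 15, i.e. all bits from 16 upward set, so a
               zero input becomes the mask itself and ctz returns 16, the
               operand size; non-zero inputs are unaffected because only bits
               above the operand size are forced to 1. */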
            /* For lzcnt/tzcnt, C and Z bits are defined and are
               related to the result. */
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_BMILGB + ot);
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
            set_cc_op(s, CC_OP_LOGICB + ot);
            /* For bsr, return the bit index of the first 1 bit,
               not the count of leading zeros. */
            gen_helper_clz(cpu_T0, cpu_T0);
            tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
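            /* Worked example (illustrative): for non-zero input, xor-ing the
               clz result with TARGET_LONG_BITS - 1 equals 63 - clz, the index
               of the most significant set bit: 0x100 gives clz = 55 and
               63 ^ 55 = 8. */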
            gen_helper_ctz(cpu_T0, cpu_T0);
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that. */
            tcg_gen_movi_tl(cpu_tmp0, 0);
            tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
                               cpu_regs[reg], cpu_T0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);
    /************************/
    case 0x27: /* daa */
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x2f: /* das */
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x37: /* aaa */
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x3f: /* aas */
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
    case 0xd4: /* aam */
        val = cpu_ldub_code(env, s->pc++);
        gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        gen_helper_aam(cpu_env, tcg_const_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
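        /* Illustrative recap: AAM divides AL by the immediate base
           (AH = AL / val, AL = AL % val), so a zero immediate raises #DE via
           EXCP00_DIVZ above; e.g. AL = 0x35 with the default base 10 leaves
           AH = 5 and AL = 3. */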
    case 0xd5: /* aad */
        val = cpu_ldub_code(env, s->pc++);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
    /************************/
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
        /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
            goto do_xchg_reg_eax;
        if (prefixes & PREFIX_REPZ) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->is_jmp = DISAS_TB_JUMP;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_helper_fwait(cpu_env);
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xcd: /* int N */
        val = cpu_ldub_code(env, s->pc++);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xce: /* into */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
        gen_debug(s, pc_start - s->cs_base);
        tb_flush(CPU(x86_env_get_cpu(env)));
        qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
    case 0xfa: /* cli */
        if (s->cpl <= s->iopl) {
            gen_helper_cli(cpu_env);
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_cli(cpu_env);
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    case 0xfb: /* sti */
        if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
            gen_helper_sti(cpu_env);
            /* interruptions are enabled only the first insn after sti */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob_inhibit_irq(s, true);
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    case 0x62: /* bound */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(ot, cpu_T0, reg);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
        gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            gen_op_mov_v_reg(MO_64, cpu_T0, reg);
            tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
        gen_op_mov_v_reg(MO_32, cpu_T0, reg);
        tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
        tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(MO_32, reg, cpu_T0);
    case 0xd6: /* salc */
        gen_compute_eflags_c(s, cpu_T0);
        tcg_gen_neg_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
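        /* Illustrative note: gen_compute_eflags_c() leaves the carry as 0 or
           1 in T0, and negating it yields 0 or all-ones, so the low byte
           written to AL is 0x00 when CF is clear and 0xff when CF is set,
           which is the undocumented SALC behaviour. */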
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        TCGLabel *l1, *l2, *l3;
        tval = (int8_t)insn_get(env, s, MO_8);
        next_eip = s->pc - s->cs_base;
        if (dflag == MO_16) {
        l1 = gen_new_label();
        l2 = gen_new_label();
        l3 = gen_new_label();
    case 0: /* loopnz */
        gen_op_add_reg_im(s->aflag, R_ECX, -1);
        gen_op_jz_ecx(s->aflag, l3);
        gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
        gen_op_add_reg_im(s->aflag, R_ECX, -1);
        gen_op_jnz_ecx(s->aflag, l1);
        gen_op_jz_ecx(s->aflag, l1);
        gen_jmp_im(next_eip);
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdmsr(cpu_env);
        gen_helper_wrmsr(cpu_env);
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_helper_rdtsc(cpu_env);
        if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdpmc(cpu_env);
    case 0x134: /* sysenter */
        /* For Intel SYSENTER is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_helper_sysenter(cpu_env);
    case 0x135: /* sysexit */
        /* For Intel SYSEXIT is valid on 64-bit */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode ? */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
    case 0x107: /* sysret */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
        /* condition codes are modified only in long mode */
        set_cc_op(s, CC_OP_EFLAGS);
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_cpuid(cpu_env);
    case 0xf4: /* hlt */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
        s->is_jmp = DISAS_TB_JUMP;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        if (!s->pe || s->vm86)
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
        tcg_gen_ld32u_tl(cpu_T0, cpu_env,
                         offsetof(CPUX86State, ldt.selector));
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        if (!s->pe || s->vm86)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_lldt(cpu_env, cpu_tmp2_i32);
        if (!s->pe || s->vm86)
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
        tcg_gen_ld32u_tl(cpu_T0, cpu_env,
                         offsetof(CPUX86State, tr.selector));
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        if (!s->pe || s->vm86)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_ltr(cpu_env, cpu_tmp2_i32);
        if (!s->pe || s->vm86)
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_update_cc_op(s);
        gen_helper_verr(cpu_env, cpu_T0);
        gen_helper_verw(cpu_env, cpu_T0);
        set_cc_op(s, CC_OP_EFLAGS);
        modrm = cpu_ldub_code(env, s->pc++);
    CASE_MODRM_MEM_OP(0): /* sgdt */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_ld32u_tl(cpu_T0,
                         cpu_env, offsetof(CPUX86State, gdt.limit));
        gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
        gen_add_A0_im(s, 2);
        tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
        if (dflag == MO_16) {
            tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
        gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
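        /* Illustrative recap of the descriptor-table image built above: the
           16-bit limit is stored first, then the base at offset 2 (4 bytes,
           or 8 in 64-bit code); with a 16-bit operand size the base is
           truncated to 24 bits by the 0xffffff mask, mirroring legacy SGDT
           behaviour. */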
    case 0xc8: /* monitor */
        if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
        gen_extu(s->aflag, cpu_A0);
        gen_add_A0_ds_seg(s);
        gen_helper_monitor(cpu_env, cpu_A0);
    case 0xc9: /* mwait */
        if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
    case 0xca: /* clac */
        if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
        gen_helper_clac(cpu_env);
        gen_jmp_im(s->pc - s->cs_base);
    case 0xcb: /* stac */
        if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
        gen_helper_stac(cpu_env);
        gen_jmp_im(s->pc - s->cs_base);
    CASE_MODRM_MEM_OP(1): /* sidt */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
        gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
        gen_add_A0_im(s, 2);
        tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
        if (dflag == MO_16) {
            tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
        gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
    case 0xd0: /* xgetbv */
        if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
            || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                             | PREFIX_REPZ | PREFIX_REPNZ))) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
        gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
    case 0xd1: /* xsetbv */
        if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
            || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                             | PREFIX_REPZ | PREFIX_REPNZ))) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                              cpu_regs[R_EDX]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
        gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
        /* End TB because translation flags may change. */
        gen_jmp_im(s->pc - pc_start);
        case 0xd8: /* VMRUN */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
                             tcg_const_i32(s->pc - pc_start));
            s->is_jmp = DISAS_TB_JUMP;
        case 0xd9: /* VMMCALL */
            if (!(s->flags & HF_SVME_MASK)) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmmcall(cpu_env);
        case 0xda: /* VMLOAD */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
        case 0xdb: /* VMSAVE */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
        case 0xdc: /* STGI */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_stgi(cpu_env);
        case 0xdd: /* CLGI */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_clgi(cpu_env);
        case 0xde: /* SKINIT */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_skinit(cpu_env);
        case 0xdf: /* INVLPGA */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
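        /* lgdt/lidt read a 16-bit limit followed by the base from memory;
           with a 16-bit operand size only the low 24 bits of the base are
           kept. */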
        CASE_MODRM_MEM_OP(2): /* lgdt */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
        CASE_MODRM_MEM_OP(3): /* lidt */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
        CASE_MODRM_OP(4): /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        CASE_MODRM_OP(6): /* lmsw */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_helper_lmsw(cpu_env, cpu_T0);
            gen_jmp_im(s->pc - s->cs_base);
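        /* invlpg flushes the TLB entry for the given linear address via a
           helper, then ends the block. */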
        CASE_MODRM_MEM_OP(7): /* invlpg */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, cpu_A0);
            gen_jmp_im(s->pc - s->cs_base);
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
            tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                          offsetof(CPUX86State, kernelgsbase));
            tcg_gen_st_tl(cpu_T0, cpu_env,
                          offsetof(CPUX86State, kernelgsbase));
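        /* rdtscp reads virtual time: under icount (CF_USE_ICOUNT) the helper
           call has to sit inside a gen_io_start()/gen_io_end() pair so the
           timestamp stays deterministic. */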
        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            if (s->tb->cflags & CF_USE_ICOUNT) {
            gen_helper_rdtscp(cpu_env);
            if (s->tb->cflags & CF_USE_ICOUNT) {
                gen_jmp(s, s->pc - s->cs_base);
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
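    /* Opcode 0x63 is ARPL in 16/32-bit code but MOVSXD (movslq) when
       executed in 64-bit code. */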
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        /* d_ot is the size of the destination */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(MO_32, cpu_T0, rm);
        if (d_ot == MO_64) {
            tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
        }
        gen_op_mov_reg_v(d_ot, reg, cpu_T0);
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(d_ot, reg, cpu_T0);
        TCGv t0, t1, t2, a0;
        if (!s->pe || s->vm86)
        t0 = tcg_temp_local_new();
        t1 = tcg_temp_local_new();
        t2 = tcg_temp_local_new();
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, t0, cpu_A0);
        a0 = tcg_temp_local_new();
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_mov_v_reg(ot, t0, rm);
        gen_op_mov_v_reg(ot, t1, reg);
        tcg_gen_andi_tl(cpu_tmp0, t0, 3);
        tcg_gen_andi_tl(t1, t1, 3);
        tcg_gen_movi_tl(t2, 0);
        label1 = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
        tcg_gen_andi_tl(t0, t0, ~3);
        tcg_gen_or_tl(t0, t0, t1);
        tcg_gen_movi_tl(t2, CC_Z);
        gen_set_label(label1);
        gen_op_st_v(s, ot, t0, a0);
        gen_op_mov_reg_v(ot, rm, t0);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
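    /* lar/lsl: the helpers leave CC_Z set in cc_src when the selector is
       valid, and the result is written back only in that case (see the
       conditional branch below). */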
    case 0x102: /* lar */
    case 0x103: /* lsl */
        if (!s->pe || s->vm86)
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        t0 = tcg_temp_local_new();
        gen_update_cc_op(s);
        gen_helper_lar(t0, cpu_env, cpu_T0);
        gen_helper_lsl(t0, cpu_env, cpu_T0);
        tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
        label1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
        gen_op_mov_reg_v(ot, reg, t0);
        gen_set_label(label1);
        set_cc_op(s, CC_OP_EFLAGS);
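    /* 0f 18: prefetch hints and reserved hint encodings; none of them need
       any generated code beyond decoding the modrm byte. */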
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
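    /* 0f 1a / 0f 1b are the MPX instructions (bndcl, bndcu, bndmov, bndldx,
       bndmk, bndstx), distinguished by the repz/repnz/data prefixes; when
       MPX is not enabled they degrade to multi-byte NOPs. */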
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (prefixes & PREFIX_REPZ) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
                tcg_temp_free_i64(notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    gen_lea_modrm(env, s, modrm);
                    tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                        s->mem_index, MO_LEQ);
                    tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                    tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                        s->mem_index, MO_LEQ);
                    tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                        s->mem_index, MO_LEUL);
                    tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                    tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                        s->mem_index, MO_LEUL);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            } else if (mod != 3) {
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                tcg_gen_movi_tl(cpu_A0, 0);
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                tcg_gen_movi_tl(cpu_T0, 0);
                gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
                tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                               offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
                tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                gen_set_hflag(s, HF_MPX_IU_MASK);
        gen_nop_modrm(env, s, modrm);
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                } else if (a.base == -1) {
                    /* no base register: the lower bound is 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                /* rip-relative generates #ud */
                tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            } else if (prefixes & PREFIX_REPNZ) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    gen_lea_modrm(env, s, modrm);
                    tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                        s->mem_index, MO_LEQ);
                    tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                    tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                        s->mem_index, MO_LEQ);
                    tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                        s->mem_index, MO_LEUL);
                    tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                    tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                        s->mem_index, MO_LEUL);
            } else if (mod != 3) {
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                tcg_gen_movi_tl(cpu_A0, 0);
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                tcg_gen_movi_tl(cpu_T0, 0);
                gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
                                    cpu_bndl[reg], cpu_bndu[reg]);
                gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
                                    cpu_bndl[reg], cpu_bndu[reg]);
        gen_nop_modrm(env, s, modrm);
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
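    /* Moves to and from control registers are privileged; writes go through
       a helper since they may change paging or mode bits, and the block is
       ended afterwards. */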
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        modrm = cpu_ldub_code(env, s->pc++);
        /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of
         * Intel 386 and 486 processors all show that the mod bits
         * are assumed to be 1's, regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
            (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
        gen_update_cc_op(s);
        gen_jmp_im(pc_start - s->cs_base);
        gen_op_mov_v_reg(ot, cpu_T0, rm);
        gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                             cpu_T0);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
        gen_op_mov_reg_v(ot, rm, cpu_T0);
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        modrm = cpu_ldub_code(env, s->pc++);
        /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of
         * Intel 386 and 486 processors all show that the mod bits
         * are assumed to be 1's, regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
        gen_op_mov_v_reg(ot, cpu_T0, rm);
        tcg_gen_movi_i32(cpu_tmp2_i32, reg);
        gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
        gen_jmp_im(s->pc - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
        tcg_gen_movi_i32(cpu_tmp2_i32, reg);
        gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
        gen_op_mov_reg_v(ot, rm, cpu_T0);
    case 0x106: /* clts */
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
        gen_helper_clts(cpu_env);
        /* abort block because static cpu state changed */
        gen_jmp_im(s->pc - s->cs_base);
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
        ot = mo_64_32(dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
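    /* 0f ae group: the modrm reg field selects fxsave/fxrstor, ldmxcsr/
       stmxcsr, xsave/xrstor, xsaveopt/clwb or clflush/clflushopt, while the
       register forms encode the fences and the fsgsbase instructions. */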
        modrm = cpu_ldub_code(env, s->pc++);
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, cpu_A0);
        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, cpu_A0);
        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
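        /* xsave/xrstor/xsaveopt take the requested-feature bitmap from
           EDX:EAX, passed to the helper as a single 64-bit value. */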
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            gen_update_cc_op(s);
            gen_jmp_im(s->pc - s->cs_base);
        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
            if (prefixes & PREFIX_DATA) {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                gen_nop_modrm(env, s, modrm);
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
            if (prefixes & PREFIX_DATA) {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
            gen_nop_modrm(env, s, modrm);
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;
                /* Preserve hflags bits by testing CR4 at runtime. */
                tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];
                    dst = base, src = treg;
                    dst = treg, src = base;
                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                    tcg_gen_mov_tl(dst, src);
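        /* sfence shares its encoding with pcommit (0x66 prefix); the fence
           forms need no generated code since TCG does not reorder the
           guest's memory accesses. */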
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
        case 0xf9 ... 0xff: /* sfence */
        case 0xe8 ... 0xef: /* lfence */
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        gen_nop_modrm(env, s, modrm);
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (s->prefix & PREFIX_DATA) {
        ot = mo_64_32(dflag);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
        gen_op_mov_reg_v(ot, reg, cpu_T0);
        set_cc_op(s, CC_OP_EFLAGS);
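    /* Everything else in the 0f map that reaches this point is an
       MMX/SSE-family opcode and is decoded by gen_sse(). */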
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_illegal_opcode(s);
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_unknown_opcode(env, s);
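/* Register the TCG globals that mirror CPUX86State: general registers,
   the cc_* flag sources, the segment bases and the MPX bound registers. */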
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
    static const char seg_base_names[6][8] = {
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    target_ulong pc_start;
    target_ulong cs_base;

    /* generate intermediate code */
    cs_base = tb->cs_base;
    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = cpu_mmu_index(env, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes.  The first one was always
       used except in single step mode.  And this setting disables
       the jump optimization, so the control paths become equivalent
       in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);

    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    dc->is_jmp = DISAS_NEXT;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;

        tcg_gen_insn_start(pc_ptr, dc->cc_op);

        /* If RF is set, suppress an internally generated breakpoint.  */
        if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
                                         tb->flags & HF_RF_MASK
                                         ? BP_GDB : BP_ANY))) {
            gen_debug(dc, pc_ptr - dc->cs_base);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            goto done_generating;
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception.  Do it only when boundary is
           crossed by the first instruction in the block.
           If current instruction already crossed the bound - it's ok,
           because an exception hasn't stopped this code.
         */
        if ((tb->cflags & CF_USE_ICOUNT)
            && ((pc_ptr & TARGET_PAGE_MASK)
                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
        /* if too long translation, stop generation too */
        if (tcg_op_buf_full() ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
        gen_jmp_im(pc_ptr - dc->cs_base);
    if (tb->cflags & CF_LAST_IO)
    gen_tb_end(tb, num_insns);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        disas_flags = !dc->code32;
        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
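/* Rebuild EIP and cc_op from the values recorded by tcg_gen_insn_start()
   at translation time. */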
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {