/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
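
/* Unlike CASE_MODRM_MEM_OP, CASE_MODRM_OP also matches mod == 3, i.e. the
   register-direct forms of the same /OP opcode group. */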
//#define MACRO_TEST   1
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

static int x86_64_hregs;
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc_start;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op; /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_xsave_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
};
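
/* set_cc_op() below consults this table: when the tracked CC_OP changes, any
   cc_* global that the new encoding no longer reads is discarded, so TCG can
   optimise away the dead computation. */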
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
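
/* For example, with no REX prefix, reg 4 in a byte-sized encoding selects AH
   (bits 15..8 of EAX); once any REX prefix is seen (x86_64_hregs), reg 4
   selects SPL instead, so the xH special case no longer applies. */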
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline TCGMemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
    return ot == MO_64 ? MO_64 : MO_32;
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}
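
/* Note the partial-register behaviour above: the MO_8 and MO_16 cases deposit
   into the low bits and leave the rest of the register untouched, while MO_32
   zero-extends into the full register, matching x86 semantics. */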
static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
    }
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T0);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}
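
/* gen_jmp_im() stores a known EIP into env->eip; the exception and interrupt
   paths further down call it (after gen_update_cc_op) so that the CPU state
   is consistent before a helper can raise an exception. */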
/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(cpu_A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && s->addseg) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(cpu_A0, a0);
        a0 = cpu_A0;
        if (ovr_seg < 0) {
            if (s->addseg) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        tcg_abort();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(cpu_A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
        } else {
            tcg_gen_add_tl(cpu_A0, a0, seg);
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
    }
}
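
/* Outside 64-bit mode the result is truncated to 32 bits after the segment
   base has been added, so segment-relative addresses wrap around just as on
   hardware. */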
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
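
/* gen_op_jz_ecx()/gen_op_jnz_ecx() zero-extend ECX to the current address
   size before the compare, so a REP loop with 16-bit addressing only tests
   CX.  They provide the termination tests used by the GEN_REPZ string-op
   wrappers further down. */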
571 static void gen_helper_in_func(TCGMemOp ot
, TCGv v
, TCGv_i32 n
)
575 gen_helper_inb(v
, cpu_env
, n
);
578 gen_helper_inw(v
, cpu_env
, n
);
581 gen_helper_inl(v
, cpu_env
, n
);
588 static void gen_helper_out_func(TCGMemOp ot
, TCGv_i32 v
, TCGv_i32 n
)
592 gen_helper_outb(cpu_env
, v
, n
);
595 gen_helper_outw(cpu_env
, v
, n
);
598 gen_helper_outl(cpu_env
, v
, n
);
605 static void gen_check_io(DisasContext
*s
, TCGMemOp ot
, target_ulong cur_eip
,
608 target_ulong next_eip
;
610 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
611 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
614 gen_helper_check_iob(cpu_env
, cpu_tmp2_i32
);
617 gen_helper_check_iow(cpu_env
, cpu_tmp2_i32
);
620 gen_helper_check_iol(cpu_env
, cpu_tmp2_i32
);
626 if(s
->flags
& HF_SVMI_MASK
) {
629 svm_flags
|= (1 << (4 + ot
));
630 next_eip
= s
->pc
- s
->cs_base
;
631 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
632 gen_helper_svm_check_io(cpu_env
, cpu_tmp2_i32
,
633 tcg_const_i32(svm_flags
),
634 tcg_const_i32(next_eip
- cur_eip
));
638 static inline void gen_movs(DisasContext
*s
, TCGMemOp ot
)
640 gen_string_movl_A0_ESI(s
);
641 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
642 gen_string_movl_A0_EDI(s
);
643 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
644 gen_op_movl_T0_Dshift(ot
);
645 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
646 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
649 static void gen_op_update1_cc(void)
651 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
654 static void gen_op_update2_cc(void)
656 tcg_gen_mov_tl(cpu_cc_src
, cpu_T1
);
657 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
660 static void gen_op_update3_cc(TCGv reg
)
662 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
663 tcg_gen_mov_tl(cpu_cc_src
, cpu_T1
);
664 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
667 static inline void gen_op_testl_T0_T1_cc(void)
669 tcg_gen_and_tl(cpu_cc_dst
, cpu_T0
, cpu_T1
);
672 static void gen_op_update_neg_cc(void)
674 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
675 tcg_gen_neg_tl(cpu_cc_src
, cpu_T0
);
676 tcg_gen_movi_tl(cpu_cc_srcT
, 0);
679 /* compute all eflags to cc_src */
680 static void gen_compute_eflags(DisasContext
*s
)
682 TCGv zero
, dst
, src1
, src2
;
685 if (s
->cc_op
== CC_OP_EFLAGS
) {
688 if (s
->cc_op
== CC_OP_CLR
) {
689 tcg_gen_movi_tl(cpu_cc_src
, CC_Z
| CC_P
);
690 set_cc_op(s
, CC_OP_EFLAGS
);
699 /* Take care to not read values that are not live. */
700 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
701 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
703 zero
= tcg_const_tl(0);
704 if (dead
& USES_CC_DST
) {
707 if (dead
& USES_CC_SRC
) {
710 if (dead
& USES_CC_SRC2
) {
716 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
717 set_cc_op(s
, CC_OP_EFLAGS
);
724 typedef struct CCPrepare
{
734 /* compute eflags.C to reg */
735 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
741 case CC_OP_SUBB
... CC_OP_SUBQ
:
742 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
743 size
= s
->cc_op
- CC_OP_SUBB
;
744 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
745 /* If no temporary was used, be careful not to alias t1 and t0. */
746 t0
= TCGV_EQUAL(t1
, cpu_cc_src
) ? cpu_tmp0
: reg
;
747 tcg_gen_mov_tl(t0
, cpu_cc_srcT
);
751 case CC_OP_ADDB
... CC_OP_ADDQ
:
752 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
753 size
= s
->cc_op
- CC_OP_ADDB
;
754 t1
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
755 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
757 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
758 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
760 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
762 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
764 case CC_OP_INCB
... CC_OP_INCQ
:
765 case CC_OP_DECB
... CC_OP_DECQ
:
766 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
767 .mask
= -1, .no_setcond
= true };
769 case CC_OP_SHLB
... CC_OP_SHLQ
:
770 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
771 size
= s
->cc_op
- CC_OP_SHLB
;
772 shift
= (8 << size
) - 1;
773 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
774 .mask
= (target_ulong
)1 << shift
};
776 case CC_OP_MULB
... CC_OP_MULQ
:
777 return (CCPrepare
) { .cond
= TCG_COND_NE
,
778 .reg
= cpu_cc_src
, .mask
= -1 };
780 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
781 size
= s
->cc_op
- CC_OP_BMILGB
;
782 t0
= gen_ext_tl(reg
, cpu_cc_src
, size
, false);
783 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
787 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
788 .mask
= -1, .no_setcond
= true };
791 case CC_OP_SARB
... CC_OP_SARQ
:
793 return (CCPrepare
) { .cond
= TCG_COND_NE
,
794 .reg
= cpu_cc_src
, .mask
= CC_C
};
797 /* The need to compute only C from CC_OP_DYNAMIC is important
798 in efficiently implementing e.g. INC at the start of a TB. */
800 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
801 cpu_cc_src2
, cpu_cc_op
);
802 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
803 .mask
= -1, .no_setcond
= true };
807 /* compute eflags.P to reg */
808 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
810 gen_compute_eflags(s
);
811 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
815 /* compute eflags.S to reg */
816 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
820 gen_compute_eflags(s
);
826 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
829 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
832 TCGMemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
833 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
834 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
839 /* compute eflags.O to reg */
840 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
845 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
846 .mask
= -1, .no_setcond
= true };
848 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
850 gen_compute_eflags(s
);
851 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
856 /* compute eflags.Z to reg */
857 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
861 gen_compute_eflags(s
);
867 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
870 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
873 TCGMemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
874 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
875 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
880 /* perform a conditional store into register 'reg' according to jump opcode
881 value 'b'. In the fast case, T0 is guaranted not to be used. */
882 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
884 int inv
, jcc_op
, cond
;
890 jcc_op
= (b
>> 1) & 7;
893 case CC_OP_SUBB
... CC_OP_SUBQ
:
894 /* We optimize relational operators for the cmp/jcc case. */
895 size
= s
->cc_op
- CC_OP_SUBB
;
898 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
899 gen_extu(size
, cpu_tmp4
);
900 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, false);
901 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= cpu_tmp4
,
902 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
911 tcg_gen_mov_tl(cpu_tmp4
, cpu_cc_srcT
);
912 gen_exts(size
, cpu_tmp4
);
913 t0
= gen_ext_tl(cpu_tmp0
, cpu_cc_src
, size
, true);
914 cc
= (CCPrepare
) { .cond
= cond
, .reg
= cpu_tmp4
,
915 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
925 /* This actually generates good code for JC, JZ and JS. */
928 cc
= gen_prepare_eflags_o(s
, reg
);
931 cc
= gen_prepare_eflags_c(s
, reg
);
934 cc
= gen_prepare_eflags_z(s
, reg
);
937 gen_compute_eflags(s
);
938 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
939 .mask
= CC_Z
| CC_C
};
942 cc
= gen_prepare_eflags_s(s
, reg
);
945 cc
= gen_prepare_eflags_p(s
, reg
);
948 gen_compute_eflags(s
);
949 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
952 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
953 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
954 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
959 gen_compute_eflags(s
);
960 if (TCGV_EQUAL(reg
, cpu_cc_src
)) {
963 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
964 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
965 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
966 .mask
= CC_S
| CC_Z
};
973 cc
.cond
= tcg_invert_cond(cc
.cond
);
978 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
980 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
983 if (cc
.cond
== TCG_COND_EQ
) {
984 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
986 tcg_gen_mov_tl(reg
, cc
.reg
);
991 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
992 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
993 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
994 tcg_gen_andi_tl(reg
, reg
, 1);
998 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1002 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1004 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1008 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1010 gen_setcc1(s
, JCC_B
<< 1, reg
);
1013 /* generate a conditional jump to label 'l1' according to jump opcode
1014 value 'b'. In the fast case, T0 is guaranted not to be used. */
1015 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, TCGLabel
*l1
)
1017 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T0
);
1019 if (cc
.mask
!= -1) {
1020 tcg_gen_andi_tl(cpu_T0
, cc
.reg
, cc
.mask
);
1024 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1026 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1030 /* Generate a conditional jump to label 'l1' according to jump opcode
1031 value 'b'. In the fast case, T0 is guaranted not to be used.
1032 A translation block must end soon. */
1033 static inline void gen_jcc1(DisasContext
*s
, int b
, TCGLabel
*l1
)
1035 CCPrepare cc
= gen_prepare_cc(s
, b
, cpu_T0
);
1037 gen_update_cc_op(s
);
1038 if (cc
.mask
!= -1) {
1039 tcg_gen_andi_tl(cpu_T0
, cc
.reg
, cc
.mask
);
1042 set_cc_op(s
, CC_OP_DYNAMIC
);
1044 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1046 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1050 /* XXX: does not work with gdbstub "ice" single step - not a
1052 static TCGLabel
*gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1054 TCGLabel
*l1
= gen_new_label();
1055 TCGLabel
*l2
= gen_new_label();
1056 gen_op_jnz_ecx(s
->aflag
, l1
);
1058 gen_jmp_tb(s
, next_eip
, 1);
1063 static inline void gen_stos(DisasContext
*s
, TCGMemOp ot
)
1065 gen_op_mov_v_reg(MO_32
, cpu_T0
, R_EAX
);
1066 gen_string_movl_A0_EDI(s
);
1067 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
1068 gen_op_movl_T0_Dshift(ot
);
1069 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1072 static inline void gen_lods(DisasContext
*s
, TCGMemOp ot
)
1074 gen_string_movl_A0_ESI(s
);
1075 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1076 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T0
);
1077 gen_op_movl_T0_Dshift(ot
);
1078 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1081 static inline void gen_scas(DisasContext
*s
, TCGMemOp ot
)
1083 gen_string_movl_A0_EDI(s
);
1084 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
1085 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1086 gen_op_movl_T0_Dshift(ot
);
1087 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1090 static inline void gen_cmps(DisasContext
*s
, TCGMemOp ot
)
1092 gen_string_movl_A0_EDI(s
);
1093 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
1094 gen_string_movl_A0_ESI(s
);
1095 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1096 gen_op_movl_T0_Dshift(ot
);
1097 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1098 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1101 static void gen_bpt_io(DisasContext
*s
, TCGv_i32 t_port
, int ot
)
1103 if (s
->flags
& HF_IOBPT_MASK
) {
1104 TCGv_i32 t_size
= tcg_const_i32(1 << ot
);
1105 TCGv t_next
= tcg_const_tl(s
->pc
- s
->cs_base
);
1107 gen_helper_bpt_io(cpu_env
, t_port
, t_size
, t_next
);
1108 tcg_temp_free_i32(t_size
);
1109 tcg_temp_free(t_next
);
1114 static inline void gen_ins(DisasContext
*s
, TCGMemOp ot
)
1116 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1119 gen_string_movl_A0_EDI(s
);
1120 /* Note: we must do this dummy write first to be restartable in
1121 case of page fault. */
1122 tcg_gen_movi_tl(cpu_T0
, 0);
1123 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
1124 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_EDX
]);
1125 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1126 gen_helper_in_func(ot
, cpu_T0
, cpu_tmp2_i32
);
1127 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
1128 gen_op_movl_T0_Dshift(ot
);
1129 gen_op_add_reg_T0(s
->aflag
, R_EDI
);
1130 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
1131 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1136 static inline void gen_outs(DisasContext
*s
, TCGMemOp ot
)
1138 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1141 gen_string_movl_A0_ESI(s
);
1142 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1144 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_EDX
]);
1145 tcg_gen_andi_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 0xffff);
1146 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T0
);
1147 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1148 gen_op_movl_T0_Dshift(ot
);
1149 gen_op_add_reg_T0(s
->aflag
, R_ESI
);
1150 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
1151 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
1156 /* same method as Valgrind : we generate jumps to current or next
1158 #define GEN_REPZ(op) \
1159 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1160 target_ulong cur_eip, target_ulong next_eip) \
1163 gen_update_cc_op(s); \
1164 l2 = gen_jz_ecx_string(s, next_eip); \
1165 gen_ ## op(s, ot); \
1166 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1167 /* a loop would cause two single step exceptions if ECX = 1 \
1168 before rep string_insn */ \
1170 gen_op_jz_ecx(s->aflag, l2); \
1171 gen_jmp(s, cur_eip); \
1174 #define GEN_REPZ2(op) \
1175 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1176 target_ulong cur_eip, \
1177 target_ulong next_eip, \
1181 gen_update_cc_op(s); \
1182 l2 = gen_jz_ecx_string(s, next_eip); \
1183 gen_ ## op(s, ot); \
1184 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1185 gen_update_cc_op(s); \
1186 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1188 gen_op_jz_ecx(s->aflag, l2); \
1189 gen_jmp(s, cur_eip); \
1200 static void gen_helper_fp_arith_ST0_FT0(int op
)
1204 gen_helper_fadd_ST0_FT0(cpu_env
);
1207 gen_helper_fmul_ST0_FT0(cpu_env
);
1210 gen_helper_fcom_ST0_FT0(cpu_env
);
1213 gen_helper_fcom_ST0_FT0(cpu_env
);
1216 gen_helper_fsub_ST0_FT0(cpu_env
);
1219 gen_helper_fsubr_ST0_FT0(cpu_env
);
1222 gen_helper_fdiv_ST0_FT0(cpu_env
);
1225 gen_helper_fdivr_ST0_FT0(cpu_env
);
1230 /* NOTE the exception in "r" op ordering */
1231 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1233 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1236 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1239 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1242 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1245 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1248 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1251 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1256 /* if d == OR_TMP0, it means memory operand (address in A0) */
1257 static void gen_op(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
)
1260 gen_op_mov_v_reg(ot
, cpu_T0
, d
);
1262 gen_op_ld_v(s1
, ot
, cpu_T0
, cpu_A0
);
1266 gen_compute_eflags_c(s1
, cpu_tmp4
);
1267 tcg_gen_add_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1268 tcg_gen_add_tl(cpu_T0
, cpu_T0
, cpu_tmp4
);
1269 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1270 gen_op_update3_cc(cpu_tmp4
);
1271 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1274 gen_compute_eflags_c(s1
, cpu_tmp4
);
1275 tcg_gen_sub_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1276 tcg_gen_sub_tl(cpu_T0
, cpu_T0
, cpu_tmp4
);
1277 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1278 gen_op_update3_cc(cpu_tmp4
);
1279 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1282 tcg_gen_add_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1283 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1284 gen_op_update2_cc();
1285 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1288 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T0
);
1289 tcg_gen_sub_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1290 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1291 gen_op_update2_cc();
1292 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1296 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1297 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1298 gen_op_update1_cc();
1299 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1302 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1303 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1304 gen_op_update1_cc();
1305 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1308 tcg_gen_xor_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1309 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1310 gen_op_update1_cc();
1311 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1314 tcg_gen_mov_tl(cpu_cc_src
, cpu_T1
);
1315 tcg_gen_mov_tl(cpu_cc_srcT
, cpu_T0
);
1316 tcg_gen_sub_tl(cpu_cc_dst
, cpu_T0
, cpu_T1
);
1317 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1322 /* if d == OR_TMP0, it means memory operand (address in A0) */
1323 static void gen_inc(DisasContext
*s1
, TCGMemOp ot
, int d
, int c
)
1326 gen_op_mov_v_reg(ot
, cpu_T0
, d
);
1328 gen_op_ld_v(s1
, ot
, cpu_T0
, cpu_A0
);
1330 gen_compute_eflags_c(s1
, cpu_cc_src
);
1332 tcg_gen_addi_tl(cpu_T0
, cpu_T0
, 1);
1333 set_cc_op(s1
, CC_OP_INCB
+ ot
);
1335 tcg_gen_addi_tl(cpu_T0
, cpu_T0
, -1);
1336 set_cc_op(s1
, CC_OP_DECB
+ ot
);
1338 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1339 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
1342 static void gen_shift_flags(DisasContext
*s
, TCGMemOp ot
, TCGv result
,
1343 TCGv shm1
, TCGv count
, bool is_right
)
1345 TCGv_i32 z32
, s32
, oldop
;
1348 /* Store the results into the CC variables. If we know that the
1349 variable must be dead, store unconditionally. Otherwise we'll
1350 need to not disrupt the current contents. */
1351 z_tl
= tcg_const_tl(0);
1352 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1353 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1354 result
, cpu_cc_dst
);
1356 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1358 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1359 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1362 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1364 tcg_temp_free(z_tl
);
1366 /* Get the two potential CC_OP values into temporaries. */
1367 tcg_gen_movi_i32(cpu_tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1368 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1371 tcg_gen_movi_i32(cpu_tmp3_i32
, s
->cc_op
);
1372 oldop
= cpu_tmp3_i32
;
1375 /* Conditionally store the CC_OP value. */
1376 z32
= tcg_const_i32(0);
1377 s32
= tcg_temp_new_i32();
1378 tcg_gen_trunc_tl_i32(s32
, count
);
1379 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, cpu_tmp2_i32
, oldop
);
1380 tcg_temp_free_i32(z32
);
1381 tcg_temp_free_i32(s32
);
1383 /* The CC_OP value is no longer predictable. */
1384 set_cc_op(s
, CC_OP_DYNAMIC
);
1387 static void gen_shift_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1388 int is_right
, int is_arith
)
1390 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1393 if (op1
== OR_TMP0
) {
1394 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1396 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1399 tcg_gen_andi_tl(cpu_T1
, cpu_T1
, mask
);
1400 tcg_gen_subi_tl(cpu_tmp0
, cpu_T1
, 1);
1404 gen_exts(ot
, cpu_T0
);
1405 tcg_gen_sar_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1406 tcg_gen_sar_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1408 gen_extu(ot
, cpu_T0
);
1409 tcg_gen_shr_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1410 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1413 tcg_gen_shl_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1414 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1418 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1420 gen_shift_flags(s
, ot
, cpu_T0
, cpu_tmp0
, cpu_T1
, is_right
);
1423 static void gen_shift_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1424 int is_right
, int is_arith
)
1426 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1430 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1432 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1438 gen_exts(ot
, cpu_T0
);
1439 tcg_gen_sari_tl(cpu_tmp4
, cpu_T0
, op2
- 1);
1440 tcg_gen_sari_tl(cpu_T0
, cpu_T0
, op2
);
1442 gen_extu(ot
, cpu_T0
);
1443 tcg_gen_shri_tl(cpu_tmp4
, cpu_T0
, op2
- 1);
1444 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, op2
);
1447 tcg_gen_shli_tl(cpu_tmp4
, cpu_T0
, op2
- 1);
1448 tcg_gen_shli_tl(cpu_T0
, cpu_T0
, op2
);
1453 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1455 /* update eflags if non zero shift */
1457 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
1458 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
1459 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1463 static void gen_rot_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
, int is_right
)
1465 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1469 if (op1
== OR_TMP0
) {
1470 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1472 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1475 tcg_gen_andi_tl(cpu_T1
, cpu_T1
, mask
);
1479 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1480 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
1481 tcg_gen_muli_tl(cpu_T0
, cpu_T0
, 0x01010101);
1484 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1485 tcg_gen_deposit_tl(cpu_T0
, cpu_T0
, cpu_T0
, 16, 16);
1488 #ifdef TARGET_X86_64
1490 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
1491 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
1493 tcg_gen_rotr_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1495 tcg_gen_rotl_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
1497 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
1502 tcg_gen_rotr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1504 tcg_gen_rotl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1510 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1512 /* We'll need the flags computed into CC_SRC. */
1513 gen_compute_eflags(s
);
1515 /* The value that was "rotated out" is now present at the other end
1516 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1517 since we've computed the flags into CC_SRC, these variables are
1520 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
- 1);
1521 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T0
, mask
);
1522 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1524 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
);
1525 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T0
, 1);
1527 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1528 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1530 /* Now conditionally store the new CC_OP value. If the shift count
1531 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1532 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1533 exactly as we computed above. */
1534 t0
= tcg_const_i32(0);
1535 t1
= tcg_temp_new_i32();
1536 tcg_gen_trunc_tl_i32(t1
, cpu_T1
);
1537 tcg_gen_movi_i32(cpu_tmp2_i32
, CC_OP_ADCOX
);
1538 tcg_gen_movi_i32(cpu_tmp3_i32
, CC_OP_EFLAGS
);
1539 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1540 cpu_tmp2_i32
, cpu_tmp3_i32
);
1541 tcg_temp_free_i32(t0
);
1542 tcg_temp_free_i32(t1
);
1544 /* The CC_OP value is no longer predictable. */
1545 set_cc_op(s
, CC_OP_DYNAMIC
);
1548 static void gen_rot_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1551 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1555 if (op1
== OR_TMP0
) {
1556 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1558 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1564 #ifdef TARGET_X86_64
1566 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
1568 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1570 tcg_gen_rotli_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, op2
);
1572 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
1577 tcg_gen_rotri_tl(cpu_T0
, cpu_T0
, op2
);
1579 tcg_gen_rotli_tl(cpu_T0
, cpu_T0
, op2
);
1590 shift
= mask
+ 1 - shift
;
1592 gen_extu(ot
, cpu_T0
);
1593 tcg_gen_shli_tl(cpu_tmp0
, cpu_T0
, shift
);
1594 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, mask
+ 1 - shift
);
1595 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
1601 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1604 /* Compute the flags into CC_SRC. */
1605 gen_compute_eflags(s
);
1607 /* The value that was "rotated out" is now present at the other end
1608 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1609 since we've computed the flags into CC_SRC, these variables are
1612 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
- 1);
1613 tcg_gen_shri_tl(cpu_cc_dst
, cpu_T0
, mask
);
1614 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1616 tcg_gen_shri_tl(cpu_cc_src2
, cpu_T0
, mask
);
1617 tcg_gen_andi_tl(cpu_cc_dst
, cpu_T0
, 1);
1619 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1620 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1621 set_cc_op(s
, CC_OP_ADCOX
);
1625 /* XXX: add faster immediate = 1 case */
1626 static void gen_rotc_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1629 gen_compute_eflags(s
);
1630 assert(s
->cc_op
== CC_OP_EFLAGS
);
1634 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1636 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1641 gen_helper_rcrb(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1644 gen_helper_rcrw(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1647 gen_helper_rcrl(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1649 #ifdef TARGET_X86_64
1651 gen_helper_rcrq(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1660 gen_helper_rclb(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1663 gen_helper_rclw(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1666 gen_helper_rcll(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1668 #ifdef TARGET_X86_64
1670 gen_helper_rclq(cpu_T0
, cpu_env
, cpu_T0
, cpu_T1
);
1678 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1681 /* XXX: add faster immediate case */
1682 static void gen_shiftd_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1683 bool is_right
, TCGv count_in
)
1685 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1689 if (op1
== OR_TMP0
) {
1690 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
1692 gen_op_mov_v_reg(ot
, cpu_T0
, op1
);
1695 count
= tcg_temp_new();
1696 tcg_gen_andi_tl(count
, count_in
, mask
);
1700 /* Note: we implement the Intel behaviour for shift count > 16.
1701 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1702 portion by constructing it as a 32-bit value. */
1704 tcg_gen_deposit_tl(cpu_tmp0
, cpu_T0
, cpu_T1
, 16, 16);
1705 tcg_gen_mov_tl(cpu_T1
, cpu_T0
);
1706 tcg_gen_mov_tl(cpu_T0
, cpu_tmp0
);
1708 tcg_gen_deposit_tl(cpu_T1
, cpu_T0
, cpu_T1
, 16, 16);
1711 #ifdef TARGET_X86_64
1713 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1714 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1716 tcg_gen_concat_tl_i64(cpu_T0
, cpu_T0
, cpu_T1
);
1717 tcg_gen_shr_i64(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1718 tcg_gen_shr_i64(cpu_T0
, cpu_T0
, count
);
1720 tcg_gen_concat_tl_i64(cpu_T0
, cpu_T1
, cpu_T0
);
1721 tcg_gen_shl_i64(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1722 tcg_gen_shl_i64(cpu_T0
, cpu_T0
, count
);
1723 tcg_gen_shri_i64(cpu_tmp0
, cpu_tmp0
, 32);
1724 tcg_gen_shri_i64(cpu_T0
, cpu_T0
, 32);
1729 tcg_gen_subi_tl(cpu_tmp0
, count
, 1);
1731 tcg_gen_shr_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1733 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1734 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, count
);
1735 tcg_gen_shl_tl(cpu_T1
, cpu_T1
, cpu_tmp4
);
1737 tcg_gen_shl_tl(cpu_tmp0
, cpu_T0
, cpu_tmp0
);
1739 /* Only needed if count > 16, for Intel behaviour. */
1740 tcg_gen_subfi_tl(cpu_tmp4
, 33, count
);
1741 tcg_gen_shr_tl(cpu_tmp4
, cpu_T1
, cpu_tmp4
);
1742 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, cpu_tmp4
);
1745 tcg_gen_subfi_tl(cpu_tmp4
, mask
+ 1, count
);
1746 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, count
);
1747 tcg_gen_shr_tl(cpu_T1
, cpu_T1
, cpu_tmp4
);
1749 tcg_gen_movi_tl(cpu_tmp4
, 0);
1750 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_T1
, count
, cpu_tmp4
,
1752 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_T1
);
1757 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1759 gen_shift_flags(s
, ot
, cpu_T0
, cpu_tmp0
, count
, is_right
);
1760 tcg_temp_free(count
);
1763 static void gen_shift(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int s
)
1766 gen_op_mov_v_reg(ot
, cpu_T1
, s
);
1769 gen_rot_rm_T1(s1
, ot
, d
, 0);
1772 gen_rot_rm_T1(s1
, ot
, d
, 1);
1776 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1779 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1782 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1785 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1788 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1793 static void gen_shifti(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int c
)
1797 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1800 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1804 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1807 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1810 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1813 /* currently not optimized */
1814 tcg_gen_movi_tl(cpu_T1
, c
);
1815 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1820 /* Decompose an address. */
1822 typedef struct AddressParts
{
1830 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
1833 int def_seg
, base
, index
, scale
, mod
, rm
;
1842 mod
= (modrm
>> 6) & 3;
1844 base
= rm
| REX_B(s
);
1847 /* Normally filtered out earlier, but including this path
1848 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1857 int code
= cpu_ldub_code(env
, s
->pc
++);
1858 scale
= (code
>> 6) & 3;
1859 index
= ((code
>> 3) & 7) | REX_X(s
);
1861 index
= -1; /* no index */
1863 base
= (code
& 7) | REX_B(s
);
1869 if ((base
& 7) == 5) {
1871 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1873 if (CODE64(s
) && !havesib
) {
1875 disp
+= s
->pc
+ s
->rip_offset
;
1880 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
1884 disp
= (int32_t)cpu_ldl_code(env
, s
->pc
);
1889 /* For correct popl handling with esp. */
1890 if (base
== R_ESP
&& s
->popl_esp_hack
) {
1891 disp
+= s
->popl_esp_hack
;
1893 if (base
== R_EBP
|| base
== R_ESP
) {
1902 disp
= cpu_lduw_code(env
, s
->pc
);
1906 } else if (mod
== 1) {
1907 disp
= (int8_t)cpu_ldub_code(env
, s
->pc
++);
1909 disp
= (int16_t)cpu_lduw_code(env
, s
->pc
);
1954 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
1957 /* Compute the address, with a minimum number of TCG ops. */
1958 static TCGv
gen_lea_modrm_1(AddressParts a
)
1965 ea
= cpu_regs
[a
.index
];
1967 tcg_gen_shli_tl(cpu_A0
, cpu_regs
[a
.index
], a
.scale
);
1971 tcg_gen_add_tl(cpu_A0
, ea
, cpu_regs
[a
.base
]);
1974 } else if (a
.base
>= 0) {
1975 ea
= cpu_regs
[a
.base
];
1977 if (TCGV_IS_UNUSED(ea
)) {
1978 tcg_gen_movi_tl(cpu_A0
, a
.disp
);
1980 } else if (a
.disp
!= 0) {
1981 tcg_gen_addi_tl(cpu_A0
, ea
, a
.disp
);
1988 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1990 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
1991 TCGv ea
= gen_lea_modrm_1(a
);
1992 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
1995 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1997 (void)gen_lea_modrm_0(env
, s
, modrm
);
2000 /* Used for BNDCL, BNDCU, BNDCN. */
2001 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2002 TCGCond cond
, TCGv_i64 bndv
)
2004 TCGv ea
= gen_lea_modrm_1(gen_lea_modrm_0(env
, s
, modrm
));
2006 tcg_gen_extu_tl_i64(cpu_tmp1_i64
, ea
);
2008 tcg_gen_ext32u_i64(cpu_tmp1_i64
, cpu_tmp1_i64
);
2010 tcg_gen_setcond_i64(cond
, cpu_tmp1_i64
, cpu_tmp1_i64
, bndv
);
2011 tcg_gen_extrl_i64_i32(cpu_tmp2_i32
, cpu_tmp1_i64
);
2012 gen_helper_bndck(cpu_env
, cpu_tmp2_i32
);
2015 /* used for LEA and MOV AX, mem */
2016 static void gen_add_A0_ds_seg(DisasContext
*s
)
2018 gen_lea_v_seg(s
, s
->aflag
, cpu_A0
, R_DS
, s
->override
);
2021 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2023 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2024 TCGMemOp ot
, int reg
, int is_store
)
2028 mod
= (modrm
>> 6) & 3;
2029 rm
= (modrm
& 7) | REX_B(s
);
2033 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
2034 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
2036 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
2038 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2041 gen_lea_modrm(env
, s
, modrm
);
2044 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
2045 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
2047 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
2049 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2054 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
)
2060 ret
= cpu_ldub_code(env
, s
->pc
);
2064 ret
= cpu_lduw_code(env
, s
->pc
);
2068 #ifdef TARGET_X86_64
2071 ret
= cpu_ldl_code(env
, s
->pc
);
2080 static inline int insn_const_size(TCGMemOp ot
)
2089 static inline bool use_goto_tb(DisasContext
*s
, target_ulong pc
)
2091 #ifndef CONFIG_USER_ONLY
2092 return (pc
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
2093 (pc
& TARGET_PAGE_MASK
) == (s
->pc_start
& TARGET_PAGE_MASK
);
2099 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2101 target_ulong pc
= s
->cs_base
+ eip
;
2103 if (use_goto_tb(s
, pc
)) {
2104 /* jump to same page: we can use a direct jump */
2105 tcg_gen_goto_tb(tb_num
);
2107 tcg_gen_exit_tb((uintptr_t)s
->tb
+ tb_num
);
2109 /* jump to another page: currently not optimized */
2115 static inline void gen_jcc(DisasContext
*s
, int b
,
2116 target_ulong val
, target_ulong next_eip
)
2121 l1
= gen_new_label();
2124 gen_goto_tb(s
, 0, next_eip
);
2127 gen_goto_tb(s
, 1, val
);
2128 s
->is_jmp
= DISAS_TB_JUMP
;
2130 l1
= gen_new_label();
2131 l2
= gen_new_label();
2134 gen_jmp_im(next_eip
);
2144 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
, int b
,
2149 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2151 cc
= gen_prepare_cc(s
, b
, cpu_T1
);
2152 if (cc
.mask
!= -1) {
2153 TCGv t0
= tcg_temp_new();
2154 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2158 cc
.reg2
= tcg_const_tl(cc
.imm
);
2161 tcg_gen_movcond_tl(cc
.cond
, cpu_T0
, cc
.reg
, cc
.reg2
,
2162 cpu_T0
, cpu_regs
[reg
]);
2163 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
2165 if (cc
.mask
!= -1) {
2166 tcg_temp_free(cc
.reg
);
2169 tcg_temp_free(cc
.reg2
);
2173 static inline void gen_op_movl_T0_seg(int seg_reg
)
2175 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
2176 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2179 static inline void gen_op_movl_seg_T0_vm(int seg_reg
)
2181 tcg_gen_ext16u_tl(cpu_T0
, cpu_T0
);
2182 tcg_gen_st32_tl(cpu_T0
, cpu_env
,
2183 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2184 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], cpu_T0
, 4);
2187 /* move T0 to seg_reg and compute if the CPU state may change. Never
2188 call this function with seg_reg == R_CS */
2189 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
)
2191 if (s
->pe
&& !s
->vm86
) {
2192 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
2193 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), cpu_tmp2_i32
);
2194 /* abort translation because the addseg value may change or
2195 because ss32 may change. For R_SS, translation must always
2196 stop as a special handling must be done to disable hardware
2197 interrupts for the next instruction */
2198 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
))
2199 s
->is_jmp
= DISAS_TB_JUMP
;
2201 gen_op_movl_seg_T0_vm(seg_reg
);
2202 if (seg_reg
== R_SS
)
2203 s
->is_jmp
= DISAS_TB_JUMP
;
2207 static inline int svm_is_rep(int prefixes
)
2209 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2213 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2214 uint32_t type
, uint64_t param
)
2216 /* no SVM activated; fast case */
2217 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2219 gen_update_cc_op(s
);
2220 gen_jmp_im(pc_start
- s
->cs_base
);
2221 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2222 tcg_const_i64(param
));
2226 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2228 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2231 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2233 gen_op_add_reg_im(mo_stacksize(s
), R_ESP
, addend
);
2236 /* Generate a push. It depends on ss32, addseg and dflag. */
2237 static void gen_push_v(DisasContext
*s
, TCGv val
)
2239 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2240 TCGMemOp a_ot
= mo_stacksize(s
);
2241 int size
= 1 << d_ot
;
2242 TCGv new_esp
= cpu_A0
;
2244 tcg_gen_subi_tl(cpu_A0
, cpu_regs
[R_ESP
], size
);
2249 tcg_gen_mov_tl(new_esp
, cpu_A0
);
2251 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2254 gen_op_st_v(s
, d_ot
, val
, cpu_A0
);
2255 gen_op_mov_reg_v(a_ot
, R_ESP
, new_esp
);
2258 /* two step pop is necessary for precise exceptions */
2259 static TCGMemOp
gen_pop_T0(DisasContext
*s
)
2261 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2263 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2264 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2269 static inline void gen_pop_update(DisasContext
*s
, TCGMemOp ot
)
2271 gen_stack_update(s
, 1 << ot
);
2274 static inline void gen_stack_A0(DisasContext
*s
)
2276 gen_lea_v_seg(s
, s
->ss32
? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2279 static void gen_pusha(DisasContext
*s
)
2281 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2282 TCGMemOp d_ot
= s
->dflag
;
2283 int size
= 1 << d_ot
;
2286 for (i
= 0; i
< 8; i
++) {
2287 tcg_gen_addi_tl(cpu_A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2288 gen_lea_v_seg(s
, s_ot
, cpu_A0
, R_SS
, -1);
2289 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], cpu_A0
);
2292 gen_stack_update(s
, -8 * size
);
2295 static void gen_popa(DisasContext
*s
)
2297 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2298 TCGMemOp d_ot
= s
->dflag
;
2299 int size
= 1 << d_ot
;
2302 for (i
= 0; i
< 8; i
++) {
2303 /* ESP is not reloaded */
2304 if (7 - i
== R_ESP
) {
2307 tcg_gen_addi_tl(cpu_A0
, cpu_regs
[R_ESP
], i
* size
);
2308 gen_lea_v_seg(s
, s_ot
, cpu_A0
, R_SS
, -1);
2309 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2310 gen_op_mov_reg_v(d_ot
, 7 - i
, cpu_T0
);
2313 gen_stack_update(s
, 8 * size
);
2316 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2318 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2319 TCGMemOp a_ot
= CODE64(s
) ? MO_64
: s
->ss32
? MO_32
: MO_16
;
2320 int size
= 1 << d_ot
;
2322 /* Push BP; compute FrameTemp into T1. */
2323 tcg_gen_subi_tl(cpu_T1
, cpu_regs
[R_ESP
], size
);
2324 gen_lea_v_seg(s
, a_ot
, cpu_T1
, R_SS
, -1);
2325 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], cpu_A0
);
2331 /* Copy level-1 pointers from the previous frame. */
2332 for (i
= 1; i
< level
; ++i
) {
2333 tcg_gen_subi_tl(cpu_A0
, cpu_regs
[R_EBP
], size
* i
);
2334 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2335 gen_op_ld_v(s
, d_ot
, cpu_tmp0
, cpu_A0
);
2337 tcg_gen_subi_tl(cpu_A0
, cpu_T1
, size
* i
);
2338 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2339 gen_op_st_v(s
, d_ot
, cpu_tmp0
, cpu_A0
);
2342 /* Push the current FrameTemp as the last level. */
2343 tcg_gen_subi_tl(cpu_A0
, cpu_T1
, size
* level
);
2344 gen_lea_v_seg(s
, a_ot
, cpu_A0
, R_SS
, -1);
2345 gen_op_st_v(s
, d_ot
, cpu_T1
, cpu_A0
);
2348 /* Copy the FrameTemp value to EBP. */
2349 gen_op_mov_reg_v(a_ot
, R_EBP
, cpu_T1
);
2351 /* Compute the final value of ESP. */
2352 tcg_gen_subi_tl(cpu_T1
, cpu_T1
, esp_addend
+ size
* level
);
2353 gen_op_mov_reg_v(a_ot
, R_ESP
, cpu_T1
);
2356 static void gen_leave(DisasContext
*s
)
2358 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2359 TCGMemOp a_ot
= mo_stacksize(s
);
2361 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2362 gen_op_ld_v(s
, d_ot
, cpu_T0
, cpu_A0
);
2364 tcg_gen_addi_tl(cpu_T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2366 gen_op_mov_reg_v(d_ot
, R_EBP
, cpu_T0
);
2367 gen_op_mov_reg_v(a_ot
, R_ESP
, cpu_T1
);
2370 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2372 gen_update_cc_op(s
);
2373 gen_jmp_im(cur_eip
);
2374 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2375 s
->is_jmp
= DISAS_TB_JUMP
;
2378 /* Generate #UD for the current instruction. The assumption here is that
2379 the instruction is known, but it isn't allowed in the current cpu mode. */
2380 static void gen_illegal_opcode(DisasContext
*s
)
2382 gen_exception(s
, EXCP06_ILLOP
, s
->pc_start
- s
->cs_base
);
2385 /* Similarly, except that the assumption here is that we don't decode
2386 the instruction at all -- either a missing opcode, an unimplemented
2387 feature, or just a bogus instruction stream. */
2388 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2390 gen_illegal_opcode(s
);
2392 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2393 target_ulong pc
= s
->pc_start
, end
= s
->pc
;
2394 qemu_log("ILLOPC: " TARGET_FMT_lx
":", pc
);
2395 for (; pc
< end
; ++pc
) {
2396 qemu_log(" %02x", cpu_ldub_code(env
, pc
));
2402 /* an interrupt is different from an exception because of the
2404 static void gen_interrupt(DisasContext
*s
, int intno
,
2405 target_ulong cur_eip
, target_ulong next_eip
)
2407 gen_update_cc_op(s
);
2408 gen_jmp_im(cur_eip
);
2409 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2410 tcg_const_i32(next_eip
- cur_eip
));
2411 s
->is_jmp
= DISAS_TB_JUMP
;
2414 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2416 gen_update_cc_op(s
);
2417 gen_jmp_im(cur_eip
);
2418 gen_helper_debug(cpu_env
);
2419 s
->is_jmp
= DISAS_TB_JUMP
;
2422 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2424 if ((s
->flags
& mask
) == 0) {
2425 TCGv_i32 t
= tcg_temp_new_i32();
2426 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2427 tcg_gen_ori_i32(t
, t
, mask
);
2428 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2429 tcg_temp_free_i32(t
);
2434 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2436 if (s
->flags
& mask
) {
2437 TCGv_i32 t
= tcg_temp_new_i32();
2438 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2439 tcg_gen_andi_i32(t
, t
, ~mask
);
2440 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2441 tcg_temp_free_i32(t
);
2446 /* Clear BND registers during legacy branches. */
2447 static void gen_bnd_jmp(DisasContext
*s
)
2449 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2450 and if the BNDREGs are known to be in use (non-zero) already.
2451 The helper itself will check BNDPRESERVE at runtime. */
2452 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2453 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2454 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2455 gen_helper_bnd_jmp(cpu_env
);
2459 /* Generate an end of block. Trace exception is also generated if needed.
2460 If IIM, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2461 static void gen_eob_inhibit_irq(DisasContext
*s
, bool inhibit
)
2463 gen_update_cc_op(s
);
2465 /* If several instructions disable interrupts, only the first does it. */
2466 if (inhibit
&& !(s
->flags
& HF_INHIBIT_IRQ_MASK
)) {
2467 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2469 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2472 if (s
->tb
->flags
& HF_RF_MASK
) {
2473 gen_helper_reset_rf(cpu_env
);
2475 if (s
->singlestep_enabled
) {
2476 gen_helper_debug(cpu_env
);
2478 gen_helper_single_step(cpu_env
);
2482 s
->is_jmp
= DISAS_TB_JUMP
;
2485 /* End of block, resetting the inhibit irq flag. */
2486 static void gen_eob(DisasContext
*s
)
2488 gen_eob_inhibit_irq(s
, false);
2491 /* generate a jump to eip. No segment change must happen before as a
2492 direct call to the next block may occur */
2493 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2495 gen_update_cc_op(s
);
2496 set_cc_op(s
, CC_OP_DYNAMIC
);
2498 gen_goto_tb(s
, tb_num
, eip
);
2499 s
->is_jmp
= DISAS_TB_JUMP
;
2506 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2508 gen_jmp_tb(s
, eip
, 0);
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
}

static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
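
/* The second index of sse_op_table1 selects the mandatory-prefix form of the
   opcode: 0 = no prefix (MMX or ...ps), 1 = 0x66 (...pd), 2 = 0xF3 (...ss),
   3 = 0xF2 (...sd); see the computation of b1 in gen_sse() below. */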
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntq, movntq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
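
/* sse_op_table2 holds the immediate-form MMX/SSE shifts (0F 71/72/73):
   the row is 8 * size-group (word/dword/qword) plus the modrm reg field,
   plus the SSE-only psrldq/pslldq byte shifts; the column again
   distinguishes the MMX and 0x66-prefixed XMM forms. */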
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
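
/* Scalar integer <-> float conversions: the "3a" tables convert from a
   general register (cvtsi2ss/sd, and cvtsq2ss/sd for 64-bit sources), the
   "3b" tables convert to one (cvt[t]ss2si, cvt[t]sd2si and their 64-bit
   "2sq" counterparts). */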
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
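
/* 3DNow! helpers, indexed directly by the 3DNow! opcode suffix byte that
   follows the operands. */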
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
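
/* Translate one MMX/SSE operation.  The mandatory prefix recorded in
   s->prefix selects the column (b1) in the tables above; entries marked
   SSE_SPECIAL are decoded by hand in the switch below. */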
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto unknown_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_illegal_opcode(s);
        return;
    }
    if (is_xmm
        && !(s->flags & HF_OSFXSR_MASK)
        && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
        goto unknown_op;
    }
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
            /* If we were fully decoding this we might use illegal_op.  */
            goto unknown_op;
        }
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = cpu_ldub_code(env, s->pc++);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
2976 case 0x0e7: /* movntq */
2980 gen_lea_modrm(env
, s
, modrm
);
2981 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
2983 case 0x1e7: /* movntdq */
2984 case 0x02b: /* movntps */
2985 case 0x12b: /* movntps */
2988 gen_lea_modrm(env
, s
, modrm
);
2989 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
2991 case 0x3f0: /* lddqu */
2994 gen_lea_modrm(env
, s
, modrm
);
2995 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
2997 case 0x22b: /* movntss */
2998 case 0x32b: /* movntsd */
3001 gen_lea_modrm(env
, s
, modrm
);
3003 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3004 xmm_regs
[reg
].ZMM_Q(0)));
3006 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
3007 xmm_regs
[reg
].ZMM_L(0)));
3008 gen_op_st_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3011 case 0x6e: /* movd mm, ea */
3012 #ifdef TARGET_X86_64
3013 if (s
->dflag
== MO_64
) {
3014 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3015 tcg_gen_st_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3019 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3020 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3021 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3022 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3023 gen_helper_movl_mm_T0_mmx(cpu_ptr0
, cpu_tmp2_i32
);
3026 case 0x16e: /* movd xmm, ea */
3027 #ifdef TARGET_X86_64
3028 if (s
->dflag
== MO_64
) {
3029 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3030 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3031 offsetof(CPUX86State
,xmm_regs
[reg
]));
3032 gen_helper_movq_mm_T0_xmm(cpu_ptr0
, cpu_T0
);
3036 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3037 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3038 offsetof(CPUX86State
,xmm_regs
[reg
]));
3039 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3040 gen_helper_movl_mm_T0_xmm(cpu_ptr0
, cpu_tmp2_i32
);
3043 case 0x6f: /* movq mm, ea */
3045 gen_lea_modrm(env
, s
, modrm
);
3046 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3049 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
3050 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3051 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
3052 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3055 case 0x010: /* movups */
3056 case 0x110: /* movupd */
3057 case 0x028: /* movaps */
3058 case 0x128: /* movapd */
3059 case 0x16f: /* movdqa xmm, ea */
3060 case 0x26f: /* movdqu xmm, ea */
3062 gen_lea_modrm(env
, s
, modrm
);
3063 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3065 rm
= (modrm
& 7) | REX_B(s
);
3066 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[reg
]),
3067 offsetof(CPUX86State
,xmm_regs
[rm
]));
3070 case 0x210: /* movss xmm, ea */
3072 gen_lea_modrm(env
, s
, modrm
);
3073 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3074 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3075 tcg_gen_movi_tl(cpu_T0
, 0);
3076 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3077 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3078 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3080 rm
= (modrm
& 7) | REX_B(s
);
3081 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3082 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3085 case 0x310: /* movsd xmm, ea */
3087 gen_lea_modrm(env
, s
, modrm
);
3088 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3089 xmm_regs
[reg
].ZMM_Q(0)));
3090 tcg_gen_movi_tl(cpu_T0
, 0);
3091 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3092 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3094 rm
= (modrm
& 7) | REX_B(s
);
3095 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3096 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3099 case 0x012: /* movlps */
3100 case 0x112: /* movlpd */
3102 gen_lea_modrm(env
, s
, modrm
);
3103 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3104 xmm_regs
[reg
].ZMM_Q(0)));
3107 rm
= (modrm
& 7) | REX_B(s
);
3108 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3109 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3112 case 0x212: /* movsldup */
3114 gen_lea_modrm(env
, s
, modrm
);
3115 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3117 rm
= (modrm
& 7) | REX_B(s
);
3118 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3119 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3120 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)),
3121 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(2)));
3123 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)),
3124 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3125 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)),
3126 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3128 case 0x312: /* movddup */
3130 gen_lea_modrm(env
, s
, modrm
);
3131 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3132 xmm_regs
[reg
].ZMM_Q(0)));
3134 rm
= (modrm
& 7) | REX_B(s
);
3135 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3136 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3138 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)),
3139 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3141 case 0x016: /* movhps */
3142 case 0x116: /* movhpd */
3144 gen_lea_modrm(env
, s
, modrm
);
3145 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3146 xmm_regs
[reg
].ZMM_Q(1)));
3149 rm
= (modrm
& 7) | REX_B(s
);
3150 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)),
3151 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3154 case 0x216: /* movshdup */
3156 gen_lea_modrm(env
, s
, modrm
);
3157 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3159 rm
= (modrm
& 7) | REX_B(s
);
3160 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)),
3161 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(1)));
3162 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)),
3163 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(3)));
3165 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)),
3166 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3167 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)),
3168 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3173 int bit_index
, field_length
;
3175 if (b1
== 1 && reg
!= 0)
3177 field_length
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3178 bit_index
= cpu_ldub_code(env
, s
->pc
++) & 0x3F;
3179 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3180 offsetof(CPUX86State
,xmm_regs
[reg
]));
3182 gen_helper_extrq_i(cpu_env
, cpu_ptr0
,
3183 tcg_const_i32(bit_index
),
3184 tcg_const_i32(field_length
));
3186 gen_helper_insertq_i(cpu_env
, cpu_ptr0
,
3187 tcg_const_i32(bit_index
),
3188 tcg_const_i32(field_length
));
3191 case 0x7e: /* movd ea, mm */
3192 #ifdef TARGET_X86_64
3193 if (s
->dflag
== MO_64
) {
3194 tcg_gen_ld_i64(cpu_T0
, cpu_env
,
3195 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3196 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3200 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
3201 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3202 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3205 case 0x17e: /* movd ea, xmm */
3206 #ifdef TARGET_X86_64
3207 if (s
->dflag
== MO_64
) {
3208 tcg_gen_ld_i64(cpu_T0
, cpu_env
,
3209 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3210 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3214 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
3215 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3216 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3219 case 0x27e: /* movq xmm, ea */
3221 gen_lea_modrm(env
, s
, modrm
);
3222 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3223 xmm_regs
[reg
].ZMM_Q(0)));
3225 rm
= (modrm
& 7) | REX_B(s
);
3226 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3227 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3229 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)));
3231 case 0x7f: /* movq ea, mm */
3233 gen_lea_modrm(env
, s
, modrm
);
3234 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3237 gen_op_movq(offsetof(CPUX86State
,fpregs
[rm
].mmx
),
3238 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3241 case 0x011: /* movups */
3242 case 0x111: /* movupd */
3243 case 0x029: /* movaps */
3244 case 0x129: /* movapd */
3245 case 0x17f: /* movdqa ea, xmm */
3246 case 0x27f: /* movdqu ea, xmm */
3248 gen_lea_modrm(env
, s
, modrm
);
3249 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3251 rm
= (modrm
& 7) | REX_B(s
);
3252 gen_op_movo(offsetof(CPUX86State
,xmm_regs
[rm
]),
3253 offsetof(CPUX86State
,xmm_regs
[reg
]));
3256 case 0x211: /* movss ea, xmm */
3258 gen_lea_modrm(env
, s
, modrm
);
3259 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3260 gen_op_st_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3262 rm
= (modrm
& 7) | REX_B(s
);
3263 gen_op_movl(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)),
3264 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3267 case 0x311: /* movsd ea, xmm */
3269 gen_lea_modrm(env
, s
, modrm
);
3270 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3271 xmm_regs
[reg
].ZMM_Q(0)));
3273 rm
= (modrm
& 7) | REX_B(s
);
3274 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)),
3275 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3278 case 0x013: /* movlps */
3279 case 0x113: /* movlpd */
3281 gen_lea_modrm(env
, s
, modrm
);
3282 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3283 xmm_regs
[reg
].ZMM_Q(0)));
3288 case 0x017: /* movhps */
3289 case 0x117: /* movhpd */
3291 gen_lea_modrm(env
, s
, modrm
);
3292 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3293 xmm_regs
[reg
].ZMM_Q(1)));
3298 case 0x71: /* shift mm, im */
3301 case 0x171: /* shift xmm, im */
3307 val
= cpu_ldub_code(env
, s
->pc
++);
3309 tcg_gen_movi_tl(cpu_T0
, val
);
3310 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
3311 tcg_gen_movi_tl(cpu_T0
, 0);
3312 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(1)));
3313 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3315 tcg_gen_movi_tl(cpu_T0
, val
);
3316 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(0)));
3317 tcg_gen_movi_tl(cpu_T0
, 0);
3318 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,mmx_t0
.MMX_L(1)));
3319 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3321 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3322 (((modrm
>> 3)) & 7)][b1
];
3327 rm
= (modrm
& 7) | REX_B(s
);
3328 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3331 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3333 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3334 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op1_offset
);
3335 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3337 case 0x050: /* movmskps */
3338 rm
= (modrm
& 7) | REX_B(s
);
3339 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3340 offsetof(CPUX86State
,xmm_regs
[rm
]));
3341 gen_helper_movmskps(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3342 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3344 case 0x150: /* movmskpd */
3345 rm
= (modrm
& 7) | REX_B(s
);
3346 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
,
3347 offsetof(CPUX86State
,xmm_regs
[rm
]));
3348 gen_helper_movmskpd(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3349 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3351 case 0x02a: /* cvtpi2ps */
3352 case 0x12a: /* cvtpi2pd */
3353 gen_helper_enter_mmx(cpu_env
);
3355 gen_lea_modrm(env
, s
, modrm
);
3356 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3357 gen_ldq_env_A0(s
, op2_offset
);
3360 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3362 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3363 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3364 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3367 gen_helper_cvtpi2ps(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3371 gen_helper_cvtpi2pd(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3375 case 0x22a: /* cvtsi2ss */
3376 case 0x32a: /* cvtsi2sd */
3377 ot
= mo_64_32(s
->dflag
);
3378 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3379 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3380 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3382 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3383 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3384 sse_fn_epi(cpu_env
, cpu_ptr0
, cpu_tmp2_i32
);
3386 #ifdef TARGET_X86_64
3387 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3388 sse_fn_epl(cpu_env
, cpu_ptr0
, cpu_T0
);
3394 case 0x02c: /* cvttps2pi */
3395 case 0x12c: /* cvttpd2pi */
3396 case 0x02d: /* cvtps2pi */
3397 case 0x12d: /* cvtpd2pi */
3398 gen_helper_enter_mmx(cpu_env
);
3400 gen_lea_modrm(env
, s
, modrm
);
3401 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3402 gen_ldo_env_A0(s
, op2_offset
);
3404 rm
= (modrm
& 7) | REX_B(s
);
3405 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3407 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3408 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3409 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3412 gen_helper_cvttps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3415 gen_helper_cvttpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3418 gen_helper_cvtps2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3421 gen_helper_cvtpd2pi(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3425 case 0x22c: /* cvttss2si */
3426 case 0x32c: /* cvttsd2si */
3427 case 0x22d: /* cvtss2si */
3428 case 0x32d: /* cvtsd2si */
3429 ot
= mo_64_32(s
->dflag
);
3431 gen_lea_modrm(env
, s
, modrm
);
3433 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_Q(0)));
3435 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
3436 tcg_gen_st32_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
3438 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3440 rm
= (modrm
& 7) | REX_B(s
);
3441 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3443 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op2_offset
);
3445 SSEFunc_i_ep sse_fn_i_ep
=
3446 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3447 sse_fn_i_ep(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3448 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
3450 #ifdef TARGET_X86_64
3451 SSEFunc_l_ep sse_fn_l_ep
=
3452 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3453 sse_fn_l_ep(cpu_T0
, cpu_env
, cpu_ptr0
);
3458 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3460 case 0xc4: /* pinsrw */
3463 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3464 val
= cpu_ldub_code(env
, s
->pc
++);
3467 tcg_gen_st16_tl(cpu_T0
, cpu_env
,
3468 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_W(val
)));
3471 tcg_gen_st16_tl(cpu_T0
, cpu_env
,
3472 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3475 case 0xc5: /* pextrw */
3479 ot
= mo_64_32(s
->dflag
);
3480 val
= cpu_ldub_code(env
, s
->pc
++);
3483 rm
= (modrm
& 7) | REX_B(s
);
3484 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
,
3485 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_W(val
)));
3489 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
,
3490 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3492 reg
= ((modrm
>> 3) & 7) | rex_r
;
3493 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3495 case 0x1d6: /* movq ea, xmm */
3497 gen_lea_modrm(env
, s
, modrm
);
3498 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3499 xmm_regs
[reg
].ZMM_Q(0)));
3501 rm
= (modrm
& 7) | REX_B(s
);
3502 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)),
3503 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3504 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3507 case 0x2d6: /* movq2dq */
3508 gen_helper_enter_mmx(cpu_env
);
3510 gen_op_movq(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)),
3511 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3512 gen_op_movq_env_0(offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(1)));
3514 case 0x3d6: /* movdq2q */
3515 gen_helper_enter_mmx(cpu_env
);
3516 rm
= (modrm
& 7) | REX_B(s
);
3517 gen_op_movq(offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
),
3518 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3520 case 0xd7: /* pmovmskb */
3525 rm
= (modrm
& 7) | REX_B(s
);
3526 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,xmm_regs
[rm
]));
3527 gen_helper_pmovmskb_xmm(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3530 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3531 gen_helper_pmovmskb_mmx(cpu_tmp2_i32
, cpu_env
, cpu_ptr0
);
3533 reg
= ((modrm
>> 3) & 7) | rex_r
;
3534 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
3540 if ((b
& 0xf0) == 0xf0) {
3543 modrm
= cpu_ldub_code(env
, s
->pc
++);
3545 reg
= ((modrm
>> 3) & 7) | rex_r
;
3546 mod
= (modrm
>> 6) & 3;
3551 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3555 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3559 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3561 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3563 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3564 gen_lea_modrm(env
, s
, modrm
);
3566 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3567 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3568 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3569 gen_ldq_env_A0(s
, op2_offset
+
3570 offsetof(ZMMReg
, ZMM_Q(0)));
3572 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3573 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3574 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
3575 s
->mem_index
, MO_LEUL
);
3576 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
, op2_offset
+
3577 offsetof(ZMMReg
, ZMM_L(0)));
3579 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3580 tcg_gen_qemu_ld_tl(cpu_tmp0
, cpu_A0
,
3581 s
->mem_index
, MO_LEUW
);
3582 tcg_gen_st16_tl(cpu_tmp0
, cpu_env
, op2_offset
+
3583 offsetof(ZMMReg
, ZMM_W(0)));
3585 case 0x2a: /* movntqda */
3586 gen_ldo_env_A0(s
, op1_offset
);
3589 gen_ldo_env_A0(s
, op2_offset
);
3593 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3595 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3597 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3598 gen_lea_modrm(env
, s
, modrm
);
3599 gen_ldq_env_A0(s
, op2_offset
);
3602 if (sse_fn_epp
== SSE_SPECIAL
) {
3606 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
3607 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
3608 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
3611 set_cc_op(s
, CC_OP_EFLAGS
);
3618 /* Various integer extensions at 0f 38 f[0-f]. */
3619 b
= modrm
| (b1
<< 8);
3620 modrm
= cpu_ldub_code(env
, s
->pc
++);
3621 reg
= ((modrm
>> 3) & 7) | rex_r
;
3624 case 0x3f0: /* crc32 Gd,Eb */
3625 case 0x3f1: /* crc32 Gd,Ey */
3627 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3630 if ((b
& 0xff) == 0xf0) {
3632 } else if (s
->dflag
!= MO_64
) {
3633 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3638 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[reg
]);
3639 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3640 gen_helper_crc32(cpu_T0
, cpu_tmp2_i32
,
3641 cpu_T0
, tcg_const_i32(8 << ot
));
3643 ot
= mo_64_32(s
->dflag
);
3644 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3647 case 0x1f0: /* crc32 or movbe */
3649 /* For these insns, the f3 prefix is supposed to have priority
3650 over the 66 prefix, but that's not what we implement above
3652 if (s
->prefix
& PREFIX_REPNZ
) {
3656 case 0x0f0: /* movbe Gy,My */
3657 case 0x0f1: /* movbe My,Gy */
3658 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3661 if (s
->dflag
!= MO_64
) {
3662 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3667 gen_lea_modrm(env
, s
, modrm
);
3669 tcg_gen_qemu_ld_tl(cpu_T0
, cpu_A0
,
3670 s
->mem_index
, ot
| MO_BE
);
3671 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3673 tcg_gen_qemu_st_tl(cpu_regs
[reg
], cpu_A0
,
3674 s
->mem_index
, ot
| MO_BE
);
3678 case 0x0f2: /* andn Gy, By, Ey */
3679 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3680 || !(s
->prefix
& PREFIX_VEX
)
3684 ot
= mo_64_32(s
->dflag
);
3685 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3686 tcg_gen_andc_tl(cpu_T0
, cpu_regs
[s
->vex_v
], cpu_T0
);
3687 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3688 gen_op_update1_cc();
3689 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3692 case 0x0f7: /* bextr Gy, Ey, By */
3693 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3694 || !(s
->prefix
& PREFIX_VEX
)
3698 ot
= mo_64_32(s
->dflag
);
3702 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3703 /* Extract START, and shift the operand.
3704 Shifts larger than operand size get zeros. */
3705 tcg_gen_ext8u_tl(cpu_A0
, cpu_regs
[s
->vex_v
]);
3706 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_A0
);
3708 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3709 zero
= tcg_const_tl(0);
3710 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_T0
, cpu_A0
, bound
,
3712 tcg_temp_free(zero
);
3714 /* Extract the LEN into a mask. Lengths larger than
3715 operand size get all ones. */
3716 tcg_gen_shri_tl(cpu_A0
, cpu_regs
[s
->vex_v
], 8);
3717 tcg_gen_ext8u_tl(cpu_A0
, cpu_A0
);
3718 tcg_gen_movcond_tl(TCG_COND_LEU
, cpu_A0
, cpu_A0
, bound
,
3720 tcg_temp_free(bound
);
3721 tcg_gen_movi_tl(cpu_T1
, 1);
3722 tcg_gen_shl_tl(cpu_T1
, cpu_T1
, cpu_A0
);
3723 tcg_gen_subi_tl(cpu_T1
, cpu_T1
, 1);
3724 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3726 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3727 gen_op_update1_cc();
3728 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3732 case 0x0f5: /* bzhi Gy, Ey, By */
3733 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3734 || !(s
->prefix
& PREFIX_VEX
)
3738 ot
= mo_64_32(s
->dflag
);
3739 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3740 tcg_gen_ext8u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3742 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3743 /* Note that since we're using BMILG (in order to get O
3744 cleared) we need to store the inverse into C. */
3745 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
3747 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_T1
, cpu_T1
,
3748 bound
, bound
, cpu_T1
);
3749 tcg_temp_free(bound
);
3751 tcg_gen_movi_tl(cpu_A0
, -1);
3752 tcg_gen_shl_tl(cpu_A0
, cpu_A0
, cpu_T1
);
3753 tcg_gen_andc_tl(cpu_T0
, cpu_T0
, cpu_A0
);
3754 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3755 gen_op_update1_cc();
3756 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3759 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3760 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3761 || !(s
->prefix
& PREFIX_VEX
)
3765 ot
= mo_64_32(s
->dflag
);
3766 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3769 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
3770 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EDX
]);
3771 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
3772 cpu_tmp2_i32
, cpu_tmp3_i32
);
3773 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], cpu_tmp2_i32
);
3774 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp3_i32
);
3776 #ifdef TARGET_X86_64
3778 tcg_gen_mulu2_i64(cpu_T0
, cpu_T1
,
3779 cpu_T0
, cpu_regs
[R_EDX
]);
3780 tcg_gen_mov_i64(cpu_regs
[s
->vex_v
], cpu_T0
);
3781 tcg_gen_mov_i64(cpu_regs
[reg
], cpu_T1
);
3787 case 0x3f5: /* pdep Gy, By, Ey */
3788 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3789 || !(s
->prefix
& PREFIX_VEX
)
3793 ot
= mo_64_32(s
->dflag
);
3794 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3795 /* Note that by zero-extending the mask operand, we
3796 automatically handle zero-extending the result. */
3798 tcg_gen_mov_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3800 tcg_gen_ext32u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3802 gen_helper_pdep(cpu_regs
[reg
], cpu_T0
, cpu_T1
);
3805 case 0x2f5: /* pext Gy, By, Ey */
3806 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3807 || !(s
->prefix
& PREFIX_VEX
)
3811 ot
= mo_64_32(s
->dflag
);
3812 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3813 /* Note that by zero-extending the mask operand, we
3814 automatically handle zero-extending the result. */
3816 tcg_gen_mov_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3818 tcg_gen_ext32u_tl(cpu_T1
, cpu_regs
[s
->vex_v
]);
3820 gen_helper_pext(cpu_regs
[reg
], cpu_T0
, cpu_T1
);
3823 case 0x1f6: /* adcx Gy, Ey */
3824 case 0x2f6: /* adox Gy, Ey */
3825 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
3828 TCGv carry_in
, carry_out
, zero
;
3831 ot
= mo_64_32(s
->dflag
);
3832 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3834 /* Re-use the carry-out from a previous round. */
3835 TCGV_UNUSED(carry_in
);
3836 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
3840 carry_in
= cpu_cc_dst
;
3841 end_op
= CC_OP_ADCX
;
3843 end_op
= CC_OP_ADCOX
;
3848 end_op
= CC_OP_ADCOX
;
3850 carry_in
= cpu_cc_src2
;
3851 end_op
= CC_OP_ADOX
;
3855 end_op
= CC_OP_ADCOX
;
3856 carry_in
= carry_out
;
3859 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
3862 /* If we can't reuse carry-out, get it out of EFLAGS. */
3863 if (TCGV_IS_UNUSED(carry_in
)) {
3864 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
3865 gen_compute_eflags(s
);
3867 carry_in
= cpu_tmp0
;
3868 tcg_gen_shri_tl(carry_in
, cpu_cc_src
,
3869 ctz32(b
== 0x1f6 ? CC_C
: CC_O
));
3870 tcg_gen_andi_tl(carry_in
, carry_in
, 1);
3874 #ifdef TARGET_X86_64
3876 /* If we know TL is 64-bit, and we want a 32-bit
3877 result, just do everything in 64-bit arithmetic. */
3878 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
3879 tcg_gen_ext32u_i64(cpu_T0
, cpu_T0
);
3880 tcg_gen_add_i64(cpu_T0
, cpu_T0
, cpu_regs
[reg
]);
3881 tcg_gen_add_i64(cpu_T0
, cpu_T0
, carry_in
);
3882 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_T0
);
3883 tcg_gen_shri_i64(carry_out
, cpu_T0
, 32);
3887 /* Otherwise compute the carry-out in two steps. */
3888 zero
= tcg_const_tl(0);
3889 tcg_gen_add2_tl(cpu_T0
, carry_out
,
3892 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
3893 cpu_regs
[reg
], carry_out
,
3895 tcg_temp_free(zero
);
3898 set_cc_op(s
, end_op
);
3902 case 0x1f7: /* shlx Gy, Ey, By */
3903 case 0x2f7: /* sarx Gy, Ey, By */
3904 case 0x3f7: /* shrx Gy, Ey, By */
3905 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3906 || !(s
->prefix
& PREFIX_VEX
)
3910 ot
= mo_64_32(s
->dflag
);
3911 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3913 tcg_gen_andi_tl(cpu_T1
, cpu_regs
[s
->vex_v
], 63);
3915 tcg_gen_andi_tl(cpu_T1
, cpu_regs
[s
->vex_v
], 31);
3918 tcg_gen_shl_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3919 } else if (b
== 0x2f7) {
3921 tcg_gen_ext32s_tl(cpu_T0
, cpu_T0
);
3923 tcg_gen_sar_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3926 tcg_gen_ext32u_tl(cpu_T0
, cpu_T0
);
3928 tcg_gen_shr_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3930 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
3936 case 0x3f3: /* Group 17 */
3937 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3938 || !(s
->prefix
& PREFIX_VEX
)
3942 ot
= mo_64_32(s
->dflag
);
3943 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3946 case 1: /* blsr By,Ey */
3947 tcg_gen_neg_tl(cpu_T1
, cpu_T0
);
3948 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_T1
);
3949 gen_op_mov_reg_v(ot
, s
->vex_v
, cpu_T0
);
3950 gen_op_update2_cc();
3951 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3954 case 2: /* blsmsk By,Ey */
3955 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
3956 tcg_gen_subi_tl(cpu_T0
, cpu_T0
, 1);
3957 tcg_gen_xor_tl(cpu_T0
, cpu_T0
, cpu_cc_src
);
3958 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
3959 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3962 case 3: /* blsi By, Ey */
3963 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
3964 tcg_gen_subi_tl(cpu_T0
, cpu_T0
, 1);
3965 tcg_gen_and_tl(cpu_T0
, cpu_T0
, cpu_cc_src
);
3966 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
3967 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3983 modrm
= cpu_ldub_code(env
, s
->pc
++);
3985 reg
= ((modrm
>> 3) & 7) | rex_r
;
3986 mod
= (modrm
>> 6) & 3;
3991 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
3995 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
3998 if (sse_fn_eppi
== SSE_SPECIAL
) {
3999 ot
= mo_64_32(s
->dflag
);
4000 rm
= (modrm
& 7) | REX_B(s
);
4002 gen_lea_modrm(env
, s
, modrm
);
4003 reg
= ((modrm
>> 3) & 7) | rex_r
;
4004 val
= cpu_ldub_code(env
, s
->pc
++);
4006 case 0x14: /* pextrb */
4007 tcg_gen_ld8u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4008 xmm_regs
[reg
].ZMM_B(val
& 15)));
4010 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4012 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4013 s
->mem_index
, MO_UB
);
4016 case 0x15: /* pextrw */
4017 tcg_gen_ld16u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4018 xmm_regs
[reg
].ZMM_W(val
& 7)));
4020 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4022 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4023 s
->mem_index
, MO_LEUW
);
4027 if (ot
== MO_32
) { /* pextrd */
4028 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4029 offsetof(CPUX86State
,
4030 xmm_regs
[reg
].ZMM_L(val
& 3)));
4032 tcg_gen_extu_i32_tl(cpu_regs
[rm
], cpu_tmp2_i32
);
4034 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
4035 s
->mem_index
, MO_LEUL
);
4037 } else { /* pextrq */
4038 #ifdef TARGET_X86_64
4039 tcg_gen_ld_i64(cpu_tmp1_i64
, cpu_env
,
4040 offsetof(CPUX86State
,
4041 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4043 tcg_gen_mov_i64(cpu_regs
[rm
], cpu_tmp1_i64
);
4045 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
4046 s
->mem_index
, MO_LEQ
);
4053 case 0x17: /* extractps */
4054 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4055 xmm_regs
[reg
].ZMM_L(val
& 3)));
4057 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4059 tcg_gen_qemu_st_tl(cpu_T0
, cpu_A0
,
4060 s
->mem_index
, MO_LEUL
);
4063 case 0x20: /* pinsrb */
4065 gen_op_mov_v_reg(MO_32
, cpu_T0
, rm
);
4067 tcg_gen_qemu_ld_tl(cpu_T0
, cpu_A0
,
4068 s
->mem_index
, MO_UB
);
4070 tcg_gen_st8_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
,
4071 xmm_regs
[reg
].ZMM_B(val
& 15)));
4073 case 0x21: /* insertps */
4075 tcg_gen_ld_i32(cpu_tmp2_i32
, cpu_env
,
4076 offsetof(CPUX86State
,xmm_regs
[rm
]
4077 .ZMM_L((val
>> 6) & 3)));
4079 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4080 s
->mem_index
, MO_LEUL
);
4082 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4083 offsetof(CPUX86State
,xmm_regs
[reg
]
4084 .ZMM_L((val
>> 4) & 3)));
4086 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4087 cpu_env
, offsetof(CPUX86State
,
4088 xmm_regs
[reg
].ZMM_L(0)));
4090 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4091 cpu_env
, offsetof(CPUX86State
,
4092 xmm_regs
[reg
].ZMM_L(1)));
4094 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4095 cpu_env
, offsetof(CPUX86State
,
4096 xmm_regs
[reg
].ZMM_L(2)));
4098 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4099 cpu_env
, offsetof(CPUX86State
,
4100 xmm_regs
[reg
].ZMM_L(3)));
4103 if (ot
== MO_32
) { /* pinsrd */
4105 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[rm
]);
4107 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
4108 s
->mem_index
, MO_LEUL
);
4110 tcg_gen_st_i32(cpu_tmp2_i32
, cpu_env
,
4111 offsetof(CPUX86State
,
4112 xmm_regs
[reg
].ZMM_L(val
& 3)));
4113 } else { /* pinsrq */
4114 #ifdef TARGET_X86_64
4116 gen_op_mov_v_reg(ot
, cpu_tmp1_i64
, rm
);
4118 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
4119 s
->mem_index
, MO_LEQ
);
4121 tcg_gen_st_i64(cpu_tmp1_i64
, cpu_env
,
4122 offsetof(CPUX86State
,
4123 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4134 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4136 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4138 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4139 gen_lea_modrm(env
, s
, modrm
);
4140 gen_ldo_env_A0(s
, op2_offset
);
4143 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4145 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4147 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4148 gen_lea_modrm(env
, s
, modrm
);
4149 gen_ldq_env_A0(s
, op2_offset
);
4152 val
= cpu_ldub_code(env
, s
->pc
++);
4154 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4155 set_cc_op(s
, CC_OP_EFLAGS
);
4157 if (s
->dflag
== MO_64
) {
4158 /* The helper must use entire 64-bit gp registers */
4163 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4164 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4165 sse_fn_eppi(cpu_env
, cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4169 /* Various integer extensions at 0f 3a f[0-f]. */
4170 b
= modrm
| (b1
<< 8);
4171 modrm
= cpu_ldub_code(env
, s
->pc
++);
4172 reg
= ((modrm
>> 3) & 7) | rex_r
;
4175 case 0x3f0: /* rorx Gy,Ey, Ib */
4176 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4177 || !(s
->prefix
& PREFIX_VEX
)
4181 ot
= mo_64_32(s
->dflag
);
4182 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4183 b
= cpu_ldub_code(env
, s
->pc
++);
4185 tcg_gen_rotri_tl(cpu_T0
, cpu_T0
, b
& 63);
4187 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
4188 tcg_gen_rotri_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, b
& 31);
4189 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
4191 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
4201 gen_unknown_opcode(env
, s
);
4205 /* generic MMX or SSE operation */
4207 case 0x70: /* pshufx insn */
4208 case 0xc6: /* pshufx insn */
4209 case 0xc2: /* compare insns */
4216 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4220 gen_lea_modrm(env
, s
, modrm
);
4221 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4227 /* Most sse scalar operations. */
4230 } else if (b1
== 3) {
4235 case 0x2e: /* ucomis[sd] */
4236 case 0x2f: /* comis[sd] */
4248 gen_op_ld_v(s
, MO_32
, cpu_T0
, cpu_A0
);
4249 tcg_gen_st32_tl(cpu_T0
, cpu_env
,
4250 offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
4254 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
4257 /* 128 bit access */
4258 gen_ldo_env_A0(s
, op2_offset
);
4262 rm
= (modrm
& 7) | REX_B(s
);
4263 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4266 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4268 gen_lea_modrm(env
, s
, modrm
);
4269 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4270 gen_ldq_env_A0(s
, op2_offset
);
4273 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4277 case 0x0f: /* 3DNow! data insns */
4278 val
= cpu_ldub_code(env
, s
->pc
++);
4279 sse_fn_epp
= sse_op_table5
[val
];
4283 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
4286 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4287 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4288 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4290 case 0x70: /* pshufx insn */
4291 case 0xc6: /* pshufx insn */
4292 val
= cpu_ldub_code(env
, s
->pc
++);
4293 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4294 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4295 /* XXX: introduce a new table? */
4296 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4297 sse_fn_ppi(cpu_ptr0
, cpu_ptr1
, tcg_const_i32(val
));
4301 val
= cpu_ldub_code(env
, s
->pc
++);
4304 sse_fn_epp
= sse_op_table4
[val
][b1
];
4306 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4307 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4308 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4311 /* maskmov : we must prepare A0 */
4314 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EDI
]);
4315 gen_extu(s
->aflag
, cpu_A0
);
4316 gen_add_A0_ds_seg(s
);
4318 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4319 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4320 /* XXX: introduce a new table? */
4321 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4322 sse_fn_eppt(cpu_env
, cpu_ptr0
, cpu_ptr1
, cpu_A0
);
4325 tcg_gen_addi_ptr(cpu_ptr0
, cpu_env
, op1_offset
);
4326 tcg_gen_addi_ptr(cpu_ptr1
, cpu_env
, op2_offset
);
4327 sse_fn_epp(cpu_env
, cpu_ptr0
, cpu_ptr1
);
4330 if (b
== 0x2e || b
== 0x2f) {
4331 set_cc_op(s
, CC_OP_EFLAGS
);
/* convert one instruction. s->is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
                               target_ulong pc_start)
{
    int b, prefixes;
    int shift;
    TCGMemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;

    s->pc_start = s->pc = pc_start;
    prefixes = 0;
    s->override = -1;
    rex_w = -1;
    rex_r = 0;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    x86_64_hregs = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;

 next_byte:
    b = cpu_ldub_code(env, s->pc);
    s->pc++;
    /* Collect prefixes.  */
    switch (b) {
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        /* VEX prefixes cannot be used except in 32-bit mode.
           Otherwise the instruction is LES or LDS.  */
        if (s->code32 && !s->vm86) {
            static const int pp_prefix[4] = {
                0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
            };
            int vex3, vex2 = cpu_ldub_code(env, s->pc);

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS.  */
                break;
            }
            s->pc++;

            /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
                            | PREFIX_LOCK | PREFIX_DATA)) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (x86_64_hregs) {
                goto illegal_op;
            }
#endif
            rex_r = (~vex2 >> 4) & 8;
            if (b == 0xc5) {
                vex3 = vex2;
                b = cpu_ldub_code(env, s->pc++);
            } else {
#ifdef TARGET_X86_64
                s->rex_x = (~vex2 >> 3) & 8;
                s->rex_b = (~vex2 >> 2) & 8;
#endif
                vex3 = cpu_ldub_code(env, s->pc++);
                rex_w = (vex3 >> 7) & 1;
                switch (vex2 & 0x1f) {
                case 0x01: /* Implied 0f leading opcode bytes.  */
                    b = cpu_ldub_code(env, s->pc++) | 0x100;
                    break;
                case 0x02: /* Implied 0f 38 leading opcode bytes.  */
                    b = 0x138;
                    break;
                case 0x03: /* Implied 0f 3a leading opcode bytes.  */
                    b = 0x13a;
                    break;
                default:   /* Reserved for future use.  */
                    goto unknown_op;
                }
            }
            s->vex_v = (~vex3 >> 3) & 0xf;
            s->vex_l = (vex3 >> 2) & 1;
            prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
        }
        break;
    }

    /* Post-process prefixes.  */
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present.  */
        dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
        /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
        if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
            dflag = MO_32;
        } else {
            dflag = MO_16;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
        if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
            aflag = MO_32;
        } else {
            aflag = MO_16;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* lock generation */
    if (prefixes & PREFIX_LOCK)
        gen_helper_lock();

    /* now check op code */
 reswitch:
    switch(b) {
    case 0x0f:
        /**************************/
        /* extended op code */
        b = cpu_ldub_code(env, s->pc++) | 0x100;
        goto reswitch;

        /**************************/
        /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

4526 ot
= mo_b_d(b
, dflag
);
4529 case 0: /* OP Ev, Gv */
4530 modrm
= cpu_ldub_code(env
, s
->pc
++);
4531 reg
= ((modrm
>> 3) & 7) | rex_r
;
4532 mod
= (modrm
>> 6) & 3;
4533 rm
= (modrm
& 7) | REX_B(s
);
4535 gen_lea_modrm(env
, s
, modrm
);
4537 } else if (op
== OP_XORL
&& rm
== reg
) {
4539 /* xor reg, reg optimisation */
4540 set_cc_op(s
, CC_OP_CLR
);
4541 tcg_gen_movi_tl(cpu_T0
, 0);
4542 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
4547 gen_op_mov_v_reg(ot
, cpu_T1
, reg
);
4548 gen_op(s
, op
, ot
, opreg
);
4550 case 1: /* OP Gv, Ev */
4551 modrm
= cpu_ldub_code(env
, s
->pc
++);
4552 mod
= (modrm
>> 6) & 3;
4553 reg
= ((modrm
>> 3) & 7) | rex_r
;
4554 rm
= (modrm
& 7) | REX_B(s
);
4556 gen_lea_modrm(env
, s
, modrm
);
4557 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
4558 } else if (op
== OP_XORL
&& rm
== reg
) {
4561 gen_op_mov_v_reg(ot
, cpu_T1
, rm
);
4563 gen_op(s
, op
, ot
, reg
);
4565 case 2: /* OP A, Iv */
4566 val
= insn_get(env
, s
, ot
);
4567 tcg_gen_movi_tl(cpu_T1
, val
);
4568 gen_op(s
, op
, ot
, OR_EAX
);
4577 case 0x80: /* GRP1 */
4583 ot
= mo_b_d(b
, dflag
);
4585 modrm
= cpu_ldub_code(env
, s
->pc
++);
4586 mod
= (modrm
>> 6) & 3;
4587 rm
= (modrm
& 7) | REX_B(s
);
4588 op
= (modrm
>> 3) & 7;
4594 s
->rip_offset
= insn_const_size(ot
);
4595 gen_lea_modrm(env
, s
, modrm
);
4606 val
= insn_get(env
, s
, ot
);
4609 val
= (int8_t)insn_get(env
, s
, MO_8
);
4612 tcg_gen_movi_tl(cpu_T1
, val
);
4613 gen_op(s
, op
, ot
, opreg
);
4617 /**************************/
4618 /* inc, dec, and other misc arith */
4619 case 0x40 ... 0x47: /* inc Gv */
4621 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4623 case 0x48 ... 0x4f: /* dec Gv */
4625 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4627 case 0xf6: /* GRP3 */
4629 ot
= mo_b_d(b
, dflag
);
4631 modrm
= cpu_ldub_code(env
, s
->pc
++);
4632 mod
= (modrm
>> 6) & 3;
4633 rm
= (modrm
& 7) | REX_B(s
);
4634 op
= (modrm
>> 3) & 7;
4637 s
->rip_offset
= insn_const_size(ot
);
4638 gen_lea_modrm(env
, s
, modrm
);
4639 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
4641 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
4646 val
= insn_get(env
, s
, ot
);
4647 tcg_gen_movi_tl(cpu_T1
, val
);
4648 gen_op_testl_T0_T1_cc();
4649 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4652 tcg_gen_not_tl(cpu_T0
, cpu_T0
);
4654 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
4656 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4660 tcg_gen_neg_tl(cpu_T0
, cpu_T0
);
4662 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
4664 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
4666 gen_op_update_neg_cc();
4667 set_cc_op(s
, CC_OP_SUBB
+ ot
);
4672 gen_op_mov_v_reg(MO_8
, cpu_T1
, R_EAX
);
4673 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
4674 tcg_gen_ext8u_tl(cpu_T1
, cpu_T1
);
4675 /* XXX: use 32 bit mul which could be faster */
4676 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
4677 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4678 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
4679 tcg_gen_andi_tl(cpu_cc_src
, cpu_T0
, 0xff00);
4680 set_cc_op(s
, CC_OP_MULB
);
4683 gen_op_mov_v_reg(MO_16
, cpu_T1
, R_EAX
);
4684 tcg_gen_ext16u_tl(cpu_T0
, cpu_T0
);
4685 tcg_gen_ext16u_tl(cpu_T1
, cpu_T1
);
4686 /* XXX: use 32 bit mul which could be faster */
4687 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
4688 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4689 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
4690 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, 16);
4691 gen_op_mov_reg_v(MO_16
, R_EDX
, cpu_T0
);
4692 tcg_gen_mov_tl(cpu_cc_src
, cpu_T0
);
4693 set_cc_op(s
, CC_OP_MULW
);
4697 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
4698 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EAX
]);
4699 tcg_gen_mulu2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
4700 cpu_tmp2_i32
, cpu_tmp3_i32
);
4701 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], cpu_tmp2_i32
);
4702 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], cpu_tmp3_i32
);
4703 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4704 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
4705 set_cc_op(s
, CC_OP_MULL
);
4707 #ifdef TARGET_X86_64
4709 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
4710 cpu_T0
, cpu_regs
[R_EAX
]);
4711 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4712 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
4713 set_cc_op(s
, CC_OP_MULQ
);
4721 gen_op_mov_v_reg(MO_8
, cpu_T1
, R_EAX
);
4722 tcg_gen_ext8s_tl(cpu_T0
, cpu_T0
);
4723 tcg_gen_ext8s_tl(cpu_T1
, cpu_T1
);
4724 /* XXX: use 32 bit mul which could be faster */
4725 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
4726 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4727 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
4728 tcg_gen_ext8s_tl(cpu_tmp0
, cpu_T0
);
4729 tcg_gen_sub_tl(cpu_cc_src
, cpu_T0
, cpu_tmp0
);
4730 set_cc_op(s
, CC_OP_MULB
);
4733 gen_op_mov_v_reg(MO_16
, cpu_T1
, R_EAX
);
4734 tcg_gen_ext16s_tl(cpu_T0
, cpu_T0
);
4735 tcg_gen_ext16s_tl(cpu_T1
, cpu_T1
);
4736 /* XXX: use 32 bit mul which could be faster */
4737 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
4738 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4739 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
4740 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T0
);
4741 tcg_gen_sub_tl(cpu_cc_src
, cpu_T0
, cpu_tmp0
);
4742 tcg_gen_shri_tl(cpu_T0
, cpu_T0
, 16);
4743 gen_op_mov_reg_v(MO_16
, R_EDX
, cpu_T0
);
4744 set_cc_op(s
, CC_OP_MULW
);
4748 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
4749 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_regs
[R_EAX
]);
4750 tcg_gen_muls2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
4751 cpu_tmp2_i32
, cpu_tmp3_i32
);
4752 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], cpu_tmp2_i32
);
4753 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], cpu_tmp3_i32
);
4754 tcg_gen_sari_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 31);
4755 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4756 tcg_gen_sub_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
4757 tcg_gen_extu_i32_tl(cpu_cc_src
, cpu_tmp2_i32
);
4758 set_cc_op(s
, CC_OP_MULL
);
4760 #ifdef TARGET_X86_64
4762 tcg_gen_muls2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
4763 cpu_T0
, cpu_regs
[R_EAX
]);
4764 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4765 tcg_gen_sari_tl(cpu_cc_src
, cpu_regs
[R_EAX
], 63);
4766 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_regs
[R_EDX
]);
4767 set_cc_op(s
, CC_OP_MULQ
);
4775 gen_helper_divb_AL(cpu_env
, cpu_T0
);
4778 gen_helper_divw_AX(cpu_env
, cpu_T0
);
4782 gen_helper_divl_EAX(cpu_env
, cpu_T0
);
4784 #ifdef TARGET_X86_64
4786 gen_helper_divq_EAX(cpu_env
, cpu_T0
);
4794 gen_helper_idivb_AL(cpu_env
, cpu_T0
);
4797 gen_helper_idivw_AX(cpu_env
, cpu_T0
);
4801 gen_helper_idivl_EAX(cpu_env
, cpu_T0
);
4803 #ifdef TARGET_X86_64
4805 gen_helper_idivq_EAX(cpu_env
, cpu_T0
);
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
            gen_op_mov_v_reg(ot, cpu_T0, rm);
        case 0: /* inc Ev */
            gen_inc(s, ot, opreg, 1);
        case 1: /* dec Ev */
            gen_inc(s, ot, opreg, -1);
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
            next_eip = s->pc - s->cs_base;
            tcg_gen_movi_tl(cpu_T1, next_eip);
            gen_push_v(s, cpu_T1);
            gen_op_jmp_v(cpu_T0);
        case 3: /* lcall Ev */
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                           tcg_const_i32(dflag - 1),
                                           tcg_const_tl(s->pc - s->cs_base));
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
                                      tcg_const_i32(dflag - 1),
                                      tcg_const_i32(s->pc - s->cs_base));
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
            gen_op_jmp_v(cpu_T0);
        case 5: /* ljmp Ev */
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
                gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
                                          tcg_const_tl(s->pc - s->cs_base));
                gen_op_movl_seg_T0_vm(R_CS);
                gen_op_jmp_v(cpu_T1);
        case 6: /* push Ev */
            gen_push_v(s, cpu_T0);
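            /* lcall/ljmp go through a helper when the CPU is in protected
               mode (s->pe && !s->vm86) so that segment descriptor checks
               and privilege transitions are performed; the real/vm86 path
               simply loads CS and jumps to the new offset directly. */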
4921 case 0x84: /* test Ev, Gv */
4923 ot
= mo_b_d(b
, dflag
);
4925 modrm
= cpu_ldub_code(env
, s
->pc
++);
4926 reg
= ((modrm
>> 3) & 7) | rex_r
;
4928 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4929 gen_op_mov_v_reg(ot
, cpu_T1
, reg
);
4930 gen_op_testl_T0_T1_cc();
4931 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4934 case 0xa8: /* test eAX, Iv */
4936 ot
= mo_b_d(b
, dflag
);
4937 val
= insn_get(env
, s
, ot
);
4939 gen_op_mov_v_reg(ot
, cpu_T0
, OR_EAX
);
4940 tcg_gen_movi_tl(cpu_T1
, val
);
4941 gen_op_testl_T0_T1_cc();
4942 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4945 case 0x98: /* CWDE/CBW */
4947 #ifdef TARGET_X86_64
4949 gen_op_mov_v_reg(MO_32
, cpu_T0
, R_EAX
);
4950 tcg_gen_ext32s_tl(cpu_T0
, cpu_T0
);
4951 gen_op_mov_reg_v(MO_64
, R_EAX
, cpu_T0
);
4955 gen_op_mov_v_reg(MO_16
, cpu_T0
, R_EAX
);
4956 tcg_gen_ext16s_tl(cpu_T0
, cpu_T0
);
4957 gen_op_mov_reg_v(MO_32
, R_EAX
, cpu_T0
);
4960 gen_op_mov_v_reg(MO_8
, cpu_T0
, R_EAX
);
4961 tcg_gen_ext8s_tl(cpu_T0
, cpu_T0
);
4962 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
4968 case 0x99: /* CDQ/CWD */
4970 #ifdef TARGET_X86_64
4972 gen_op_mov_v_reg(MO_64
, cpu_T0
, R_EAX
);
4973 tcg_gen_sari_tl(cpu_T0
, cpu_T0
, 63);
4974 gen_op_mov_reg_v(MO_64
, R_EDX
, cpu_T0
);
4978 gen_op_mov_v_reg(MO_32
, cpu_T0
, R_EAX
);
4979 tcg_gen_ext32s_tl(cpu_T0
, cpu_T0
);
4980 tcg_gen_sari_tl(cpu_T0
, cpu_T0
, 31);
4981 gen_op_mov_reg_v(MO_32
, R_EDX
, cpu_T0
);
4984 gen_op_mov_v_reg(MO_16
, cpu_T0
, R_EAX
);
4985 tcg_gen_ext16s_tl(cpu_T0
, cpu_T0
);
4986 tcg_gen_sari_tl(cpu_T0
, cpu_T0
, 15);
4987 gen_op_mov_reg_v(MO_16
, R_EDX
, cpu_T0
);
4993 case 0x1af: /* imul Gv, Ev */
4994 case 0x69: /* imul Gv, Ev, I */
4997 modrm
= cpu_ldub_code(env
, s
->pc
++);
4998 reg
= ((modrm
>> 3) & 7) | rex_r
;
5000 s
->rip_offset
= insn_const_size(ot
);
5003 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5005 val
= insn_get(env
, s
, ot
);
5006 tcg_gen_movi_tl(cpu_T1
, val
);
5007 } else if (b
== 0x6b) {
5008 val
= (int8_t)insn_get(env
, s
, MO_8
);
5009 tcg_gen_movi_tl(cpu_T1
, val
);
5011 gen_op_mov_v_reg(ot
, cpu_T1
, reg
);
5014 #ifdef TARGET_X86_64
5016 tcg_gen_muls2_i64(cpu_regs
[reg
], cpu_T1
, cpu_T0
, cpu_T1
);
5017 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5018 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
5019 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_T1
);
5023 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
5024 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
5025 tcg_gen_muls2_i32(cpu_tmp2_i32
, cpu_tmp3_i32
,
5026 cpu_tmp2_i32
, cpu_tmp3_i32
);
5027 tcg_gen_extu_i32_tl(cpu_regs
[reg
], cpu_tmp2_i32
);
5028 tcg_gen_sari_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, 31);
5029 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5030 tcg_gen_sub_i32(cpu_tmp2_i32
, cpu_tmp2_i32
, cpu_tmp3_i32
);
5031 tcg_gen_extu_i32_tl(cpu_cc_src
, cpu_tmp2_i32
);
5034 tcg_gen_ext16s_tl(cpu_T0
, cpu_T0
);
5035 tcg_gen_ext16s_tl(cpu_T1
, cpu_T1
);
5036 /* XXX: use 32 bit mul which could be faster */
5037 tcg_gen_mul_tl(cpu_T0
, cpu_T0
, cpu_T1
);
5038 tcg_gen_mov_tl(cpu_cc_dst
, cpu_T0
);
5039 tcg_gen_ext16s_tl(cpu_tmp0
, cpu_T0
);
5040 tcg_gen_sub_tl(cpu_cc_src
, cpu_T0
, cpu_tmp0
);
5041 gen_op_mov_reg_v(ot
, reg
, cpu_T0
);
5044 set_cc_op(s
, CC_OP_MULB
+ ot
);
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_mov_v_reg(ot, cpu_T1, rm);
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
        gen_op_update2_cc();
        set_cc_op(s, CC_OP_ADDB + ot);
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGLabel *label1, *label2;
            TCGv t0, t1, t2, a0;

            ot = mo_b_d(b, dflag);
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            a0 = tcg_temp_local_new();
            gen_op_mov_v_reg(ot, t1, reg);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(ot, t0, rm);
                gen_lea_modrm(env, s, modrm);
                tcg_gen_mov_tl(a0, cpu_A0);
                gen_op_ld_v(s, ot, t0, a0);
                rm = 0; /* avoid warning */
            label1 = gen_new_label();
            tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
            tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
            label2 = gen_new_label();
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                gen_op_mov_reg_v(ot, rm, t1);
                /* perform no-op store cycle like physical cpu; must be
                   before changing accumulator to ensure idempotency if
                   the store faults and the instruction is restarted */
                gen_op_st_v(s, ot, t0, a0);
                gen_op_mov_reg_v(ot, R_EAX, t0);
                gen_set_label(label1);
                gen_op_st_v(s, ot, t1, a0);
            gen_set_label(label2);
            tcg_gen_mov_tl(cpu_cc_src, t0);
            tcg_gen_mov_tl(cpu_cc_srcT, t2);
            tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
            set_cc_op(s, CC_OP_SUBB + ot);
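            /* The operands live in "local" temporaries (tcg_temp_local_new)
               because their values must survive across the brcond/labels
               generated above; ordinary TCG temps are dead at a branch.
               The final cc_src/cc_srcT/cc_dst + CC_OP_SUB setup lets EFLAGS
               be computed later exactly as for a CMP of the accumulator
               against the old destination. */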
5126 case 0x1c7: /* cmpxchg8b */
5127 modrm
= cpu_ldub_code(env
, s
->pc
++);
5128 mod
= (modrm
>> 6) & 3;
5129 if ((mod
== 3) || ((modrm
& 0x38) != 0x8))
5131 #ifdef TARGET_X86_64
5132 if (dflag
== MO_64
) {
5133 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
))
5135 gen_lea_modrm(env
, s
, modrm
);
5136 gen_helper_cmpxchg16b(cpu_env
, cpu_A0
);
5140 if (!(s
->cpuid_features
& CPUID_CX8
))
5142 gen_lea_modrm(env
, s
, modrm
);
5143 gen_helper_cmpxchg8b(cpu_env
, cpu_A0
);
5145 set_cc_op(s
, CC_OP_EFLAGS
);
5148 /**************************/
5150 case 0x50 ... 0x57: /* push */
5151 gen_op_mov_v_reg(MO_32
, cpu_T0
, (b
& 7) | REX_B(s
));
5152 gen_push_v(s
, cpu_T0
);
5154 case 0x58 ... 0x5f: /* pop */
5156 /* NOTE: order is important for pop %sp */
5157 gen_pop_update(s
, ot
);
5158 gen_op_mov_reg_v(ot
, (b
& 7) | REX_B(s
), cpu_T0
);
5160 case 0x60: /* pusha */
5165 case 0x61: /* popa */
5170 case 0x68: /* push Iv */
5172 ot
= mo_pushpop(s
, dflag
);
5174 val
= insn_get(env
, s
, ot
);
5176 val
= (int8_t)insn_get(env
, s
, MO_8
);
5177 tcg_gen_movi_tl(cpu_T0
, val
);
5178 gen_push_v(s
, cpu_T0
);
5180 case 0x8f: /* pop Ev */
5181 modrm
= cpu_ldub_code(env
, s
->pc
++);
5182 mod
= (modrm
>> 6) & 3;
5185 /* NOTE: order is important for pop %sp */
5186 gen_pop_update(s
, ot
);
5187 rm
= (modrm
& 7) | REX_B(s
);
5188 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
5190 /* NOTE: order is important too for MMU exceptions */
5191 s
->popl_esp_hack
= 1 << ot
;
5192 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5193 s
->popl_esp_hack
= 0;
5194 gen_pop_update(s
, ot
);
5197 case 0xc8: /* enter */
5200 val
= cpu_lduw_code(env
, s
->pc
);
5202 level
= cpu_ldub_code(env
, s
->pc
++);
5203 gen_enter(s
, val
, level
);
5206 case 0xc9: /* leave */
5209 case 0x06: /* push es */
5210 case 0x0e: /* push cs */
5211 case 0x16: /* push ss */
5212 case 0x1e: /* push ds */
5215 gen_op_movl_T0_seg(b
>> 3);
5216 gen_push_v(s
, cpu_T0
);
5218 case 0x1a0: /* push fs */
5219 case 0x1a8: /* push gs */
5220 gen_op_movl_T0_seg((b
>> 3) & 7);
5221 gen_push_v(s
, cpu_T0
);
5223 case 0x07: /* pop es */
5224 case 0x17: /* pop ss */
5225 case 0x1f: /* pop ds */
5230 gen_movl_seg_T0(s
, reg
);
5231 gen_pop_update(s
, ot
);
5232 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5234 gen_jmp_im(s
->pc
- s
->cs_base
);
5237 gen_eob_inhibit_irq(s
, true);
5243 case 0x1a1: /* pop fs */
5244 case 0x1a9: /* pop gs */
5246 gen_movl_seg_T0(s
, (b
>> 3) & 7);
5247 gen_pop_update(s
, ot
);
5249 gen_jmp_im(s
->pc
- s
->cs_base
);
        /**************************/
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(cpu_T0, val);
            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
            gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);
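        /* The last argument of gen_ldst_modrm selects the direction:
           1 stores the value of 'reg' (or cpu_T0 when reg is OR_TMP0) into
           the ModRM operand, 0 loads the ModRM operand into cpu_T0 (and
           copies it to 'reg' unless reg is OR_TMP0). */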
5291 case 0x8e: /* mov seg, Gv */
5292 modrm
= cpu_ldub_code(env
, s
->pc
++);
5293 reg
= (modrm
>> 3) & 7;
5294 if (reg
>= 6 || reg
== R_CS
)
5296 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5297 gen_movl_seg_T0(s
, reg
);
5298 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5300 gen_jmp_im(s
->pc
- s
->cs_base
);
5303 gen_eob_inhibit_irq(s
, true);
5309 case 0x8c: /* mov Gv, seg */
5310 modrm
= cpu_ldub_code(env
, s
->pc
++);
5311 reg
= (modrm
>> 3) & 7;
5312 mod
= (modrm
>> 6) & 3;
5315 gen_op_movl_T0_seg(reg
);
5316 ot
= mod
== 3 ? dflag
: MO_16
;
5317 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5320 case 0x1b6: /* movzbS Gv, Eb */
5321 case 0x1b7: /* movzwS Gv, Eb */
5322 case 0x1be: /* movsbS Gv, Eb */
5323 case 0x1bf: /* movswS Gv, Eb */
5328 /* d_ot is the size of destination */
5330 /* ot is the size of source */
5331 ot
= (b
& 1) + MO_8
;
5332 /* s_ot is the sign+size of source */
5333 s_ot
= b
& 8 ? MO_SIGN
| ot
: ot
;
5335 modrm
= cpu_ldub_code(env
, s
->pc
++);
5336 reg
= ((modrm
>> 3) & 7) | rex_r
;
5337 mod
= (modrm
>> 6) & 3;
5338 rm
= (modrm
& 7) | REX_B(s
);
5341 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
5344 tcg_gen_ext8u_tl(cpu_T0
, cpu_T0
);
5347 tcg_gen_ext8s_tl(cpu_T0
, cpu_T0
);
5350 tcg_gen_ext16u_tl(cpu_T0
, cpu_T0
);
5354 tcg_gen_ext16s_tl(cpu_T0
, cpu_T0
);
5357 gen_op_mov_reg_v(d_ot
, reg
, cpu_T0
);
5359 gen_lea_modrm(env
, s
, modrm
);
5360 gen_op_ld_v(s
, s_ot
, cpu_T0
, cpu_A0
);
5361 gen_op_mov_reg_v(d_ot
, reg
, cpu_T0
);
5366 case 0x8d: /* lea */
5367 modrm
= cpu_ldub_code(env
, s
->pc
++);
5368 mod
= (modrm
>> 6) & 3;
5371 reg
= ((modrm
>> 3) & 7) | rex_r
;
5373 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
5374 TCGv ea
= gen_lea_modrm_1(a
);
5375 gen_op_mov_reg_v(dflag
, reg
, ea
);
    case 0xa0: /* mov EAX, Ov */
    case 0xa2: /* mov Ov, EAX */
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
#ifdef TARGET_X86_64
                offset_addr = cpu_ldq_code(env, s->pc);
                offset_addr = insn_get(env, s, s->aflag);
            tcg_gen_movi_tl(cpu_A0, offset_addr);
            gen_add_A0_ds_seg(s);
                gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
                gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
                gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
        gen_extu(s->aflag, cpu_A0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(cpu_T0, val);
        gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tmp = cpu_ldq_code(env, s->pc);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(cpu_T0, tmp);
            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(cpu_T0, val);
            gen_op_mov_reg_v(ot, reg, cpu_T0);
    case 0x91 ... 0x97: /* xchg R, EAX */
        reg = (b & 7) | REX_B(s);
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_mov_v_reg(ot, cpu_T1, rm);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
            gen_op_mov_reg_v(ot, reg, cpu_T1);
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(ot, cpu_T0, reg);
            /* for xchg, lock is implicit */
            if (!(prefixes & PREFIX_LOCK))
            gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
            if (!(prefixes & PREFIX_LOCK))
                gen_helper_unlock();
            gen_op_mov_reg_v(ot, reg, cpu_T1);
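            /* xchg with a memory operand is locked on real hardware even
               without a LOCK prefix, so the lock/unlock helpers are emitted
               here when no prefix was seen; when the prefix is present the
               prefix path has already emitted them, which is what the
               !(prefixes & PREFIX_LOCK) tests avoid duplicating. */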
5476 case 0xc4: /* les Gv */
5477 /* In CODE64 this is VEX3; see above. */
5480 case 0xc5: /* lds Gv */
5481 /* In CODE64 this is VEX2; see above. */
5484 case 0x1b2: /* lss Gv */
5487 case 0x1b4: /* lfs Gv */
5490 case 0x1b5: /* lgs Gv */
5493 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
5494 modrm
= cpu_ldub_code(env
, s
->pc
++);
5495 reg
= ((modrm
>> 3) & 7) | rex_r
;
5496 mod
= (modrm
>> 6) & 3;
5499 gen_lea_modrm(env
, s
, modrm
);
5500 gen_op_ld_v(s
, ot
, cpu_T1
, cpu_A0
);
5501 gen_add_A0_im(s
, 1 << ot
);
5502 /* load the segment first to handle exceptions properly */
5503 gen_op_ld_v(s
, MO_16
, cpu_T0
, cpu_A0
);
5504 gen_movl_seg_T0(s
, op
);
5505 /* then put the data */
5506 gen_op_mov_reg_v(ot
, reg
, cpu_T1
);
5508 gen_jmp_im(s
->pc
- s
->cs_base
);
5513 /************************/
5521 ot
= mo_b_d(b
, dflag
);
5522 modrm
= cpu_ldub_code(env
, s
->pc
++);
5523 mod
= (modrm
>> 6) & 3;
5524 op
= (modrm
>> 3) & 7;
5530 gen_lea_modrm(env
, s
, modrm
);
5533 opreg
= (modrm
& 7) | REX_B(s
);
5538 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5541 shift
= cpu_ldub_code(env
, s
->pc
++);
5543 gen_shifti(s
, op
, ot
, opreg
, shift
);
5558 case 0x1a4: /* shld imm */
5562 case 0x1a5: /* shld cl */
5566 case 0x1ac: /* shrd imm */
5570 case 0x1ad: /* shrd cl */
5575 modrm
= cpu_ldub_code(env
, s
->pc
++);
5576 mod
= (modrm
>> 6) & 3;
5577 rm
= (modrm
& 7) | REX_B(s
);
5578 reg
= ((modrm
>> 3) & 7) | rex_r
;
5580 gen_lea_modrm(env
, s
, modrm
);
5585 gen_op_mov_v_reg(ot
, cpu_T1
, reg
);
5588 TCGv imm
= tcg_const_tl(cpu_ldub_code(env
, s
->pc
++));
5589 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
5592 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
5596 /************************/
5599 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5600 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5601 /* XXX: what to do if illegal op ? */
5602 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5605 modrm
= cpu_ldub_code(env
, s
->pc
++);
5606 mod
= (modrm
>> 6) & 3;
5608 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5611 gen_lea_modrm(env
, s
, modrm
);
5613 case 0x00 ... 0x07: /* fxxxs */
5614 case 0x10 ... 0x17: /* fixxxl */
5615 case 0x20 ... 0x27: /* fxxxl */
5616 case 0x30 ... 0x37: /* fixxx */
5623 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5624 s
->mem_index
, MO_LEUL
);
5625 gen_helper_flds_FT0(cpu_env
, cpu_tmp2_i32
);
5628 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5629 s
->mem_index
, MO_LEUL
);
5630 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5633 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
5634 s
->mem_index
, MO_LEQ
);
5635 gen_helper_fldl_FT0(cpu_env
, cpu_tmp1_i64
);
5639 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5640 s
->mem_index
, MO_LESW
);
5641 gen_helper_fildl_FT0(cpu_env
, cpu_tmp2_i32
);
5645 gen_helper_fp_arith_ST0_FT0(op1
);
5647 /* fcomp needs pop */
5648 gen_helper_fpop(cpu_env
);
5652 case 0x08: /* flds */
5653 case 0x0a: /* fsts */
5654 case 0x0b: /* fstps */
5655 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5656 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5657 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5662 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5663 s
->mem_index
, MO_LEUL
);
5664 gen_helper_flds_ST0(cpu_env
, cpu_tmp2_i32
);
5667 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5668 s
->mem_index
, MO_LEUL
);
5669 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5672 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
,
5673 s
->mem_index
, MO_LEQ
);
5674 gen_helper_fldl_ST0(cpu_env
, cpu_tmp1_i64
);
5678 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5679 s
->mem_index
, MO_LESW
);
5680 gen_helper_fildl_ST0(cpu_env
, cpu_tmp2_i32
);
5685 /* XXX: the corresponding CPUID bit must be tested ! */
5688 gen_helper_fisttl_ST0(cpu_tmp2_i32
, cpu_env
);
5689 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5690 s
->mem_index
, MO_LEUL
);
5693 gen_helper_fisttll_ST0(cpu_tmp1_i64
, cpu_env
);
5694 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
5695 s
->mem_index
, MO_LEQ
);
5699 gen_helper_fistt_ST0(cpu_tmp2_i32
, cpu_env
);
5700 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5701 s
->mem_index
, MO_LEUW
);
5704 gen_helper_fpop(cpu_env
);
5709 gen_helper_fsts_ST0(cpu_tmp2_i32
, cpu_env
);
5710 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5711 s
->mem_index
, MO_LEUL
);
5714 gen_helper_fistl_ST0(cpu_tmp2_i32
, cpu_env
);
5715 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5716 s
->mem_index
, MO_LEUL
);
5719 gen_helper_fstl_ST0(cpu_tmp1_i64
, cpu_env
);
5720 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
,
5721 s
->mem_index
, MO_LEQ
);
5725 gen_helper_fist_ST0(cpu_tmp2_i32
, cpu_env
);
5726 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5727 s
->mem_index
, MO_LEUW
);
5731 gen_helper_fpop(cpu_env
);
5735 case 0x0c: /* fldenv mem */
5736 gen_helper_fldenv(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5738 case 0x0d: /* fldcw mem */
5739 tcg_gen_qemu_ld_i32(cpu_tmp2_i32
, cpu_A0
,
5740 s
->mem_index
, MO_LEUW
);
5741 gen_helper_fldcw(cpu_env
, cpu_tmp2_i32
);
5743 case 0x0e: /* fnstenv mem */
5744 gen_helper_fstenv(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5746 case 0x0f: /* fnstcw mem */
5747 gen_helper_fnstcw(cpu_tmp2_i32
, cpu_env
);
5748 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5749 s
->mem_index
, MO_LEUW
);
5751 case 0x1d: /* fldt mem */
5752 gen_helper_fldt_ST0(cpu_env
, cpu_A0
);
5754 case 0x1f: /* fstpt mem */
5755 gen_helper_fstt_ST0(cpu_env
, cpu_A0
);
5756 gen_helper_fpop(cpu_env
);
5758 case 0x2c: /* frstor mem */
5759 gen_helper_frstor(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5761 case 0x2e: /* fnsave mem */
5762 gen_helper_fsave(cpu_env
, cpu_A0
, tcg_const_i32(dflag
- 1));
5764 case 0x2f: /* fnstsw mem */
5765 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
5766 tcg_gen_qemu_st_i32(cpu_tmp2_i32
, cpu_A0
,
5767 s
->mem_index
, MO_LEUW
);
5769 case 0x3c: /* fbld */
5770 gen_helper_fbld_ST0(cpu_env
, cpu_A0
);
5772 case 0x3e: /* fbstp */
5773 gen_helper_fbst_ST0(cpu_env
, cpu_A0
);
5774 gen_helper_fpop(cpu_env
);
5776 case 0x3d: /* fildll */
5777 tcg_gen_qemu_ld_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
5778 gen_helper_fildll_ST0(cpu_env
, cpu_tmp1_i64
);
5780 case 0x3f: /* fistpll */
5781 gen_helper_fistll_ST0(cpu_tmp1_i64
, cpu_env
);
5782 tcg_gen_qemu_st_i64(cpu_tmp1_i64
, cpu_A0
, s
->mem_index
, MO_LEQ
);
5783 gen_helper_fpop(cpu_env
);
5789 /* register float ops */
5793 case 0x08: /* fld sti */
5794 gen_helper_fpush(cpu_env
);
5795 gen_helper_fmov_ST0_STN(cpu_env
,
5796 tcg_const_i32((opreg
+ 1) & 7));
5798 case 0x09: /* fxchg sti */
5799 case 0x29: /* fxchg4 sti, undocumented op */
5800 case 0x39: /* fxchg7 sti, undocumented op */
5801 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
5803 case 0x0a: /* grp d9/2 */
5806 /* check exceptions (FreeBSD FPU probe) */
5807 gen_helper_fwait(cpu_env
);
5813 case 0x0c: /* grp d9/4 */
5816 gen_helper_fchs_ST0(cpu_env
);
5819 gen_helper_fabs_ST0(cpu_env
);
5822 gen_helper_fldz_FT0(cpu_env
);
5823 gen_helper_fcom_ST0_FT0(cpu_env
);
5826 gen_helper_fxam_ST0(cpu_env
);
5832 case 0x0d: /* grp d9/5 */
5836 gen_helper_fpush(cpu_env
);
5837 gen_helper_fld1_ST0(cpu_env
);
5840 gen_helper_fpush(cpu_env
);
5841 gen_helper_fldl2t_ST0(cpu_env
);
5844 gen_helper_fpush(cpu_env
);
5845 gen_helper_fldl2e_ST0(cpu_env
);
5848 gen_helper_fpush(cpu_env
);
5849 gen_helper_fldpi_ST0(cpu_env
);
5852 gen_helper_fpush(cpu_env
);
5853 gen_helper_fldlg2_ST0(cpu_env
);
5856 gen_helper_fpush(cpu_env
);
5857 gen_helper_fldln2_ST0(cpu_env
);
5860 gen_helper_fpush(cpu_env
);
5861 gen_helper_fldz_ST0(cpu_env
);
5868 case 0x0e: /* grp d9/6 */
5871 gen_helper_f2xm1(cpu_env
);
5874 gen_helper_fyl2x(cpu_env
);
5877 gen_helper_fptan(cpu_env
);
5879 case 3: /* fpatan */
5880 gen_helper_fpatan(cpu_env
);
5882 case 4: /* fxtract */
5883 gen_helper_fxtract(cpu_env
);
5885 case 5: /* fprem1 */
5886 gen_helper_fprem1(cpu_env
);
5888 case 6: /* fdecstp */
5889 gen_helper_fdecstp(cpu_env
);
5892 case 7: /* fincstp */
5893 gen_helper_fincstp(cpu_env
);
5897 case 0x0f: /* grp d9/7 */
5900 gen_helper_fprem(cpu_env
);
5902 case 1: /* fyl2xp1 */
5903 gen_helper_fyl2xp1(cpu_env
);
5906 gen_helper_fsqrt(cpu_env
);
5908 case 3: /* fsincos */
5909 gen_helper_fsincos(cpu_env
);
5911 case 5: /* fscale */
5912 gen_helper_fscale(cpu_env
);
5914 case 4: /* frndint */
5915 gen_helper_frndint(cpu_env
);
5918 gen_helper_fsin(cpu_env
);
5922 gen_helper_fcos(cpu_env
);
5926 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5927 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5928 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5934 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
5936 gen_helper_fpop(cpu_env
);
5938 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5939 gen_helper_fp_arith_ST0_FT0(op1
);
5943 case 0x02: /* fcom */
5944 case 0x22: /* fcom2, undocumented op */
5945 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5946 gen_helper_fcom_ST0_FT0(cpu_env
);
5948 case 0x03: /* fcomp */
5949 case 0x23: /* fcomp3, undocumented op */
5950 case 0x32: /* fcomp5, undocumented op */
5951 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5952 gen_helper_fcom_ST0_FT0(cpu_env
);
5953 gen_helper_fpop(cpu_env
);
5955 case 0x15: /* da/5 */
5957 case 1: /* fucompp */
5958 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
5959 gen_helper_fucom_ST0_FT0(cpu_env
);
5960 gen_helper_fpop(cpu_env
);
5961 gen_helper_fpop(cpu_env
);
5969 case 0: /* feni (287 only, just do nop here) */
5971 case 1: /* fdisi (287 only, just do nop here) */
5974 gen_helper_fclex(cpu_env
);
5976 case 3: /* fninit */
5977 gen_helper_fninit(cpu_env
);
5979 case 4: /* fsetpm (287 only, just do nop here) */
5985 case 0x1d: /* fucomi */
5986 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
5989 gen_update_cc_op(s
);
5990 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
5991 gen_helper_fucomi_ST0_FT0(cpu_env
);
5992 set_cc_op(s
, CC_OP_EFLAGS
);
5994 case 0x1e: /* fcomi */
5995 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
5998 gen_update_cc_op(s
);
5999 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6000 gen_helper_fcomi_ST0_FT0(cpu_env
);
6001 set_cc_op(s
, CC_OP_EFLAGS
);
6003 case 0x28: /* ffree sti */
6004 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6006 case 0x2a: /* fst sti */
6007 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6009 case 0x2b: /* fstp sti */
6010 case 0x0b: /* fstp1 sti, undocumented op */
6011 case 0x3a: /* fstp8 sti, undocumented op */
6012 case 0x3b: /* fstp9 sti, undocumented op */
6013 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6014 gen_helper_fpop(cpu_env
);
6016 case 0x2c: /* fucom st(i) */
6017 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6018 gen_helper_fucom_ST0_FT0(cpu_env
);
6020 case 0x2d: /* fucomp st(i) */
6021 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6022 gen_helper_fucom_ST0_FT0(cpu_env
);
6023 gen_helper_fpop(cpu_env
);
6025 case 0x33: /* de/3 */
6027 case 1: /* fcompp */
6028 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6029 gen_helper_fcom_ST0_FT0(cpu_env
);
6030 gen_helper_fpop(cpu_env
);
6031 gen_helper_fpop(cpu_env
);
6037 case 0x38: /* ffreep sti, undocumented op */
6038 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6039 gen_helper_fpop(cpu_env
);
6041 case 0x3c: /* df/4 */
6044 gen_helper_fnstsw(cpu_tmp2_i32
, cpu_env
);
6045 tcg_gen_extu_i32_tl(cpu_T0
, cpu_tmp2_i32
);
6046 gen_op_mov_reg_v(MO_16
, R_EAX
, cpu_T0
);
6052 case 0x3d: /* fucomip */
6053 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6056 gen_update_cc_op(s
);
6057 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6058 gen_helper_fucomi_ST0_FT0(cpu_env
);
6059 gen_helper_fpop(cpu_env
);
6060 set_cc_op(s
, CC_OP_EFLAGS
);
6062 case 0x3e: /* fcomip */
6063 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6066 gen_update_cc_op(s
);
6067 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6068 gen_helper_fcomi_ST0_FT0(cpu_env
);
6069 gen_helper_fpop(cpu_env
);
6070 set_cc_op(s
, CC_OP_EFLAGS
);
6072 case 0x10 ... 0x13: /* fcmovxx */
6077 static const uint8_t fcmov_cc
[8] = {
6084 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6087 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6088 l1
= gen_new_label();
6089 gen_jcc1_noeob(s
, op1
, l1
);
6090 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6099 /************************/
    case 0xa4: /* movsS */
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xaa: /* stosS */
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xac: /* lodsS */
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
    case 0xae: /* scasS */
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
    case 0xa6: /* cmpsS */
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
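        /* For scas/cmps the final argument of the repz helper selects the
           termination condition: 1 corresponds to REPNZ (stop when ZF is
           set), 0 to REPZ (stop when ZF is clear). */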
6153 case 0x6c: /* insS */
6155 ot
= mo_b_d32(b
, dflag
);
6156 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6157 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6158 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6159 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6160 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6163 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6164 gen_jmp(s
, s
->pc
- s
->cs_base
);
6168 case 0x6e: /* outsS */
6170 ot
= mo_b_d32(b
, dflag
);
6171 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6172 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6173 svm_is_rep(prefixes
) | 4);
6174 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6175 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6178 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6179 gen_jmp(s
, s
->pc
- s
->cs_base
);
6184 /************************/
6189 ot
= mo_b_d32(b
, dflag
);
6190 val
= cpu_ldub_code(env
, s
->pc
++);
6191 tcg_gen_movi_tl(cpu_T0
, val
);
6192 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6193 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6194 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6197 tcg_gen_movi_i32(cpu_tmp2_i32
, val
);
6198 gen_helper_in_func(ot
, cpu_T1
, cpu_tmp2_i32
);
6199 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T1
);
6200 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6201 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6203 gen_jmp(s
, s
->pc
- s
->cs_base
);
6208 ot
= mo_b_d32(b
, dflag
);
6209 val
= cpu_ldub_code(env
, s
->pc
++);
6210 tcg_gen_movi_tl(cpu_T0
, val
);
6211 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6212 svm_is_rep(prefixes
));
6213 gen_op_mov_v_reg(ot
, cpu_T1
, R_EAX
);
6215 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6218 tcg_gen_movi_i32(cpu_tmp2_i32
, val
);
6219 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
6220 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6221 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6222 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6224 gen_jmp(s
, s
->pc
- s
->cs_base
);
6229 ot
= mo_b_d32(b
, dflag
);
6230 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6231 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6232 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6233 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6236 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
6237 gen_helper_in_func(ot
, cpu_T1
, cpu_tmp2_i32
);
6238 gen_op_mov_reg_v(ot
, R_EAX
, cpu_T1
);
6239 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6240 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6242 gen_jmp(s
, s
->pc
- s
->cs_base
);
6247 ot
= mo_b_d32(b
, dflag
);
6248 tcg_gen_ext16u_tl(cpu_T0
, cpu_regs
[R_EDX
]);
6249 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6250 svm_is_rep(prefixes
));
6251 gen_op_mov_v_reg(ot
, cpu_T1
, R_EAX
);
6253 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6256 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
6257 tcg_gen_trunc_tl_i32(cpu_tmp3_i32
, cpu_T1
);
6258 gen_helper_out_func(ot
, cpu_tmp2_i32
, cpu_tmp3_i32
);
6259 gen_bpt_io(s
, cpu_tmp2_i32
, ot
);
6260 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6262 gen_jmp(s
, s
->pc
- s
->cs_base
);
        /************************/
    case 0xc2: /* ret im */
        val = cpu_ldsw_code(env, s->pc);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(cpu_T0);
    case 0xc3: /* ret */
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(cpu_T0);
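        /* ret imm16: the return address has already been popped into T0, so
           the stack pointer is advanced by the immediate plus the size of
           the popped slot (1 << ot) in a single gen_stack_update. */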
6286 case 0xca: /* lret im */
6287 val
= cpu_ldsw_code(env
, s
->pc
);
6290 if (s
->pe
&& !s
->vm86
) {
6291 gen_update_cc_op(s
);
6292 gen_jmp_im(pc_start
- s
->cs_base
);
6293 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6294 tcg_const_i32(val
));
6298 gen_op_ld_v(s
, dflag
, cpu_T0
, cpu_A0
);
6299 /* NOTE: keeping EIP updated is not a problem in case of
6301 gen_op_jmp_v(cpu_T0
);
6303 gen_add_A0_im(s
, 1 << dflag
);
6304 gen_op_ld_v(s
, dflag
, cpu_T0
, cpu_A0
);
6305 gen_op_movl_seg_T0_vm(R_CS
);
6306 /* add stack offset */
6307 gen_stack_update(s
, val
+ (2 << dflag
));
6311 case 0xcb: /* lret */
6314 case 0xcf: /* iret */
6315 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6318 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6319 set_cc_op(s
, CC_OP_EFLAGS
);
6320 } else if (s
->vm86
) {
6322 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6324 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6325 set_cc_op(s
, CC_OP_EFLAGS
);
6328 gen_helper_iret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6329 tcg_const_i32(s
->pc
- s
->cs_base
));
6330 set_cc_op(s
, CC_OP_EFLAGS
);
6334 case 0xe8: /* call im */
6336 if (dflag
!= MO_16
) {
6337 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6339 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6341 next_eip
= s
->pc
- s
->cs_base
;
6343 if (dflag
== MO_16
) {
6345 } else if (!CODE64(s
)) {
6348 tcg_gen_movi_tl(cpu_T0
, next_eip
);
6349 gen_push_v(s
, cpu_T0
);
6354 case 0x9a: /* lcall im */
6356 unsigned int selector
, offset
;
6361 offset
= insn_get(env
, s
, ot
);
6362 selector
= insn_get(env
, s
, MO_16
);
6364 tcg_gen_movi_tl(cpu_T0
, selector
);
6365 tcg_gen_movi_tl(cpu_T1
, offset
);
6368 case 0xe9: /* jmp im */
6369 if (dflag
!= MO_16
) {
6370 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6372 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6374 tval
+= s
->pc
- s
->cs_base
;
6375 if (dflag
== MO_16
) {
6377 } else if (!CODE64(s
)) {
6383 case 0xea: /* ljmp im */
6385 unsigned int selector
, offset
;
6390 offset
= insn_get(env
, s
, ot
);
6391 selector
= insn_get(env
, s
, MO_16
);
6393 tcg_gen_movi_tl(cpu_T0
, selector
);
6394 tcg_gen_movi_tl(cpu_T1
, offset
);
6397 case 0xeb: /* jmp Jb */
6398 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6399 tval
+= s
->pc
- s
->cs_base
;
6400 if (dflag
== MO_16
) {
6405 case 0x70 ... 0x7f: /* jcc Jb */
6406 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6408 case 0x180 ... 0x18f: /* jcc Jv */
6409 if (dflag
!= MO_16
) {
6410 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6412 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6415 next_eip
= s
->pc
- s
->cs_base
;
6417 if (dflag
== MO_16
) {
6421 gen_jcc(s
, b
, tval
, next_eip
);
6424 case 0x190 ... 0x19f: /* setcc Gv */
6425 modrm
= cpu_ldub_code(env
, s
->pc
++);
6426 gen_setcc1(s
, b
, cpu_T0
);
6427 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
6429 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6430 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6434 modrm
= cpu_ldub_code(env
, s
->pc
++);
6435 reg
= ((modrm
>> 3) & 7) | rex_r
;
6436 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6439 /************************/
6441 case 0x9c: /* pushf */
6442 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6443 if (s
->vm86
&& s
->iopl
!= 3) {
6444 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6446 gen_update_cc_op(s
);
6447 gen_helper_read_eflags(cpu_T0
, cpu_env
);
6448 gen_push_v(s
, cpu_T0
);
6451 case 0x9d: /* popf */
6452 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6453 if (s
->vm86
&& s
->iopl
!= 3) {
6454 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6458 if (dflag
!= MO_16
) {
6459 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6460 tcg_const_i32((TF_MASK
| AC_MASK
|
6465 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6466 tcg_const_i32((TF_MASK
| AC_MASK
|
6468 IF_MASK
| IOPL_MASK
)
6472 if (s
->cpl
<= s
->iopl
) {
6473 if (dflag
!= MO_16
) {
6474 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6475 tcg_const_i32((TF_MASK
|
6481 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6482 tcg_const_i32((TF_MASK
|
6490 if (dflag
!= MO_16
) {
6491 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6492 tcg_const_i32((TF_MASK
| AC_MASK
|
6493 ID_MASK
| NT_MASK
)));
6495 gen_helper_write_eflags(cpu_env
, cpu_T0
,
6496 tcg_const_i32((TF_MASK
| AC_MASK
|
6502 gen_pop_update(s
, ot
);
6503 set_cc_op(s
, CC_OP_EFLAGS
);
6504 /* abort translation because TF/AC flag may change */
6505 gen_jmp_im(s
->pc
- s
->cs_base
);
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
        gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
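        /* DF is kept in env->df as +1 or -1 rather than as a flag bit, so
           the string instructions can add it (shifted by the operand size)
           directly to ESI/EDI without testing the direction each time. */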
6547 /************************/
6548 /* bit operations */
6549 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6551 modrm
= cpu_ldub_code(env
, s
->pc
++);
6552 op
= (modrm
>> 3) & 7;
6553 mod
= (modrm
>> 6) & 3;
6554 rm
= (modrm
& 7) | REX_B(s
);
6557 gen_lea_modrm(env
, s
, modrm
);
6558 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
6560 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
6563 val
= cpu_ldub_code(env
, s
->pc
++);
6564 tcg_gen_movi_tl(cpu_T1
, val
);
6569 case 0x1a3: /* bt Gv, Ev */
6572 case 0x1ab: /* bts */
6575 case 0x1b3: /* btr */
6578 case 0x1bb: /* btc */
6582 modrm
= cpu_ldub_code(env
, s
->pc
++);
6583 reg
= ((modrm
>> 3) & 7) | rex_r
;
6584 mod
= (modrm
>> 6) & 3;
6585 rm
= (modrm
& 7) | REX_B(s
);
6586 gen_op_mov_v_reg(MO_32
, cpu_T1
, reg
);
6588 gen_lea_modrm(env
, s
, modrm
);
6589 /* specific case: we need to add a displacement */
6590 gen_exts(ot
, cpu_T1
);
6591 tcg_gen_sari_tl(cpu_tmp0
, cpu_T1
, 3 + ot
);
6592 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, ot
);
6593 tcg_gen_add_tl(cpu_A0
, cpu_A0
, cpu_tmp0
);
6594 gen_op_ld_v(s
, ot
, cpu_T0
, cpu_A0
);
6596 gen_op_mov_v_reg(ot
, cpu_T0
, rm
);
6599 tcg_gen_andi_tl(cpu_T1
, cpu_T1
, (1 << (3 + ot
)) - 1);
6600 tcg_gen_shr_tl(cpu_tmp4
, cpu_T0
, cpu_T1
);
6605 tcg_gen_movi_tl(cpu_tmp0
, 1);
6606 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T1
);
6607 tcg_gen_or_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
6610 tcg_gen_movi_tl(cpu_tmp0
, 1);
6611 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T1
);
6612 tcg_gen_andc_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
6616 tcg_gen_movi_tl(cpu_tmp0
, 1);
6617 tcg_gen_shl_tl(cpu_tmp0
, cpu_tmp0
, cpu_T1
);
6618 tcg_gen_xor_tl(cpu_T0
, cpu_T0
, cpu_tmp0
);
6623 gen_op_st_v(s
, ot
, cpu_T0
, cpu_A0
);
6625 gen_op_mov_reg_v(ot
, rm
, cpu_T0
);
6629 /* Delay all CC updates until after the store above. Note that
6630 C is the result of the test, Z is unchanged, and the others
6631 are all undefined. */
6633 case CC_OP_MULB
... CC_OP_MULQ
:
6634 case CC_OP_ADDB
... CC_OP_ADDQ
:
6635 case CC_OP_ADCB
... CC_OP_ADCQ
:
6636 case CC_OP_SUBB
... CC_OP_SUBQ
:
6637 case CC_OP_SBBB
... CC_OP_SBBQ
:
6638 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
6639 case CC_OP_INCB
... CC_OP_INCQ
:
6640 case CC_OP_DECB
... CC_OP_DECQ
:
6641 case CC_OP_SHLB
... CC_OP_SHLQ
:
6642 case CC_OP_SARB
... CC_OP_SARQ
:
6643 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
6644 /* Z was going to be computed from the non-zero status of CC_DST.
6645 We can get that same Z value (and the new C value) by leaving
6646 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6648 tcg_gen_mov_tl(cpu_cc_src
, cpu_tmp4
);
6649 set_cc_op(s
, ((s
->cc_op
- CC_OP_MULB
) & 3) + CC_OP_SARB
);
6652 /* Otherwise, generate EFLAGS and replace the C bit. */
6653 gen_compute_eflags(s
);
6654 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp4
,
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T0);
        /* Note that lzcnt and tzcnt are in different extensions. */
        if ((prefixes & PREFIX_REPZ)
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                gen_helper_clz(cpu_T0, cpu_T0);
                tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
                /* For tzcnt, a zero input must return the operand size:
                   force all bits outside the operand size to 1. */
                target_ulong mask = (target_ulong)-2 << (size - 1);
                tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
                gen_helper_ctz(cpu_T0, cpu_T0);
            /* For lzcnt/tzcnt, C and Z bits are defined and are
               related to the result. */
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_BMILGB + ot);
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
            set_cc_op(s, CC_OP_LOGICB + ot);
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros. */
                gen_helper_clz(cpu_T0, cpu_T0);
                tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
                gen_helper_ctz(cpu_T0, cpu_T0);
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that. */
            tcg_gen_movi_tl(cpu_tmp0, 0);
            tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
                               cpu_regs[reg], cpu_T0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);
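        /* On CPUs without BMI1/ABM the F3 prefix is ignored and tzcnt/lzcnt
           execute as plain bsf/bsr, which is why the feature check above
           falls back to the bsf/bsr code path instead of raising #UD. */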
6712 /************************/
6714 case 0x27: /* daa */
6717 gen_update_cc_op(s
);
6718 gen_helper_daa(cpu_env
);
6719 set_cc_op(s
, CC_OP_EFLAGS
);
6721 case 0x2f: /* das */
6724 gen_update_cc_op(s
);
6725 gen_helper_das(cpu_env
);
6726 set_cc_op(s
, CC_OP_EFLAGS
);
6728 case 0x37: /* aaa */
6731 gen_update_cc_op(s
);
6732 gen_helper_aaa(cpu_env
);
6733 set_cc_op(s
, CC_OP_EFLAGS
);
6735 case 0x3f: /* aas */
6738 gen_update_cc_op(s
);
6739 gen_helper_aas(cpu_env
);
6740 set_cc_op(s
, CC_OP_EFLAGS
);
6742 case 0xd4: /* aam */
6745 val
= cpu_ldub_code(env
, s
->pc
++);
6747 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6749 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
6750 set_cc_op(s
, CC_OP_LOGICB
);
6753 case 0xd5: /* aad */
6756 val
= cpu_ldub_code(env
, s
->pc
++);
6757 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
6758 set_cc_op(s
, CC_OP_LOGICB
);
6760 /************************/
6762 case 0x90: /* nop */
6763 /* XXX: correct lock test for all insn */
6764 if (prefixes
& PREFIX_LOCK
) {
6767 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6769 goto do_xchg_reg_eax
;
6771 if (prefixes
& PREFIX_REPZ
) {
6772 gen_update_cc_op(s
);
6773 gen_jmp_im(pc_start
- s
->cs_base
);
6774 gen_helper_pause(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6775 s
->is_jmp
= DISAS_TB_JUMP
;
6778 case 0x9b: /* fwait */
6779 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
6780 (HF_MP_MASK
| HF_TS_MASK
)) {
6781 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
6783 gen_helper_fwait(cpu_env
);
6786 case 0xcc: /* int3 */
6787 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6789 case 0xcd: /* int N */
6790 val
= cpu_ldub_code(env
, s
->pc
++);
6791 if (s
->vm86
&& s
->iopl
!= 3) {
6792 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6794 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6797 case 0xce: /* into */
6800 gen_update_cc_op(s
);
6801 gen_jmp_im(pc_start
- s
->cs_base
);
6802 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6805 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6806 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
6808 gen_debug(s
, pc_start
- s
->cs_base
);
6811 tb_flush(CPU(x86_env_get_cpu(env
)));
6812 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
6816 case 0xfa: /* cli */
6818 if (s
->cpl
<= s
->iopl
) {
6819 gen_helper_cli(cpu_env
);
6821 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6825 gen_helper_cli(cpu_env
);
6827 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6831 case 0xfb: /* sti */
6832 if (s
->vm86
? s
->iopl
== 3 : s
->cpl
<= s
->iopl
) {
6833 gen_helper_sti(cpu_env
);
6834 /* interruptions are enabled only the first insn after sti */
6835 gen_jmp_im(s
->pc
- s
->cs_base
);
6836 gen_eob_inhibit_irq(s
, true);
6838 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6841 case 0x62: /* bound */
6845 modrm
= cpu_ldub_code(env
, s
->pc
++);
6846 reg
= (modrm
>> 3) & 7;
6847 mod
= (modrm
>> 6) & 3;
6850 gen_op_mov_v_reg(ot
, cpu_T0
, reg
);
6851 gen_lea_modrm(env
, s
, modrm
);
6852 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
6854 gen_helper_boundw(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
6856 gen_helper_boundl(cpu_env
, cpu_A0
, cpu_tmp2_i32
);
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            gen_op_mov_v_reg(MO_64, cpu_T0, reg);
            tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_64, reg, cpu_T0);
            gen_op_mov_v_reg(MO_32, cpu_T0, reg);
            tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
            tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
            gen_op_mov_reg_v(MO_32, reg, cpu_T0);
    case 0xd6: /* salc */
        gen_compute_eflags_c(s, cpu_T0);
        tcg_gen_neg_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
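        /* salc (undocumented): gen_compute_eflags_c leaves CF (0 or 1) in
           T0; negating it yields 0 or -1, whose low byte (0x00 or 0xff) is
           written to AL. */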
6882 case 0xe0: /* loopnz */
6883 case 0xe1: /* loopz */
6884 case 0xe2: /* loop */
6885 case 0xe3: /* jecxz */
6887 TCGLabel
*l1
, *l2
, *l3
;
6889 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6890 next_eip
= s
->pc
- s
->cs_base
;
6892 if (dflag
== MO_16
) {
6896 l1
= gen_new_label();
6897 l2
= gen_new_label();
6898 l3
= gen_new_label();
6901 case 0: /* loopnz */
6903 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6904 gen_op_jz_ecx(s
->aflag
, l3
);
6905 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
6908 gen_op_add_reg_im(s
->aflag
, R_ECX
, -1);
6909 gen_op_jnz_ecx(s
->aflag
, l1
);
6913 gen_op_jz_ecx(s
->aflag
, l1
);
6918 gen_jmp_im(next_eip
);
6927 case 0x130: /* wrmsr */
6928 case 0x132: /* rdmsr */
6930 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6932 gen_update_cc_op(s
);
6933 gen_jmp_im(pc_start
- s
->cs_base
);
6935 gen_helper_rdmsr(cpu_env
);
6937 gen_helper_wrmsr(cpu_env
);
6941 case 0x131: /* rdtsc */
6942 gen_update_cc_op(s
);
6943 gen_jmp_im(pc_start
- s
->cs_base
);
6944 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6947 gen_helper_rdtsc(cpu_env
);
6948 if (s
->tb
->cflags
& CF_USE_ICOUNT
) {
6950 gen_jmp(s
, s
->pc
- s
->cs_base
);
6953 case 0x133: /* rdpmc */
6954 gen_update_cc_op(s
);
6955 gen_jmp_im(pc_start
- s
->cs_base
);
6956 gen_helper_rdpmc(cpu_env
);
6958 case 0x134: /* sysenter */
6959 /* For Intel SYSENTER is valid on 64-bit */
6960 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6963 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6965 gen_helper_sysenter(cpu_env
);
6969 case 0x135: /* sysexit */
6970 /* For Intel SYSEXIT is valid on 64-bit */
6971 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
6974 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6976 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
- 1));
6980 #ifdef TARGET_X86_64
6981 case 0x105: /* syscall */
6982 /* XXX: is it usable in real mode ? */
6983 gen_update_cc_op(s
);
6984 gen_jmp_im(pc_start
- s
->cs_base
);
6985 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6988 case 0x107: /* sysret */
6990 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6992 gen_helper_sysret(cpu_env
, tcg_const_i32(dflag
- 1));
6993 /* condition codes are modified only in long mode */
6995 set_cc_op(s
, CC_OP_EFLAGS
);
7001 case 0x1a2: /* cpuid */
7002 gen_update_cc_op(s
);
7003 gen_jmp_im(pc_start
- s
->cs_base
);
7004 gen_helper_cpuid(cpu_env
);
7006 case 0xf4: /* hlt */
7008 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7010 gen_update_cc_op(s
);
7011 gen_jmp_im(pc_start
- s
->cs_base
);
7012 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7013 s
->is_jmp
= DISAS_TB_JUMP
;
7017 modrm
= cpu_ldub_code(env
, s
->pc
++);
7018 mod
= (modrm
>> 6) & 3;
7019 op
= (modrm
>> 3) & 7;
7022 if (!s
->pe
|| s
->vm86
)
7024 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7025 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
7026 offsetof(CPUX86State
, ldt
.selector
));
7027 ot
= mod
== 3 ? dflag
: MO_16
;
7028 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7031 if (!s
->pe
|| s
->vm86
)
7034 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7036 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7037 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7038 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
7039 gen_helper_lldt(cpu_env
, cpu_tmp2_i32
);
7043 if (!s
->pe
|| s
->vm86
)
7045 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7046 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
,
7047 offsetof(CPUX86State
, tr
.selector
));
7048 ot
= mod
== 3 ? dflag
: MO_16
;
7049 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7052 if (!s
->pe
|| s
->vm86
)
7055 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7057 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7058 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7059 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_T0
);
7060 gen_helper_ltr(cpu_env
, cpu_tmp2_i32
);
7065 if (!s
->pe
|| s
->vm86
)
7067 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7068 gen_update_cc_op(s
);
7070 gen_helper_verr(cpu_env
, cpu_T0
);
7072 gen_helper_verw(cpu_env
, cpu_T0
);
7074 set_cc_op(s
, CC_OP_EFLAGS
);
7082 modrm
= cpu_ldub_code(env
, s
->pc
++);
7084 CASE_MODRM_MEM_OP(0): /* sgdt */
7085 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7086 gen_lea_modrm(env
, s
, modrm
);
7087 tcg_gen_ld32u_tl(cpu_T0
,
7088 cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7089 gen_op_st_v(s
, MO_16
, cpu_T0
, cpu_A0
);
7090 gen_add_A0_im(s
, 2);
7091 tcg_gen_ld_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7092 if (dflag
== MO_16
) {
7093 tcg_gen_andi_tl(cpu_T0
, cpu_T0
, 0xffffff);
7095 gen_op_st_v(s
, CODE64(s
) + MO_32
, cpu_T0
, cpu_A0
);
7098 case 0xc8: /* monitor */
7099 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || s
->cpl
!= 0) {
7102 gen_update_cc_op(s
);
7103 gen_jmp_im(pc_start
- s
->cs_base
);
7104 tcg_gen_mov_tl(cpu_A0
, cpu_regs
[R_EAX
]);
7105 gen_extu(s
->aflag
, cpu_A0
);
7106 gen_add_A0_ds_seg(s
);
7107 gen_helper_monitor(cpu_env
, cpu_A0
);
7110 case 0xc9: /* mwait */
7111 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || s
->cpl
!= 0) {
7114 gen_update_cc_op(s
);
7115 gen_jmp_im(pc_start
- s
->cs_base
);
7116 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7120 case 0xca: /* clac */
7121 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7125 gen_helper_clac(cpu_env
);
7126 gen_jmp_im(s
->pc
- s
->cs_base
);
7130 case 0xcb: /* stac */
7131 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7135 gen_helper_stac(cpu_env
);
7136 gen_jmp_im(s
->pc
- s
->cs_base
);
7140 CASE_MODRM_MEM_OP(1): /* sidt */
7141 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7142 gen_lea_modrm(env
, s
, modrm
);
7143 tcg_gen_ld32u_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7144 gen_op_st_v(s
, MO_16
, cpu_T0
, cpu_A0
);
7145 gen_add_A0_im(s
, 2);
7146 tcg_gen_ld_tl(cpu_T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
7147 if (dflag
== MO_16
) {
7148 tcg_gen_andi_tl(cpu_T0
, cpu_T0
, 0xffffff);
7150 gen_op_st_v(s
, CODE64(s
) + MO_32
, cpu_T0
, cpu_A0
);
7153 case 0xd0: /* xgetbv */
7154 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7155 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7156 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7159 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_ECX
]);
7160 gen_helper_xgetbv(cpu_tmp1_i64
, cpu_env
, cpu_tmp2_i32
);
7161 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], cpu_tmp1_i64
);
7164 case 0xd1: /* xsetbv */
7165 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7166 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7167 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7171 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7174 tcg_gen_concat_tl_i64(cpu_tmp1_i64
, cpu_regs
[R_EAX
],
7176 tcg_gen_trunc_tl_i32(cpu_tmp2_i32
, cpu_regs
[R_ECX
]);
7177 gen_helper_xsetbv(cpu_env
, cpu_tmp2_i32
, cpu_tmp1_i64
);
7178 /* End TB because translation flags may change. */
7179 gen_jmp_im(s
->pc
- s
->cs_base
);
        case 0xd8: /* VMRUN */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
                             tcg_const_i32(s->pc - pc_start));
            tcg_gen_exit_tb(0);
            s->is_jmp = DISAS_TB_JUMP;
            break;

        case 0xd9: /* VMMCALL */
            if (!(s->flags & HF_SVME_MASK)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmmcall(cpu_env);
            break;

        case 0xda: /* VMLOAD */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_stgi(cpu_env);
            break;

        case 0xdd: /* CLGI */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_clgi(cpu_env);
            break;

        case 0xde: /* SKINIT */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_skinit(cpu_env);
            break;

        case 0xdf: /* INVLPGA */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
            break;
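        /*
         * Note: the SVM helpers above take the current address size as
         * tcg_const_i32(s->aflag - 1); since aflag is a TCGMemOp, this
         * presumably encodes 16/32/64-bit addressing as 0/1/2.  VMRUN
         * additionally passes the instruction length so the helper can
         * compute the guest's next RIP.
         */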
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
            }
            tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;
        CASE_MODRM_OP(4): /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
            if (CODE64(s)) {
                mod = (modrm >> 6) & 3;
                ot = (mod != 3 ? MO_16 : s->dflag);
            } else {
                ot = MO_16;
            }
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;

        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
            break;

        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_helper_lmsw(cpu_env, cpu_T0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, cpu_A0);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(cpu_T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(pc_start - s->cs_base);
            if (s->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_rdtscp(cpu_env);
            if (s->tb->cflags & CF_USE_ICOUNT) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;

        default:
            goto unknown_op;
        }
        break;
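    /*
     * Note: when icount is enabled (CF_USE_ICOUNT), the RDTSCP helper above
     * is bracketed by gen_io_start()/gen_io_end() so the virtual-time read
     * is charged to exactly one instruction, and gen_jmp() then ends the TB.
     */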
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;

    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(MO_32, cpu_T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
                }
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
                gen_op_mov_reg_v(d_ot, reg, cpu_T0);
            }
        } else
#endif
        {
            TCGLabel *label1;
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = MO_16;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, a0);
                tcg_temp_free(a0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;

    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, cpu_T0);
            } else {
                gen_helper_lsl(t0, cpu_env, cpu_T0);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
            tcg_temp_free(t0);
        }
        break;

    case 0x118:
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* prefetchnta */
        case 1: /* prefetchnt0 */
        case 2: /* prefetchnt0 */
        case 3: /* prefetchnt0 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
                tcg_temp_free_i64(notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;

    case 0x11b:
        modrm = cpu_ldub_code(env, s->pc++);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(cpu_A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(cpu_T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
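    /*
     * Note: for MPX, cpu_bndl[reg] holds the lower bound directly while the
     * upper bound appears to be kept in complemented form -- BNDMK above
     * stores the one's complement of the computed address into cpu_bndu,
     * and BNDCU complements it again before the bounds check.  BNDMOV
     * simply copies both halves.
     */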
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = cpu_ldub_code(env, s->pc++);
        gen_nop_modrm(env, s, modrm);
        break;

    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch (reg) {
            case 0:
            case 2:
            case 3:
            case 4:
            case 8:
                gen_update_cc_op(s);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    gen_op_mov_v_reg(ot, cpu_T0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                         cpu_T0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_v(ot, rm, cpu_T0);
                }
                break;
            default:
                goto unknown_op;
            }
        }
        break;

    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = cpu_ldub_code(env, s->pc++);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(ot, cpu_T0, rm);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(cpu_tmp2_i32, reg);
                gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
                gen_op_mov_reg_v(ot, rm, cpu_T0);
            }
        }
        break;

    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;

    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;

    case 0x1ae:
        modrm = cpu_ldub_code(env, s->pc++);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, cpu_A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, cpu_A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
            break;

        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            gen_update_cc_op(s);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;

        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xc8: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xd8: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime. */
                tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
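        /*
         * Note: the x86 fences map onto TCG memory barriers here -- SFENCE
         * orders stores (TCG_MO_ST_ST), LFENCE orders loads (TCG_MO_LD_LD),
         * and MFENCE orders all accesses (TCG_MO_ALL), each with
         * sequentially-consistent strength (TCG_BAR_SC).
         */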
        default:
            goto unknown_op;
        }
        break;

    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;

    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;

    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
        gen_op_mov_reg_v(ot, reg, cpu_T0);

        set_cc_op(s, CC_OP_EFLAGS);
        break;

    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto unknown_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_illegal_opcode(s);
    return s->pc;
 unknown_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_unknown_opcode(env, s);
    return s->pc;
}
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;
    static bool initialized;

    if (initialized) {
        return;
    }
    initialized = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
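/*
 * Note: the globals created in tcg_x86_init() are TCG "global memory" values
 * backed by fields of CPUX86State (general-purpose registers, segment bases,
 * MPX bound registers and the lazy condition-code words), so generated code
 * can reference guest state by name instead of emitting explicit loads and
 * stores around every access.
 */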
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint32_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
#ifdef CONFIG_SOFTMMU
    dc->mem_index = cpu_mmu_index(env, false);
#endif
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK));
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes. The first one was used
       always except single step mode. And this setting
       disables jumps optimization and control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    for (;;) {
        tcg_gen_insn_start(pc_ptr, dc->cc_op);
        num_insns++;

        /* If RF is set, suppress an internally generated breakpoint. */
        if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
                                         tb->flags & HF_RF_MASK
                                         ? BP_GDB : BP_ANY))) {
            gen_debug(dc, pc_ptr - dc->cs_base);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            pc_ptr += 1;
            goto done_generating;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception. Do it only when boundary is
           crossed by the first instruction in the block.
           If current instruction already crossed the bound - it's ok,
           because an exception hasn't stopped this code.
         */
        if ((tb->cflags & CF_USE_ICOUNT)
            && ((pc_ptr & TARGET_PAGE_MASK)
                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if too long translation, stop generation too */
        if (tcg_op_buf_full() ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
}
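/*
 * Note: translation of a block stops when disas_insn() signals a jump, when
 * single-stepping or inhibited interrupts force a one-instruction block,
 * when an icount page boundary is reached, or when the TCG opcode buffer,
 * the per-page size limit, or the max_insns budget is exhausted -- each of
 * those exit paths emits gen_jmp_im()/gen_eob() to close the TB before
 * falling out of the loop.
 */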
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {