4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/translator.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
35 #define PREFIX_REPZ 0x01
36 #define PREFIX_REPNZ 0x02
37 #define PREFIX_LOCK 0x04
38 #define PREFIX_DATA 0x08
39 #define PREFIX_ADR 0x10
40 #define PREFIX_VEX 0x20
43 #define CODE64(s) ((s)->code64)
44 #define REX_X(s) ((s)->rex_x)
45 #define REX_B(s) ((s)->rex_b)
60 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
61 #define CASE_MODRM_MEM_OP(OP) \
62 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
63 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
64 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
66 #define CASE_MODRM_OP(OP) \
67 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
68 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
69 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
70 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
72 //#define MACRO_TEST 1
74 /* global register indexes */
75 static TCGv cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
;
76 static TCGv_i32 cpu_cc_op
;
77 static TCGv cpu_regs
[CPU_NB_REGS
];
78 static TCGv cpu_seg_base
[6];
79 static TCGv_i64 cpu_bndl
[4];
80 static TCGv_i64 cpu_bndu
[4];
82 #include "exec/gen-icount.h"
84 typedef struct DisasContext
{
85 DisasContextBase base
;
87 /* current insn context */
88 int override
; /* -1 if no override */
92 target_ulong pc_start
;
93 target_ulong pc
; /* pc = eip + cs_base */
94 /* current block context */
95 target_ulong cs_base
; /* base of CS segment */
96 int pe
; /* protected mode */
97 int code32
; /* 32 bit code segment */
99 int lma
; /* long mode active */
100 int code64
; /* 64 bit code segment */
103 int vex_l
; /* vex vector length */
104 int vex_v
; /* vex vvvv register, without 1's complement. */
105 int ss32
; /* 32 bit stack segment */
106 CCOp cc_op
; /* current CC operation */
111 int addseg
; /* non zero if either DS/ES/SS have a non zero base */
112 int f_st
; /* currently unused */
113 int vm86
; /* vm86 mode */
116 int tf
; /* TF cpu flag */
117 int jmp_opt
; /* use direct block chaining for direct jumps */
118 int repz_opt
; /* optimize jumps within repz instructions */
119 int mem_index
; /* select memory access functions */
120 uint64_t flags
; /* all execution flags */
121 int popl_esp_hack
; /* for correct popl with esp base handling */
122 int rip_offset
; /* only used in x86_64, but left for simplicity */
124 int cpuid_ext_features
;
125 int cpuid_ext2_features
;
126 int cpuid_ext3_features
;
127 int cpuid_7_0_ebx_features
;
128 int cpuid_xsave_features
;
130 /* TCG local temps */
136 /* TCG local register indexes (only used inside old micro ops) */
148 static void gen_eob(DisasContext
*s
);
149 static void gen_jr(DisasContext
*s
, TCGv dest
);
150 static void gen_jmp(DisasContext
*s
, target_ulong eip
);
151 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
);
152 static void gen_op(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
);
154 /* i386 arith/logic operations */
174 OP_SHL1
, /* undocumented */
190 /* I386 int registers */
191 OR_EAX
, /* MUST be even numbered */
200 OR_TMP0
= 16, /* temporary operand register */
202 OR_A0
, /* temporary register used when doing address evaluation */
212 /* Bit set if the global variable is live after setting CC_OP to X. */
213 static const uint8_t cc_op_live
[CC_OP_NB
] = {
214 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
215 [CC_OP_EFLAGS
] = USES_CC_SRC
,
216 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
217 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
218 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
219 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
220 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
221 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
222 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
223 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
224 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
225 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
226 [CC_OP_BMILGB
... CC_OP_BMILGQ
] = USES_CC_DST
| USES_CC_SRC
,
227 [CC_OP_ADCX
] = USES_CC_DST
| USES_CC_SRC
,
228 [CC_OP_ADOX
] = USES_CC_SRC
| USES_CC_SRC2
,
229 [CC_OP_ADCOX
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
231 [CC_OP_POPCNT
] = USES_CC_SRC
,
234 static void set_cc_op(DisasContext
*s
, CCOp op
)
238 if (s
->cc_op
== op
) {
242 /* Discard CC computation that will no longer be used. */
243 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
244 if (dead
& USES_CC_DST
) {
245 tcg_gen_discard_tl(cpu_cc_dst
);
247 if (dead
& USES_CC_SRC
) {
248 tcg_gen_discard_tl(cpu_cc_src
);
250 if (dead
& USES_CC_SRC2
) {
251 tcg_gen_discard_tl(cpu_cc_src2
);
253 if (dead
& USES_CC_SRCT
) {
254 tcg_gen_discard_tl(s
->cc_srcT
);
257 if (op
== CC_OP_DYNAMIC
) {
258 /* The DYNAMIC setting is translator only, and should never be
259 stored. Thus we always consider it clean. */
260 s
->cc_op_dirty
= false;
262 /* Discard any computed CC_OP value (see shifts). */
263 if (s
->cc_op
== CC_OP_DYNAMIC
) {
264 tcg_gen_discard_i32(cpu_cc_op
);
266 s
->cc_op_dirty
= true;
271 static void gen_update_cc_op(DisasContext
*s
)
273 if (s
->cc_op_dirty
) {
274 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
275 s
->cc_op_dirty
= false;
281 #define NB_OP_SIZES 4
283 #else /* !TARGET_X86_64 */
285 #define NB_OP_SIZES 3
287 #endif /* !TARGET_X86_64 */
289 #if defined(HOST_WORDS_BIGENDIAN)
290 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
291 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
292 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
293 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
294 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
296 #define REG_B_OFFSET 0
297 #define REG_H_OFFSET 1
298 #define REG_W_OFFSET 0
299 #define REG_L_OFFSET 0
300 #define REG_LH_OFFSET 4
303 /* In instruction encodings for byte register accesses the
304 * register number usually indicates "low 8 bits of register N";
305 * however there are some special cases where N 4..7 indicates
306 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
307 * true for this special case, false otherwise.
309 static inline bool byte_reg_is_xH(DisasContext
*s
, int reg
)
315 if (reg
>= 8 || s
->x86_64_hregs
) {
322 /* Select the size of a push/pop operation. */
323 static inline TCGMemOp
mo_pushpop(DisasContext
*s
, TCGMemOp ot
)
326 return ot
== MO_16
? MO_16
: MO_64
;
332 /* Select the size of the stack pointer. */
333 static inline TCGMemOp
mo_stacksize(DisasContext
*s
)
335 return CODE64(s
) ? MO_64
: s
->ss32
? MO_32
: MO_16
;
338 /* Select only size 64 else 32. Used for SSE operand sizes. */
339 static inline TCGMemOp
mo_64_32(TCGMemOp ot
)
342 return ot
== MO_64
? MO_64
: MO_32
;
348 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
349 byte vs word opcodes. */
350 static inline TCGMemOp
mo_b_d(int b
, TCGMemOp ot
)
352 return b
& 1 ? ot
: MO_8
;
355 /* Select size 8 if lsb of B is clear, else OT capped at 32.
356 Used for decoding operand size of port opcodes. */
357 static inline TCGMemOp
mo_b_d32(int b
, TCGMemOp ot
)
359 return b
& 1 ? (ot
== MO_16
? MO_16
: MO_32
) : MO_8
;
362 static void gen_op_mov_reg_v(DisasContext
*s
, TCGMemOp ot
, int reg
, TCGv t0
)
366 if (!byte_reg_is_xH(s
, reg
)) {
367 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 8);
369 tcg_gen_deposit_tl(cpu_regs
[reg
- 4], cpu_regs
[reg
- 4], t0
, 8, 8);
373 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 16);
376 /* For x86_64, this sets the higher half of register to zero.
377 For i386, this is equivalent to a mov. */
378 tcg_gen_ext32u_tl(cpu_regs
[reg
], t0
);
382 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
391 void gen_op_mov_v_reg(DisasContext
*s
, TCGMemOp ot
, TCGv t0
, int reg
)
393 if (ot
== MO_8
&& byte_reg_is_xH(s
, reg
)) {
394 tcg_gen_extract_tl(t0
, cpu_regs
[reg
- 4], 8, 8);
396 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
400 static void gen_add_A0_im(DisasContext
*s
, int val
)
402 tcg_gen_addi_tl(s
->A0
, s
->A0
, val
);
404 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
408 static inline void gen_op_jmp_v(TCGv dest
)
410 tcg_gen_st_tl(dest
, cpu_env
, offsetof(CPUX86State
, eip
));
414 void gen_op_add_reg_im(DisasContext
*s
, TCGMemOp size
, int reg
, int32_t val
)
416 tcg_gen_addi_tl(s
->tmp0
, cpu_regs
[reg
], val
);
417 gen_op_mov_reg_v(s
, size
, reg
, s
->tmp0
);
420 static inline void gen_op_add_reg_T0(DisasContext
*s
, TCGMemOp size
, int reg
)
422 tcg_gen_add_tl(s
->tmp0
, cpu_regs
[reg
], s
->T0
);
423 gen_op_mov_reg_v(s
, size
, reg
, s
->tmp0
);
426 static inline void gen_op_ld_v(DisasContext
*s
, int idx
, TCGv t0
, TCGv a0
)
428 tcg_gen_qemu_ld_tl(t0
, a0
, s
->mem_index
, idx
| MO_LE
);
431 static inline void gen_op_st_v(DisasContext
*s
, int idx
, TCGv t0
, TCGv a0
)
433 tcg_gen_qemu_st_tl(t0
, a0
, s
->mem_index
, idx
| MO_LE
);
436 static inline void gen_op_st_rm_T0_A0(DisasContext
*s
, int idx
, int d
)
439 gen_op_st_v(s
, idx
, s
->T0
, s
->A0
);
441 gen_op_mov_reg_v(s
, idx
, d
, s
->T0
);
445 static inline void gen_jmp_im(DisasContext
*s
, target_ulong pc
)
447 tcg_gen_movi_tl(s
->tmp0
, pc
);
448 gen_op_jmp_v(s
->tmp0
);
451 /* Compute SEG:REG into A0. SEG is selected from the override segment
452 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
453 indicate no override. */
454 static void gen_lea_v_seg(DisasContext
*s
, TCGMemOp aflag
, TCGv a0
,
455 int def_seg
, int ovr_seg
)
461 tcg_gen_mov_tl(s
->A0
, a0
);
468 if (ovr_seg
< 0 && s
->addseg
) {
472 tcg_gen_ext32u_tl(s
->A0
, a0
);
478 tcg_gen_ext16u_tl(s
->A0
, a0
);
493 TCGv seg
= cpu_seg_base
[ovr_seg
];
495 if (aflag
== MO_64
) {
496 tcg_gen_add_tl(s
->A0
, a0
, seg
);
497 } else if (CODE64(s
)) {
498 tcg_gen_ext32u_tl(s
->A0
, a0
);
499 tcg_gen_add_tl(s
->A0
, s
->A0
, seg
);
501 tcg_gen_add_tl(s
->A0
, a0
, seg
);
502 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
507 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
509 gen_lea_v_seg(s
, s
->aflag
, cpu_regs
[R_ESI
], R_DS
, s
->override
);
512 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
514 gen_lea_v_seg(s
, s
->aflag
, cpu_regs
[R_EDI
], R_ES
, -1);
517 static inline void gen_op_movl_T0_Dshift(DisasContext
*s
, TCGMemOp ot
)
519 tcg_gen_ld32s_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, df
));
520 tcg_gen_shli_tl(s
->T0
, s
->T0
, ot
);
523 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, TCGMemOp size
, bool sign
)
528 tcg_gen_ext8s_tl(dst
, src
);
530 tcg_gen_ext8u_tl(dst
, src
);
535 tcg_gen_ext16s_tl(dst
, src
);
537 tcg_gen_ext16u_tl(dst
, src
);
543 tcg_gen_ext32s_tl(dst
, src
);
545 tcg_gen_ext32u_tl(dst
, src
);
554 static void gen_extu(TCGMemOp ot
, TCGv reg
)
556 gen_ext_tl(reg
, reg
, ot
, false);
559 static void gen_exts(TCGMemOp ot
, TCGv reg
)
561 gen_ext_tl(reg
, reg
, ot
, true);
565 void gen_op_jnz_ecx(DisasContext
*s
, TCGMemOp size
, TCGLabel
*label1
)
567 tcg_gen_mov_tl(s
->tmp0
, cpu_regs
[R_ECX
]);
568 gen_extu(size
, s
->tmp0
);
569 tcg_gen_brcondi_tl(TCG_COND_NE
, s
->tmp0
, 0, label1
);
573 void gen_op_jz_ecx(DisasContext
*s
, TCGMemOp size
, TCGLabel
*label1
)
575 tcg_gen_mov_tl(s
->tmp0
, cpu_regs
[R_ECX
]);
576 gen_extu(size
, s
->tmp0
);
577 tcg_gen_brcondi_tl(TCG_COND_EQ
, s
->tmp0
, 0, label1
);
580 static void gen_helper_in_func(TCGMemOp ot
, TCGv v
, TCGv_i32 n
)
584 gen_helper_inb(v
, cpu_env
, n
);
587 gen_helper_inw(v
, cpu_env
, n
);
590 gen_helper_inl(v
, cpu_env
, n
);
597 static void gen_helper_out_func(TCGMemOp ot
, TCGv_i32 v
, TCGv_i32 n
)
601 gen_helper_outb(cpu_env
, v
, n
);
604 gen_helper_outw(cpu_env
, v
, n
);
607 gen_helper_outl(cpu_env
, v
, n
);
614 static void gen_check_io(DisasContext
*s
, TCGMemOp ot
, target_ulong cur_eip
,
617 target_ulong next_eip
;
619 if (s
->pe
&& (s
->cpl
> s
->iopl
|| s
->vm86
)) {
620 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
623 gen_helper_check_iob(cpu_env
, s
->tmp2_i32
);
626 gen_helper_check_iow(cpu_env
, s
->tmp2_i32
);
629 gen_helper_check_iol(cpu_env
, s
->tmp2_i32
);
635 if(s
->flags
& HF_SVMI_MASK
) {
637 gen_jmp_im(s
, cur_eip
);
638 svm_flags
|= (1 << (4 + ot
));
639 next_eip
= s
->pc
- s
->cs_base
;
640 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
641 gen_helper_svm_check_io(cpu_env
, s
->tmp2_i32
,
642 tcg_const_i32(svm_flags
),
643 tcg_const_i32(next_eip
- cur_eip
));
647 static inline void gen_movs(DisasContext
*s
, TCGMemOp ot
)
649 gen_string_movl_A0_ESI(s
);
650 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
651 gen_string_movl_A0_EDI(s
);
652 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
653 gen_op_movl_T0_Dshift(s
, ot
);
654 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
655 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
658 static void gen_op_update1_cc(DisasContext
*s
)
660 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
663 static void gen_op_update2_cc(DisasContext
*s
)
665 tcg_gen_mov_tl(cpu_cc_src
, s
->T1
);
666 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
669 static void gen_op_update3_cc(DisasContext
*s
, TCGv reg
)
671 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
672 tcg_gen_mov_tl(cpu_cc_src
, s
->T1
);
673 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
676 static inline void gen_op_testl_T0_T1_cc(DisasContext
*s
)
678 tcg_gen_and_tl(cpu_cc_dst
, s
->T0
, s
->T1
);
681 static void gen_op_update_neg_cc(DisasContext
*s
)
683 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
684 tcg_gen_neg_tl(cpu_cc_src
, s
->T0
);
685 tcg_gen_movi_tl(s
->cc_srcT
, 0);
688 /* compute all eflags to cc_src */
689 static void gen_compute_eflags(DisasContext
*s
)
691 TCGv zero
, dst
, src1
, src2
;
694 if (s
->cc_op
== CC_OP_EFLAGS
) {
697 if (s
->cc_op
== CC_OP_CLR
) {
698 tcg_gen_movi_tl(cpu_cc_src
, CC_Z
| CC_P
);
699 set_cc_op(s
, CC_OP_EFLAGS
);
708 /* Take care to not read values that are not live. */
709 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
710 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
712 zero
= tcg_const_tl(0);
713 if (dead
& USES_CC_DST
) {
716 if (dead
& USES_CC_SRC
) {
719 if (dead
& USES_CC_SRC2
) {
725 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
726 set_cc_op(s
, CC_OP_EFLAGS
);
733 typedef struct CCPrepare
{
743 /* compute eflags.C to reg */
744 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
750 case CC_OP_SUBB
... CC_OP_SUBQ
:
751 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
752 size
= s
->cc_op
- CC_OP_SUBB
;
753 t1
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
754 /* If no temporary was used, be careful not to alias t1 and t0. */
755 t0
= t1
== cpu_cc_src
? s
->tmp0
: reg
;
756 tcg_gen_mov_tl(t0
, s
->cc_srcT
);
760 case CC_OP_ADDB
... CC_OP_ADDQ
:
761 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
762 size
= s
->cc_op
- CC_OP_ADDB
;
763 t1
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
764 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
766 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
767 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
769 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
772 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
774 case CC_OP_INCB
... CC_OP_INCQ
:
775 case CC_OP_DECB
... CC_OP_DECQ
:
776 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
777 .mask
= -1, .no_setcond
= true };
779 case CC_OP_SHLB
... CC_OP_SHLQ
:
780 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
781 size
= s
->cc_op
- CC_OP_SHLB
;
782 shift
= (8 << size
) - 1;
783 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
784 .mask
= (target_ulong
)1 << shift
};
786 case CC_OP_MULB
... CC_OP_MULQ
:
787 return (CCPrepare
) { .cond
= TCG_COND_NE
,
788 .reg
= cpu_cc_src
, .mask
= -1 };
790 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
791 size
= s
->cc_op
- CC_OP_BMILGB
;
792 t0
= gen_ext_tl(reg
, cpu_cc_src
, size
, false);
793 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
797 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
798 .mask
= -1, .no_setcond
= true };
801 case CC_OP_SARB
... CC_OP_SARQ
:
803 return (CCPrepare
) { .cond
= TCG_COND_NE
,
804 .reg
= cpu_cc_src
, .mask
= CC_C
};
807 /* The need to compute only C from CC_OP_DYNAMIC is important
808 in efficiently implementing e.g. INC at the start of a TB. */
810 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
811 cpu_cc_src2
, cpu_cc_op
);
812 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
813 .mask
= -1, .no_setcond
= true };
817 /* compute eflags.P to reg */
818 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
820 gen_compute_eflags(s
);
821 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
825 /* compute eflags.S to reg */
826 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
830 gen_compute_eflags(s
);
836 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
840 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
843 TCGMemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
844 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
845 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
850 /* compute eflags.O to reg */
851 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
856 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
857 .mask
= -1, .no_setcond
= true };
860 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
862 gen_compute_eflags(s
);
863 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
868 /* compute eflags.Z to reg */
869 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
873 gen_compute_eflags(s
);
879 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
882 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
884 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= cpu_cc_src
,
888 TCGMemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
889 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
890 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
895 /* perform a conditional store into register 'reg' according to jump opcode
896 value 'b'. In the fast case, T0 is guaranted not to be used. */
897 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
899 int inv
, jcc_op
, cond
;
905 jcc_op
= (b
>> 1) & 7;
908 case CC_OP_SUBB
... CC_OP_SUBQ
:
909 /* We optimize relational operators for the cmp/jcc case. */
910 size
= s
->cc_op
- CC_OP_SUBB
;
913 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
914 gen_extu(size
, s
->tmp4
);
915 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
916 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= s
->tmp4
,
917 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
926 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
927 gen_exts(size
, s
->tmp4
);
928 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, true);
929 cc
= (CCPrepare
) { .cond
= cond
, .reg
= s
->tmp4
,
930 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
940 /* This actually generates good code for JC, JZ and JS. */
943 cc
= gen_prepare_eflags_o(s
, reg
);
946 cc
= gen_prepare_eflags_c(s
, reg
);
949 cc
= gen_prepare_eflags_z(s
, reg
);
952 gen_compute_eflags(s
);
953 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
954 .mask
= CC_Z
| CC_C
};
957 cc
= gen_prepare_eflags_s(s
, reg
);
960 cc
= gen_prepare_eflags_p(s
, reg
);
963 gen_compute_eflags(s
);
964 if (reg
== cpu_cc_src
) {
967 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
968 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
969 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
974 gen_compute_eflags(s
);
975 if (reg
== cpu_cc_src
) {
978 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
979 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
980 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
981 .mask
= CC_S
| CC_Z
};
988 cc
.cond
= tcg_invert_cond(cc
.cond
);
993 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
995 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
998 if (cc
.cond
== TCG_COND_EQ
) {
999 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1001 tcg_gen_mov_tl(reg
, cc
.reg
);
1006 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1007 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1008 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1009 tcg_gen_andi_tl(reg
, reg
, 1);
1012 if (cc
.mask
!= -1) {
1013 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1017 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1019 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1023 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1025 gen_setcc1(s
, JCC_B
<< 1, reg
);
1028 /* generate a conditional jump to label 'l1' according to jump opcode
1029 value 'b'. In the fast case, T0 is guaranted not to be used. */
1030 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, TCGLabel
*l1
)
1032 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1034 if (cc
.mask
!= -1) {
1035 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1039 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1041 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1045 /* Generate a conditional jump to label 'l1' according to jump opcode
1046 value 'b'. In the fast case, T0 is guaranted not to be used.
1047 A translation block must end soon. */
1048 static inline void gen_jcc1(DisasContext
*s
, int b
, TCGLabel
*l1
)
1050 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1052 gen_update_cc_op(s
);
1053 if (cc
.mask
!= -1) {
1054 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1057 set_cc_op(s
, CC_OP_DYNAMIC
);
1059 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1061 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1065 /* XXX: does not work with gdbstub "ice" single step - not a
1067 static TCGLabel
*gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1069 TCGLabel
*l1
= gen_new_label();
1070 TCGLabel
*l2
= gen_new_label();
1071 gen_op_jnz_ecx(s
, s
->aflag
, l1
);
1073 gen_jmp_tb(s
, next_eip
, 1);
1078 static inline void gen_stos(DisasContext
*s
, TCGMemOp ot
)
1080 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
1081 gen_string_movl_A0_EDI(s
);
1082 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1083 gen_op_movl_T0_Dshift(s
, ot
);
1084 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1087 static inline void gen_lods(DisasContext
*s
, TCGMemOp ot
)
1089 gen_string_movl_A0_ESI(s
);
1090 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1091 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
1092 gen_op_movl_T0_Dshift(s
, ot
);
1093 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1096 static inline void gen_scas(DisasContext
*s
, TCGMemOp ot
)
1098 gen_string_movl_A0_EDI(s
);
1099 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1100 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1101 gen_op_movl_T0_Dshift(s
, ot
);
1102 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1105 static inline void gen_cmps(DisasContext
*s
, TCGMemOp ot
)
1107 gen_string_movl_A0_EDI(s
);
1108 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1109 gen_string_movl_A0_ESI(s
);
1110 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1111 gen_op_movl_T0_Dshift(s
, ot
);
1112 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1113 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1116 static void gen_bpt_io(DisasContext
*s
, TCGv_i32 t_port
, int ot
)
1118 if (s
->flags
& HF_IOBPT_MASK
) {
1119 TCGv_i32 t_size
= tcg_const_i32(1 << ot
);
1120 TCGv t_next
= tcg_const_tl(s
->pc
- s
->cs_base
);
1122 gen_helper_bpt_io(cpu_env
, t_port
, t_size
, t_next
);
1123 tcg_temp_free_i32(t_size
);
1124 tcg_temp_free(t_next
);
1129 static inline void gen_ins(DisasContext
*s
, TCGMemOp ot
)
1131 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
1134 gen_string_movl_A0_EDI(s
);
1135 /* Note: we must do this dummy write first to be restartable in
1136 case of page fault. */
1137 tcg_gen_movi_tl(s
->T0
, 0);
1138 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1139 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1140 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1141 gen_helper_in_func(ot
, s
->T0
, s
->tmp2_i32
);
1142 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1143 gen_op_movl_T0_Dshift(s
, ot
);
1144 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1145 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1146 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
1151 static inline void gen_outs(DisasContext
*s
, TCGMemOp ot
)
1153 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
1156 gen_string_movl_A0_ESI(s
);
1157 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1159 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1160 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1161 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T0
);
1162 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
1163 gen_op_movl_T0_Dshift(s
, ot
);
1164 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1165 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1166 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
1171 /* same method as Valgrind : we generate jumps to current or next
1173 #define GEN_REPZ(op) \
1174 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1175 target_ulong cur_eip, target_ulong next_eip) \
1178 gen_update_cc_op(s); \
1179 l2 = gen_jz_ecx_string(s, next_eip); \
1180 gen_ ## op(s, ot); \
1181 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \
1182 /* a loop would cause two single step exceptions if ECX = 1 \
1183 before rep string_insn */ \
1185 gen_op_jz_ecx(s, s->aflag, l2); \
1186 gen_jmp(s, cur_eip); \
1189 #define GEN_REPZ2(op) \
1190 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1191 target_ulong cur_eip, \
1192 target_ulong next_eip, \
1196 gen_update_cc_op(s); \
1197 l2 = gen_jz_ecx_string(s, next_eip); \
1198 gen_ ## op(s, ot); \
1199 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \
1200 gen_update_cc_op(s); \
1201 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1203 gen_op_jz_ecx(s, s->aflag, l2); \
1204 gen_jmp(s, cur_eip); \
1215 static void gen_helper_fp_arith_ST0_FT0(int op
)
1219 gen_helper_fadd_ST0_FT0(cpu_env
);
1222 gen_helper_fmul_ST0_FT0(cpu_env
);
1225 gen_helper_fcom_ST0_FT0(cpu_env
);
1228 gen_helper_fcom_ST0_FT0(cpu_env
);
1231 gen_helper_fsub_ST0_FT0(cpu_env
);
1234 gen_helper_fsubr_ST0_FT0(cpu_env
);
1237 gen_helper_fdiv_ST0_FT0(cpu_env
);
1240 gen_helper_fdivr_ST0_FT0(cpu_env
);
1245 /* NOTE the exception in "r" op ordering */
1246 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1248 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1251 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1254 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1257 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1260 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1263 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1266 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1271 /* if d == OR_TMP0, it means memory operand (address in A0) */
1272 static void gen_op(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
)
1275 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1276 } else if (!(s1
->prefix
& PREFIX_LOCK
)) {
1277 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1281 gen_compute_eflags_c(s1
, s1
->tmp4
);
1282 if (s1
->prefix
& PREFIX_LOCK
) {
1283 tcg_gen_add_tl(s1
->T0
, s1
->tmp4
, s1
->T1
);
1284 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1285 s1
->mem_index
, ot
| MO_LE
);
1287 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1288 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1289 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1291 gen_op_update3_cc(s1
, s1
->tmp4
);
1292 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1295 gen_compute_eflags_c(s1
, s1
->tmp4
);
1296 if (s1
->prefix
& PREFIX_LOCK
) {
1297 tcg_gen_add_tl(s1
->T0
, s1
->T1
, s1
->tmp4
);
1298 tcg_gen_neg_tl(s1
->T0
, s1
->T0
);
1299 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1300 s1
->mem_index
, ot
| MO_LE
);
1302 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1303 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1304 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1306 gen_op_update3_cc(s1
, s1
->tmp4
);
1307 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1310 if (s1
->prefix
& PREFIX_LOCK
) {
1311 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1312 s1
->mem_index
, ot
| MO_LE
);
1314 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1315 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1317 gen_op_update2_cc(s1
);
1318 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1321 if (s1
->prefix
& PREFIX_LOCK
) {
1322 tcg_gen_neg_tl(s1
->T0
, s1
->T1
);
1323 tcg_gen_atomic_fetch_add_tl(s1
->cc_srcT
, s1
->A0
, s1
->T0
,
1324 s1
->mem_index
, ot
| MO_LE
);
1325 tcg_gen_sub_tl(s1
->T0
, s1
->cc_srcT
, s1
->T1
);
1327 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1328 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1329 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1331 gen_op_update2_cc(s1
);
1332 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1336 if (s1
->prefix
& PREFIX_LOCK
) {
1337 tcg_gen_atomic_and_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1338 s1
->mem_index
, ot
| MO_LE
);
1340 tcg_gen_and_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1341 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1343 gen_op_update1_cc(s1
);
1344 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1347 if (s1
->prefix
& PREFIX_LOCK
) {
1348 tcg_gen_atomic_or_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1349 s1
->mem_index
, ot
| MO_LE
);
1351 tcg_gen_or_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1352 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1354 gen_op_update1_cc(s1
);
1355 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1358 if (s1
->prefix
& PREFIX_LOCK
) {
1359 tcg_gen_atomic_xor_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1360 s1
->mem_index
, ot
| MO_LE
);
1362 tcg_gen_xor_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1363 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1365 gen_op_update1_cc(s1
);
1366 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1369 tcg_gen_mov_tl(cpu_cc_src
, s1
->T1
);
1370 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1371 tcg_gen_sub_tl(cpu_cc_dst
, s1
->T0
, s1
->T1
);
1372 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1377 /* if d == OR_TMP0, it means memory operand (address in A0) */
1378 static void gen_inc(DisasContext
*s1
, TCGMemOp ot
, int d
, int c
)
1380 if (s1
->prefix
& PREFIX_LOCK
) {
1381 tcg_gen_movi_tl(s1
->T0
, c
> 0 ? 1 : -1);
1382 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1383 s1
->mem_index
, ot
| MO_LE
);
1386 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1388 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1390 tcg_gen_addi_tl(s1
->T0
, s1
->T0
, (c
> 0 ? 1 : -1));
1391 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1394 gen_compute_eflags_c(s1
, cpu_cc_src
);
1395 tcg_gen_mov_tl(cpu_cc_dst
, s1
->T0
);
1396 set_cc_op(s1
, (c
> 0 ? CC_OP_INCB
: CC_OP_DECB
) + ot
);
1399 static void gen_shift_flags(DisasContext
*s
, TCGMemOp ot
, TCGv result
,
1400 TCGv shm1
, TCGv count
, bool is_right
)
1402 TCGv_i32 z32
, s32
, oldop
;
1405 /* Store the results into the CC variables. If we know that the
1406 variable must be dead, store unconditionally. Otherwise we'll
1407 need to not disrupt the current contents. */
1408 z_tl
= tcg_const_tl(0);
1409 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1410 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1411 result
, cpu_cc_dst
);
1413 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1415 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1416 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1419 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1421 tcg_temp_free(z_tl
);
1423 /* Get the two potential CC_OP values into temporaries. */
1424 tcg_gen_movi_i32(s
->tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1425 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1428 tcg_gen_movi_i32(s
->tmp3_i32
, s
->cc_op
);
1429 oldop
= s
->tmp3_i32
;
1432 /* Conditionally store the CC_OP value. */
1433 z32
= tcg_const_i32(0);
1434 s32
= tcg_temp_new_i32();
1435 tcg_gen_trunc_tl_i32(s32
, count
);
1436 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, s
->tmp2_i32
, oldop
);
1437 tcg_temp_free_i32(z32
);
1438 tcg_temp_free_i32(s32
);
1440 /* The CC_OP value is no longer predictable. */
1441 set_cc_op(s
, CC_OP_DYNAMIC
);
1444 static void gen_shift_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1445 int is_right
, int is_arith
)
1447 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1450 if (op1
== OR_TMP0
) {
1451 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1453 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1456 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1457 tcg_gen_subi_tl(s
->tmp0
, s
->T1
, 1);
1461 gen_exts(ot
, s
->T0
);
1462 tcg_gen_sar_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1463 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
1465 gen_extu(ot
, s
->T0
);
1466 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1467 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
1470 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1471 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
1475 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1477 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, s
->T1
, is_right
);
1480 static void gen_shift_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1481 int is_right
, int is_arith
)
1483 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1487 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1489 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1495 gen_exts(ot
, s
->T0
);
1496 tcg_gen_sari_tl(s
->tmp4
, s
->T0
, op2
- 1);
1497 tcg_gen_sari_tl(s
->T0
, s
->T0
, op2
);
1499 gen_extu(ot
, s
->T0
);
1500 tcg_gen_shri_tl(s
->tmp4
, s
->T0
, op2
- 1);
1501 tcg_gen_shri_tl(s
->T0
, s
->T0
, op2
);
1504 tcg_gen_shli_tl(s
->tmp4
, s
->T0
, op2
- 1);
1505 tcg_gen_shli_tl(s
->T0
, s
->T0
, op2
);
1510 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1512 /* update eflags if non zero shift */
1514 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
1515 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
1516 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1520 static void gen_rot_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
, int is_right
)
1522 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1526 if (op1
== OR_TMP0
) {
1527 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1529 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1532 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1536 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1537 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
1538 tcg_gen_muli_tl(s
->T0
, s
->T0
, 0x01010101);
1541 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1542 tcg_gen_deposit_tl(s
->T0
, s
->T0
, s
->T0
, 16, 16);
1545 #ifdef TARGET_X86_64
1547 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1548 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
1550 tcg_gen_rotr_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1552 tcg_gen_rotl_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1554 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1559 tcg_gen_rotr_tl(s
->T0
, s
->T0
, s
->T1
);
1561 tcg_gen_rotl_tl(s
->T0
, s
->T0
, s
->T1
);
1567 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1569 /* We'll need the flags computed into CC_SRC. */
1570 gen_compute_eflags(s
);
1572 /* The value that was "rotated out" is now present at the other end
1573 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1574 since we've computed the flags into CC_SRC, these variables are
1577 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1578 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1579 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1581 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1582 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1584 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1585 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1587 /* Now conditionally store the new CC_OP value. If the shift count
1588 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1589 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1590 exactly as we computed above. */
1591 t0
= tcg_const_i32(0);
1592 t1
= tcg_temp_new_i32();
1593 tcg_gen_trunc_tl_i32(t1
, s
->T1
);
1594 tcg_gen_movi_i32(s
->tmp2_i32
, CC_OP_ADCOX
);
1595 tcg_gen_movi_i32(s
->tmp3_i32
, CC_OP_EFLAGS
);
1596 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1597 s
->tmp2_i32
, s
->tmp3_i32
);
1598 tcg_temp_free_i32(t0
);
1599 tcg_temp_free_i32(t1
);
1601 /* The CC_OP value is no longer predictable. */
1602 set_cc_op(s
, CC_OP_DYNAMIC
);
1605 static void gen_rot_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1608 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1612 if (op1
== OR_TMP0
) {
1613 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1615 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1621 #ifdef TARGET_X86_64
1623 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1625 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1627 tcg_gen_rotli_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1629 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1634 tcg_gen_rotri_tl(s
->T0
, s
->T0
, op2
);
1636 tcg_gen_rotli_tl(s
->T0
, s
->T0
, op2
);
1647 shift
= mask
+ 1 - shift
;
1649 gen_extu(ot
, s
->T0
);
1650 tcg_gen_shli_tl(s
->tmp0
, s
->T0
, shift
);
1651 tcg_gen_shri_tl(s
->T0
, s
->T0
, mask
+ 1 - shift
);
1652 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
1658 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1661 /* Compute the flags into CC_SRC. */
1662 gen_compute_eflags(s
);
1664 /* The value that was "rotated out" is now present at the other end
1665 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1666 since we've computed the flags into CC_SRC, these variables are
1669 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1670 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1671 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1673 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1674 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1676 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1677 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1678 set_cc_op(s
, CC_OP_ADCOX
);
1682 /* XXX: add faster immediate = 1 case */
1683 static void gen_rotc_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1686 gen_compute_eflags(s
);
1687 assert(s
->cc_op
== CC_OP_EFLAGS
);
1691 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1693 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1698 gen_helper_rcrb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1701 gen_helper_rcrw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1704 gen_helper_rcrl(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1706 #ifdef TARGET_X86_64
1708 gen_helper_rcrq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1717 gen_helper_rclb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1720 gen_helper_rclw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1723 gen_helper_rcll(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1725 #ifdef TARGET_X86_64
1727 gen_helper_rclq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1735 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1738 /* XXX: add faster immediate case */
1739 static void gen_shiftd_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1740 bool is_right
, TCGv count_in
)
1742 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1746 if (op1
== OR_TMP0
) {
1747 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1749 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1752 count
= tcg_temp_new();
1753 tcg_gen_andi_tl(count
, count_in
, mask
);
1757 /* Note: we implement the Intel behaviour for shift count > 16.
1758 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1759 portion by constructing it as a 32-bit value. */
1761 tcg_gen_deposit_tl(s
->tmp0
, s
->T0
, s
->T1
, 16, 16);
1762 tcg_gen_mov_tl(s
->T1
, s
->T0
);
1763 tcg_gen_mov_tl(s
->T0
, s
->tmp0
);
1765 tcg_gen_deposit_tl(s
->T1
, s
->T0
, s
->T1
, 16, 16);
1768 #ifdef TARGET_X86_64
1770 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1771 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1773 tcg_gen_concat_tl_i64(s
->T0
, s
->T0
, s
->T1
);
1774 tcg_gen_shr_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1775 tcg_gen_shr_i64(s
->T0
, s
->T0
, count
);
1777 tcg_gen_concat_tl_i64(s
->T0
, s
->T1
, s
->T0
);
1778 tcg_gen_shl_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1779 tcg_gen_shl_i64(s
->T0
, s
->T0
, count
);
1780 tcg_gen_shri_i64(s
->tmp0
, s
->tmp0
, 32);
1781 tcg_gen_shri_i64(s
->T0
, s
->T0
, 32);
1786 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1788 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1790 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1791 tcg_gen_shr_tl(s
->T0
, s
->T0
, count
);
1792 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->tmp4
);
1794 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1796 /* Only needed if count > 16, for Intel behaviour. */
1797 tcg_gen_subfi_tl(s
->tmp4
, 33, count
);
1798 tcg_gen_shr_tl(s
->tmp4
, s
->T1
, s
->tmp4
);
1799 tcg_gen_or_tl(s
->tmp0
, s
->tmp0
, s
->tmp4
);
1802 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1803 tcg_gen_shl_tl(s
->T0
, s
->T0
, count
);
1804 tcg_gen_shr_tl(s
->T1
, s
->T1
, s
->tmp4
);
1806 tcg_gen_movi_tl(s
->tmp4
, 0);
1807 tcg_gen_movcond_tl(TCG_COND_EQ
, s
->T1
, count
, s
->tmp4
,
1809 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
1814 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1816 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, count
, is_right
);
1817 tcg_temp_free(count
);
1820 static void gen_shift(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int s
)
1823 gen_op_mov_v_reg(s1
, ot
, s1
->T1
, s
);
1826 gen_rot_rm_T1(s1
, ot
, d
, 0);
1829 gen_rot_rm_T1(s1
, ot
, d
, 1);
1833 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1836 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1839 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1842 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1845 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1850 static void gen_shifti(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int c
)
1854 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1857 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1861 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1864 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1867 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1870 /* currently not optimized */
1871 tcg_gen_movi_tl(s1
->T1
, c
);
1872 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1877 #define X86_MAX_INSN_LENGTH 15
1879 static uint64_t advance_pc(CPUX86State
*env
, DisasContext
*s
, int num_bytes
)
1881 uint64_t pc
= s
->pc
;
1884 if (unlikely(s
->pc
- s
->pc_start
> X86_MAX_INSN_LENGTH
)) {
1885 /* If the instruction's 16th byte is on a different page than the 1st, a
1886 * page fault on the second page wins over the general protection fault
1887 * caused by the instruction being too long.
1888 * This can happen even if the operand is only one byte long!
1890 if (((s
->pc
- 1) ^ (pc
- 1)) & TARGET_PAGE_MASK
) {
1891 volatile uint8_t unused
=
1892 cpu_ldub_code(env
, (s
->pc
- 1) & TARGET_PAGE_MASK
);
1895 siglongjmp(s
->jmpbuf
, 1);
1901 static inline uint8_t x86_ldub_code(CPUX86State
*env
, DisasContext
*s
)
1903 return cpu_ldub_code(env
, advance_pc(env
, s
, 1));
1906 static inline int16_t x86_ldsw_code(CPUX86State
*env
, DisasContext
*s
)
1908 return cpu_ldsw_code(env
, advance_pc(env
, s
, 2));
1911 static inline uint16_t x86_lduw_code(CPUX86State
*env
, DisasContext
*s
)
1913 return cpu_lduw_code(env
, advance_pc(env
, s
, 2));
1916 static inline uint32_t x86_ldl_code(CPUX86State
*env
, DisasContext
*s
)
1918 return cpu_ldl_code(env
, advance_pc(env
, s
, 4));
1921 #ifdef TARGET_X86_64
1922 static inline uint64_t x86_ldq_code(CPUX86State
*env
, DisasContext
*s
)
1924 return cpu_ldq_code(env
, advance_pc(env
, s
, 8));
1928 /* Decompose an address. */
1930 typedef struct AddressParts
{
1938 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
1941 int def_seg
, base
, index
, scale
, mod
, rm
;
1950 mod
= (modrm
>> 6) & 3;
1952 base
= rm
| REX_B(s
);
1955 /* Normally filtered out earlier, but including this path
1956 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1965 int code
= x86_ldub_code(env
, s
);
1966 scale
= (code
>> 6) & 3;
1967 index
= ((code
>> 3) & 7) | REX_X(s
);
1969 index
= -1; /* no index */
1971 base
= (code
& 7) | REX_B(s
);
1977 if ((base
& 7) == 5) {
1979 disp
= (int32_t)x86_ldl_code(env
, s
);
1980 if (CODE64(s
) && !havesib
) {
1982 disp
+= s
->pc
+ s
->rip_offset
;
1987 disp
= (int8_t)x86_ldub_code(env
, s
);
1991 disp
= (int32_t)x86_ldl_code(env
, s
);
1995 /* For correct popl handling with esp. */
1996 if (base
== R_ESP
&& s
->popl_esp_hack
) {
1997 disp
+= s
->popl_esp_hack
;
1999 if (base
== R_EBP
|| base
== R_ESP
) {
2008 disp
= x86_lduw_code(env
, s
);
2011 } else if (mod
== 1) {
2012 disp
= (int8_t)x86_ldub_code(env
, s
);
2014 disp
= (int16_t)x86_lduw_code(env
, s
);
2058 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
2061 /* Compute the address, with a minimum number of TCG ops. */
2062 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
)
2068 ea
= cpu_regs
[a
.index
];
2070 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
2074 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
2077 } else if (a
.base
>= 0) {
2078 ea
= cpu_regs
[a
.base
];
2081 tcg_gen_movi_tl(s
->A0
, a
.disp
);
2083 } else if (a
.disp
!= 0) {
2084 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
2091 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2093 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2094 TCGv ea
= gen_lea_modrm_1(s
, a
);
2095 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2098 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2100 (void)gen_lea_modrm_0(env
, s
, modrm
);
2103 /* Used for BNDCL, BNDCU, BNDCN. */
2104 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2105 TCGCond cond
, TCGv_i64 bndv
)
2107 TCGv ea
= gen_lea_modrm_1(s
, gen_lea_modrm_0(env
, s
, modrm
));
2109 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2111 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2113 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2114 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2115 gen_helper_bndck(cpu_env
, s
->tmp2_i32
);
2118 /* used for LEA and MOV AX, mem */
2119 static void gen_add_A0_ds_seg(DisasContext
*s
)
2121 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
2124 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2126 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2127 TCGMemOp ot
, int reg
, int is_store
)
2131 mod
= (modrm
>> 6) & 3;
2132 rm
= (modrm
& 7) | REX_B(s
);
2136 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2137 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
2139 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
2141 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2144 gen_lea_modrm(env
, s
, modrm
);
2147 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2148 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
2150 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
2152 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2157 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
)
2163 ret
= x86_ldub_code(env
, s
);
2166 ret
= x86_lduw_code(env
, s
);
2169 #ifdef TARGET_X86_64
2172 ret
= x86_ldl_code(env
, s
);
2180 static inline int insn_const_size(TCGMemOp ot
)
2189 static inline bool use_goto_tb(DisasContext
*s
, target_ulong pc
)
2191 #ifndef CONFIG_USER_ONLY
2192 return (pc
& TARGET_PAGE_MASK
) == (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) ||
2193 (pc
& TARGET_PAGE_MASK
) == (s
->pc_start
& TARGET_PAGE_MASK
);
2199 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2201 target_ulong pc
= s
->cs_base
+ eip
;
2203 if (use_goto_tb(s
, pc
)) {
2204 /* jump to same page: we can use a direct jump */
2205 tcg_gen_goto_tb(tb_num
);
2207 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
2208 s
->base
.is_jmp
= DISAS_NORETURN
;
2210 /* jump to another page */
2216 static inline void gen_jcc(DisasContext
*s
, int b
,
2217 target_ulong val
, target_ulong next_eip
)
2222 l1
= gen_new_label();
2225 gen_goto_tb(s
, 0, next_eip
);
2228 gen_goto_tb(s
, 1, val
);
2230 l1
= gen_new_label();
2231 l2
= gen_new_label();
2234 gen_jmp_im(s
, next_eip
);
2244 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
, int b
,
2249 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2251 cc
= gen_prepare_cc(s
, b
, s
->T1
);
2252 if (cc
.mask
!= -1) {
2253 TCGv t0
= tcg_temp_new();
2254 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2258 cc
.reg2
= tcg_const_tl(cc
.imm
);
2261 tcg_gen_movcond_tl(cc
.cond
, s
->T0
, cc
.reg
, cc
.reg2
,
2262 s
->T0
, cpu_regs
[reg
]);
2263 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2265 if (cc
.mask
!= -1) {
2266 tcg_temp_free(cc
.reg
);
2269 tcg_temp_free(cc
.reg2
);
2273 static inline void gen_op_movl_T0_seg(DisasContext
*s
, int seg_reg
)
2275 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
2276 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2279 static inline void gen_op_movl_seg_T0_vm(DisasContext
*s
, int seg_reg
)
2281 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
2282 tcg_gen_st32_tl(s
->T0
, cpu_env
,
2283 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2284 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], s
->T0
, 4);
2287 /* move T0 to seg_reg and compute if the CPU state may change. Never
2288 call this function with seg_reg == R_CS */
2289 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
)
2291 if (s
->pe
&& !s
->vm86
) {
2292 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
2293 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), s
->tmp2_i32
);
2294 /* abort translation because the addseg value may change or
2295 because ss32 may change. For R_SS, translation must always
2296 stop as a special handling must be done to disable hardware
2297 interrupts for the next instruction */
2298 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
)) {
2299 s
->base
.is_jmp
= DISAS_TOO_MANY
;
2302 gen_op_movl_seg_T0_vm(s
, seg_reg
);
2303 if (seg_reg
== R_SS
) {
2304 s
->base
.is_jmp
= DISAS_TOO_MANY
;
2309 static inline int svm_is_rep(int prefixes
)
2311 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2315 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2316 uint32_t type
, uint64_t param
)
2318 /* no SVM activated; fast case */
2319 if (likely(!(s
->flags
& HF_SVMI_MASK
)))
2321 gen_update_cc_op(s
);
2322 gen_jmp_im(s
, pc_start
- s
->cs_base
);
2323 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2324 tcg_const_i64(param
));
2328 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2330 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2333 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2335 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
2338 /* Generate a push. It depends on ss32, addseg and dflag. */
2339 static void gen_push_v(DisasContext
*s
, TCGv val
)
2341 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2342 TCGMemOp a_ot
= mo_stacksize(s
);
2343 int size
= 1 << d_ot
;
2344 TCGv new_esp
= s
->A0
;
2346 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_ESP
], size
);
2351 tcg_gen_mov_tl(new_esp
, s
->A0
);
2353 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2356 gen_op_st_v(s
, d_ot
, val
, s
->A0
);
2357 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, new_esp
);
2360 /* two step pop is necessary for precise exceptions */
2361 static TCGMemOp
gen_pop_T0(DisasContext
*s
)
2363 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2365 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2366 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2371 static inline void gen_pop_update(DisasContext
*s
, TCGMemOp ot
)
2373 gen_stack_update(s
, 1 << ot
);
2376 static inline void gen_stack_A0(DisasContext
*s
)
2378 gen_lea_v_seg(s
, s
->ss32
? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2381 static void gen_pusha(DisasContext
*s
)
2383 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2384 TCGMemOp d_ot
= s
->dflag
;
2385 int size
= 1 << d_ot
;
2388 for (i
= 0; i
< 8; i
++) {
2389 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2390 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2391 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], s
->A0
);
2394 gen_stack_update(s
, -8 * size
);
2397 static void gen_popa(DisasContext
*s
)
2399 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2400 TCGMemOp d_ot
= s
->dflag
;
2401 int size
= 1 << d_ot
;
2404 for (i
= 0; i
< 8; i
++) {
2405 /* ESP is not reloaded */
2406 if (7 - i
== R_ESP
) {
2409 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], i
* size
);
2410 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2411 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2412 gen_op_mov_reg_v(s
, d_ot
, 7 - i
, s
->T0
);
2415 gen_stack_update(s
, 8 * size
);
2418 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2420 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2421 TCGMemOp a_ot
= CODE64(s
) ? MO_64
: s
->ss32
? MO_32
: MO_16
;
2422 int size
= 1 << d_ot
;
2424 /* Push BP; compute FrameTemp into T1. */
2425 tcg_gen_subi_tl(s
->T1
, cpu_regs
[R_ESP
], size
);
2426 gen_lea_v_seg(s
, a_ot
, s
->T1
, R_SS
, -1);
2427 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], s
->A0
);
2433 /* Copy level-1 pointers from the previous frame. */
2434 for (i
= 1; i
< level
; ++i
) {
2435 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_EBP
], size
* i
);
2436 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2437 gen_op_ld_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2439 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* i
);
2440 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2441 gen_op_st_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2444 /* Push the current FrameTemp as the last level. */
2445 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* level
);
2446 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2447 gen_op_st_v(s
, d_ot
, s
->T1
, s
->A0
);
2450 /* Copy the FrameTemp value to EBP. */
2451 gen_op_mov_reg_v(s
, a_ot
, R_EBP
, s
->T1
);
2453 /* Compute the final value of ESP. */
2454 tcg_gen_subi_tl(s
->T1
, s
->T1
, esp_addend
+ size
* level
);
2455 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2458 static void gen_leave(DisasContext
*s
)
2460 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2461 TCGMemOp a_ot
= mo_stacksize(s
);
2463 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2464 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2466 tcg_gen_addi_tl(s
->T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2468 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T0
);
2469 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2472 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
2474 gen_update_cc_op(s
);
2475 gen_jmp_im(s
, cur_eip
);
2476 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
2477 s
->base
.is_jmp
= DISAS_NORETURN
;
2480 /* Generate #UD for the current instruction. The assumption here is that
2481 the instruction is known, but it isn't allowed in the current cpu mode. */
2482 static void gen_illegal_opcode(DisasContext
*s
)
2484 gen_exception(s
, EXCP06_ILLOP
, s
->pc_start
- s
->cs_base
);
2487 /* Similarly, except that the assumption here is that we don't decode
2488 the instruction at all -- either a missing opcode, an unimplemented
2489 feature, or just a bogus instruction stream. */
2490 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2492 gen_illegal_opcode(s
);
2494 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2495 target_ulong pc
= s
->pc_start
, end
= s
->pc
;
2497 qemu_log("ILLOPC: " TARGET_FMT_lx
":", pc
);
2498 for (; pc
< end
; ++pc
) {
2499 qemu_log(" %02x", cpu_ldub_code(env
, pc
));
2506 /* an interrupt is different from an exception because of the
2508 static void gen_interrupt(DisasContext
*s
, int intno
,
2509 target_ulong cur_eip
, target_ulong next_eip
)
2511 gen_update_cc_op(s
);
2512 gen_jmp_im(s
, cur_eip
);
2513 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2514 tcg_const_i32(next_eip
- cur_eip
));
2515 s
->base
.is_jmp
= DISAS_NORETURN
;
2518 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2520 gen_update_cc_op(s
);
2521 gen_jmp_im(s
, cur_eip
);
2522 gen_helper_debug(cpu_env
);
2523 s
->base
.is_jmp
= DISAS_NORETURN
;
2526 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2528 if ((s
->flags
& mask
) == 0) {
2529 TCGv_i32 t
= tcg_temp_new_i32();
2530 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2531 tcg_gen_ori_i32(t
, t
, mask
);
2532 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2533 tcg_temp_free_i32(t
);
2538 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2540 if (s
->flags
& mask
) {
2541 TCGv_i32 t
= tcg_temp_new_i32();
2542 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2543 tcg_gen_andi_i32(t
, t
, ~mask
);
2544 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2545 tcg_temp_free_i32(t
);
/* Clear BND registers during legacy branches.  */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime.  */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(cpu_env);
    }
}
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns.  */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it.  */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->base.singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (recheck_tf) {
        gen_helper_rechecking_single_step(cpu_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->tf) {
        gen_helper_single_step(cpu_env);
    } else if (jr) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
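/* The order of the tests above matters: an attached debugger
   (singlestep_enabled) wins over TF single-stepping, and only plain
   indirect jumps (JR) may use the lookup-and-goto-ptr fast path;
   every other exit falls back to tcg_gen_exit_tb().  */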
static void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}
/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}
/* End of block, resetting the inhibit irq flag.  */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}
/* Jump to register */
static void gen_jr(DisasContext *s, TCGv dest)
{
    do_gen_eob_worker(s, false, false, true);
}
/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (s->jmp_opt) {
        gen_goto_tb(s, tb_num, eip);
    } else {
        gen_jmp_im(s, eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
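/* Direct jumps with a statically known target may be chained to the next
   TB via gen_goto_tb(); otherwise EIP is stored and the block ends with a
   full gen_eob() so the main loop re-dispatches.  */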
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
}
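/* The two helpers above stage a 64-bit operand through s->tmp1_i64 so it
   can be moved between guest memory (addressed by A0) and a field inside
   CPUX86State at the given byte offset.  */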
static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
}
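/* 128-bit XMM operands are handled as two little-endian 64-bit halves:
   ZMM_Q(0) at A0 and ZMM_Q(1) at A0 + 8.  */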
static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset)
{
    tcg_gen_ld_i32(s->tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(s->tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(DisasContext *s, int d_offset)
{
    tcg_gen_movi_i64(s->tmp1_i64, 0);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset);
}
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
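/* MMX_OP2(x) yields the { mmx, xmm } helper pair for one opcode row and
   SSE_FOP(x) the four-wide { ps, pd, ss, sd } row, e.g.
       MMX_OP2(paddb) -> { gen_helper_paddb_mmx, gen_helper_paddb_xmm }
       SSE_FOP(add)   -> { gen_helper_addps, gen_helper_addpd,
                           gen_helper_addss, gen_helper_addsd }  */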
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
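/* sse_op_table1 is indexed by the second opcode byte and by the
   mandatory-prefix slot computed in gen_sse(): 0 = no prefix (MMX/ps),
   1 = 66 (pd), 2 = F3 (ss), 3 = F2 (sd).  NULL entries are undefined
   encodings; SSE_SPECIAL and SSE_DUMMY entries are handled by dedicated
   code in gen_sse() rather than through a helper call.  */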
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
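/* sse_op_table2 covers the immediate-form shift groups 0F 71/72/73
   (/2, /4, /6 plus the xmm-only /3 and /7 byte shifts); gen_sse()
   indexes it as ((b - 1) & 3) * 8 + reg-field.  */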
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
#endif
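/* The 3a tables handle cvtsi2ss/sd (and the 64-bit cvtsq2* forms), the
   3b tables cvt(t)ss/sd2si (and *2sq); gen_sse() picks the row from the
   opcode and prefix, and the integer width from the effective operand
   size (dflag).  */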
static const SSEFunc_0_epp sse_op_table4[8][4] = {
    SSE_FOP(cmpeq),
    SSE_FOP(cmplt),
    SSE_FOP(cmple),
    SSE_FOP(cmpunord),
    SSE_FOP(cmpneq),
    SSE_FOP(cmpnlt),
    SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
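/* 3DNow! instructions all share opcode 0F 0F and encode the actual
   operation in a trailing suffix byte, which gen_sse() uses directly as
   the index into sse_op_table5.  */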
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
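/* Each SSEOpHelper entry pairs the { mmx, xmm } helpers with the CPUID
   feature bit (ext_mask) that must be present; gen_sse() raises #UD when
   the bit is missing from cpuid_ext_features.  */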
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
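/* sse_op_table6 serves the three-byte 0F 38 opcode space and
   sse_op_table7 the 0F 3A space, whose instructions take an extra
   immediate byte that is passed to the eppi helpers.  */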
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    b &= 0xff;
    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto unknown_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_illegal_opcode(s);
        return;
    }
    if (is_xmm
        && !(s->flags & HF_OSFXSR_MASK)
        && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
        goto unknown_op;
    }
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
            /* If we were fully decoding this we might use illegal_op.  */
            goto unknown_op;
        }
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }

    modrm = x86_ldub_code(env, s);
    reg = ((modrm >> 3) & 7);
    if (is_xmm)
        reg |= rex_r;
    mod = (modrm >> 6) & 3;
    if (sse_fn_epp == SSE_SPECIAL) {
        b |= (b1 << 8);
        switch(b) {
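        /* For the hand-decoded cases below, b has the mandatory-prefix
           index folded into bits 8-9, so e.g. 0x0e7 is movntq with no
           prefix and 0x1e7 is movntdq with a 66 prefix.  */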
3106 case 0x0e7: /* movntq */
3110 gen_lea_modrm(env
, s
, modrm
);
3111 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3113 case 0x1e7: /* movntdq */
3114 case 0x02b: /* movntps */
3115 case 0x12b: /* movntps */
3118 gen_lea_modrm(env
, s
, modrm
);
3119 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3121 case 0x3f0: /* lddqu */
3124 gen_lea_modrm(env
, s
, modrm
);
3125 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3127 case 0x22b: /* movntss */
3128 case 0x32b: /* movntsd */
3131 gen_lea_modrm(env
, s
, modrm
);
3133 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3134 xmm_regs
[reg
].ZMM_Q(0)));
3136 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
3137 xmm_regs
[reg
].ZMM_L(0)));
3138 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3141 case 0x6e: /* movd mm, ea */
3142 #ifdef TARGET_X86_64
3143 if (s
->dflag
== MO_64
) {
3144 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3145 tcg_gen_st_tl(s
->T0
, cpu_env
,
3146 offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3150 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3151 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3152 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3153 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3154 gen_helper_movl_mm_T0_mmx(s
->ptr0
, s
->tmp2_i32
);
3157 case 0x16e: /* movd xmm, ea */
3158 #ifdef TARGET_X86_64
3159 if (s
->dflag
== MO_64
) {
3160 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3161 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3162 offsetof(CPUX86State
,xmm_regs
[reg
]));
3163 gen_helper_movq_mm_T0_xmm(s
->ptr0
, s
->T0
);
3167 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3168 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3169 offsetof(CPUX86State
,xmm_regs
[reg
]));
3170 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3171 gen_helper_movl_mm_T0_xmm(s
->ptr0
, s
->tmp2_i32
);
3174 case 0x6f: /* movq mm, ea */
3176 gen_lea_modrm(env
, s
, modrm
);
3177 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3180 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
3181 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3182 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
3183 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3186 case 0x010: /* movups */
3187 case 0x110: /* movupd */
3188 case 0x028: /* movaps */
3189 case 0x128: /* movapd */
3190 case 0x16f: /* movdqa xmm, ea */
3191 case 0x26f: /* movdqu xmm, ea */
3193 gen_lea_modrm(env
, s
, modrm
);
3194 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3196 rm
= (modrm
& 7) | REX_B(s
);
3197 gen_op_movo(s
, offsetof(CPUX86State
, xmm_regs
[reg
]),
3198 offsetof(CPUX86State
,xmm_regs
[rm
]));
3201 case 0x210: /* movss xmm, ea */
3203 gen_lea_modrm(env
, s
, modrm
);
3204 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3205 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3206 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3207 tcg_gen_movi_tl(s
->T0
, 0);
3208 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3209 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)));
3210 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3211 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3212 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3213 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3215 rm
= (modrm
& 7) | REX_B(s
);
3216 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3217 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3220 case 0x310: /* movsd xmm, ea */
3222 gen_lea_modrm(env
, s
, modrm
);
3223 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3224 xmm_regs
[reg
].ZMM_Q(0)));
3225 tcg_gen_movi_tl(s
->T0
, 0);
3226 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3227 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3228 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3229 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3231 rm
= (modrm
& 7) | REX_B(s
);
3232 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3233 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3236 case 0x012: /* movlps */
3237 case 0x112: /* movlpd */
3239 gen_lea_modrm(env
, s
, modrm
);
3240 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3241 xmm_regs
[reg
].ZMM_Q(0)));
3244 rm
= (modrm
& 7) | REX_B(s
);
3245 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3246 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3249 case 0x212: /* movsldup */
3251 gen_lea_modrm(env
, s
, modrm
);
3252 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3254 rm
= (modrm
& 7) | REX_B(s
);
3255 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3256 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3257 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3258 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(2)));
3260 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3261 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3262 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3263 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3265 case 0x312: /* movddup */
3267 gen_lea_modrm(env
, s
, modrm
);
3268 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3269 xmm_regs
[reg
].ZMM_Q(0)));
3271 rm
= (modrm
& 7) | REX_B(s
);
3272 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3273 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3275 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3276 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3278 case 0x016: /* movhps */
3279 case 0x116: /* movhpd */
3281 gen_lea_modrm(env
, s
, modrm
);
3282 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3283 xmm_regs
[reg
].ZMM_Q(1)));
3286 rm
= (modrm
& 7) | REX_B(s
);
3287 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3288 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3291 case 0x216: /* movshdup */
3293 gen_lea_modrm(env
, s
, modrm
);
3294 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3296 rm
= (modrm
& 7) | REX_B(s
);
3297 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3298 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(1)));
3299 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3300 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(3)));
3302 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3303 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3304 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3305 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3310 int bit_index
, field_length
;
3312 if (b1
== 1 && reg
!= 0)
3314 field_length
= x86_ldub_code(env
, s
) & 0x3F;
3315 bit_index
= x86_ldub_code(env
, s
) & 0x3F;
3316 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3317 offsetof(CPUX86State
,xmm_regs
[reg
]));
3319 gen_helper_extrq_i(cpu_env
, s
->ptr0
,
3320 tcg_const_i32(bit_index
),
3321 tcg_const_i32(field_length
));
3323 gen_helper_insertq_i(cpu_env
, s
->ptr0
,
3324 tcg_const_i32(bit_index
),
3325 tcg_const_i32(field_length
));
3328 case 0x7e: /* movd ea, mm */
3329 #ifdef TARGET_X86_64
3330 if (s
->dflag
== MO_64
) {
3331 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3332 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3333 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3337 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3338 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3339 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3342 case 0x17e: /* movd ea, xmm */
3343 #ifdef TARGET_X86_64
3344 if (s
->dflag
== MO_64
) {
3345 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3346 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3347 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3351 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3352 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3353 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3356 case 0x27e: /* movq xmm, ea */
3358 gen_lea_modrm(env
, s
, modrm
);
3359 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3360 xmm_regs
[reg
].ZMM_Q(0)));
3362 rm
= (modrm
& 7) | REX_B(s
);
3363 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3364 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3366 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
3368 case 0x7f: /* movq ea, mm */
3370 gen_lea_modrm(env
, s
, modrm
);
3371 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3374 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[rm
].mmx
),
3375 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3378 case 0x011: /* movups */
3379 case 0x111: /* movupd */
3380 case 0x029: /* movaps */
3381 case 0x129: /* movapd */
3382 case 0x17f: /* movdqa ea, xmm */
3383 case 0x27f: /* movdqu ea, xmm */
3385 gen_lea_modrm(env
, s
, modrm
);
3386 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3388 rm
= (modrm
& 7) | REX_B(s
);
3389 gen_op_movo(s
, offsetof(CPUX86State
, xmm_regs
[rm
]),
3390 offsetof(CPUX86State
,xmm_regs
[reg
]));
3393 case 0x211: /* movss ea, xmm */
3395 gen_lea_modrm(env
, s
, modrm
);
3396 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3397 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3398 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3400 rm
= (modrm
& 7) | REX_B(s
);
3401 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_L(0)),
3402 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3405 case 0x311: /* movsd ea, xmm */
3407 gen_lea_modrm(env
, s
, modrm
);
3408 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3409 xmm_regs
[reg
].ZMM_Q(0)));
3411 rm
= (modrm
& 7) | REX_B(s
);
3412 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
3413 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3416 case 0x013: /* movlps */
3417 case 0x113: /* movlpd */
3419 gen_lea_modrm(env
, s
, modrm
);
3420 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3421 xmm_regs
[reg
].ZMM_Q(0)));
3426 case 0x017: /* movhps */
3427 case 0x117: /* movhpd */
3429 gen_lea_modrm(env
, s
, modrm
);
3430 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3431 xmm_regs
[reg
].ZMM_Q(1)));
3436 case 0x71: /* shift mm, im */
3439 case 0x171: /* shift xmm, im */
3445 val
= x86_ldub_code(env
, s
);
3447 tcg_gen_movi_tl(s
->T0
, val
);
3448 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3449 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3450 tcg_gen_movi_tl(s
->T0
, 0);
3451 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3452 offsetof(CPUX86State
, xmm_t0
.ZMM_L(1)));
3453 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3455 tcg_gen_movi_tl(s
->T0
, val
);
3456 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3457 offsetof(CPUX86State
, mmx_t0
.MMX_L(0)));
3458 tcg_gen_movi_tl(s
->T0
, 0);
3459 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3460 offsetof(CPUX86State
, mmx_t0
.MMX_L(1)));
3461 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3463 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3464 (((modrm
>> 3)) & 7)][b1
];
3469 rm
= (modrm
& 7) | REX_B(s
);
3470 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3473 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3475 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3476 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op1_offset
);
3477 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
3479 case 0x050: /* movmskps */
3480 rm
= (modrm
& 7) | REX_B(s
);
3481 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3482 offsetof(CPUX86State
,xmm_regs
[rm
]));
3483 gen_helper_movmskps(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3484 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3486 case 0x150: /* movmskpd */
3487 rm
= (modrm
& 7) | REX_B(s
);
3488 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3489 offsetof(CPUX86State
,xmm_regs
[rm
]));
3490 gen_helper_movmskpd(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3491 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3493 case 0x02a: /* cvtpi2ps */
3494 case 0x12a: /* cvtpi2pd */
3495 gen_helper_enter_mmx(cpu_env
);
3497 gen_lea_modrm(env
, s
, modrm
);
3498 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3499 gen_ldq_env_A0(s
, op2_offset
);
3502 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3504 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3505 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3506 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3509 gen_helper_cvtpi2ps(cpu_env
, s
->ptr0
, s
->ptr1
);
3513 gen_helper_cvtpi2pd(cpu_env
, s
->ptr0
, s
->ptr1
);
3517 case 0x22a: /* cvtsi2ss */
3518 case 0x32a: /* cvtsi2sd */
3519 ot
= mo_64_32(s
->dflag
);
3520 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3521 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3522 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3524 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3525 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3526 sse_fn_epi(cpu_env
, s
->ptr0
, s
->tmp2_i32
);
3528 #ifdef TARGET_X86_64
3529 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3530 sse_fn_epl(cpu_env
, s
->ptr0
, s
->T0
);
3536 case 0x02c: /* cvttps2pi */
3537 case 0x12c: /* cvttpd2pi */
3538 case 0x02d: /* cvtps2pi */
3539 case 0x12d: /* cvtpd2pi */
3540 gen_helper_enter_mmx(cpu_env
);
3542 gen_lea_modrm(env
, s
, modrm
);
3543 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3544 gen_ldo_env_A0(s
, op2_offset
);
3546 rm
= (modrm
& 7) | REX_B(s
);
3547 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3549 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3550 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3551 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3554 gen_helper_cvttps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3557 gen_helper_cvttpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3560 gen_helper_cvtps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3563 gen_helper_cvtpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3567 case 0x22c: /* cvttss2si */
3568 case 0x32c: /* cvttsd2si */
3569 case 0x22d: /* cvtss2si */
3570 case 0x32d: /* cvtsd2si */
3571 ot
= mo_64_32(s
->dflag
);
3573 gen_lea_modrm(env
, s
, modrm
);
3575 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_Q(0)));
3577 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3578 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3579 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3581 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3583 rm
= (modrm
& 7) | REX_B(s
);
3584 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3586 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3588 SSEFunc_i_ep sse_fn_i_ep
=
3589 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3590 sse_fn_i_ep(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3591 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
3593 #ifdef TARGET_X86_64
3594 SSEFunc_l_ep sse_fn_l_ep
=
3595 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3596 sse_fn_l_ep(s
->T0
, cpu_env
, s
->ptr0
);
3601 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3603 case 0xc4: /* pinsrw */
3606 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3607 val
= x86_ldub_code(env
, s
);
3610 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3611 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_W(val
)));
3614 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3615 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3618 case 0xc5: /* pextrw */
3622 ot
= mo_64_32(s
->dflag
);
3623 val
= x86_ldub_code(env
, s
);
3626 rm
= (modrm
& 7) | REX_B(s
);
3627 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
3628 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_W(val
)));
3632 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
3633 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3635 reg
= ((modrm
>> 3) & 7) | rex_r
;
3636 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3638 case 0x1d6: /* movq ea, xmm */
3640 gen_lea_modrm(env
, s
, modrm
);
3641 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3642 xmm_regs
[reg
].ZMM_Q(0)));
3644 rm
= (modrm
& 7) | REX_B(s
);
3645 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
3646 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3647 gen_op_movq_env_0(s
,
3648 offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(1)));
3651 case 0x2d6: /* movq2dq */
3652 gen_helper_enter_mmx(cpu_env
);
3654 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3655 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3656 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
3658 case 0x3d6: /* movdq2q */
3659 gen_helper_enter_mmx(cpu_env
);
3660 rm
= (modrm
& 7) | REX_B(s
);
3661 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[reg
& 7].mmx
),
3662 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3664 case 0xd7: /* pmovmskb */
3669 rm
= (modrm
& 7) | REX_B(s
);
3670 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3671 offsetof(CPUX86State
, xmm_regs
[rm
]));
3672 gen_helper_pmovmskb_xmm(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3675 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3676 offsetof(CPUX86State
, fpregs
[rm
].mmx
));
3677 gen_helper_pmovmskb_mmx(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3679 reg
= ((modrm
>> 3) & 7) | rex_r
;
3680 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3686 if ((b
& 0xf0) == 0xf0) {
3689 modrm
= x86_ldub_code(env
, s
);
3691 reg
= ((modrm
>> 3) & 7) | rex_r
;
3692 mod
= (modrm
>> 6) & 3;
3697 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3701 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3705 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3707 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3709 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3710 gen_lea_modrm(env
, s
, modrm
);
3712 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3713 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3714 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3715 gen_ldq_env_A0(s
, op2_offset
+
3716 offsetof(ZMMReg
, ZMM_Q(0)));
3718 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3719 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3720 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
3721 s
->mem_index
, MO_LEUL
);
3722 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, op2_offset
+
3723 offsetof(ZMMReg
, ZMM_L(0)));
3725 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3726 tcg_gen_qemu_ld_tl(s
->tmp0
, s
->A0
,
3727 s
->mem_index
, MO_LEUW
);
3728 tcg_gen_st16_tl(s
->tmp0
, cpu_env
, op2_offset
+
3729 offsetof(ZMMReg
, ZMM_W(0)));
3731 case 0x2a: /* movntqda */
3732 gen_ldo_env_A0(s
, op1_offset
);
3735 gen_ldo_env_A0(s
, op2_offset
);
3739 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3741 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3743 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3744 gen_lea_modrm(env
, s
, modrm
);
3745 gen_ldq_env_A0(s
, op2_offset
);
3748 if (sse_fn_epp
== SSE_SPECIAL
) {
3752 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3753 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3754 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
3757 set_cc_op(s
, CC_OP_EFLAGS
);
3764 /* Various integer extensions at 0f 38 f[0-f]. */
3765 b
= modrm
| (b1
<< 8);
3766 modrm
= x86_ldub_code(env
, s
);
3767 reg
= ((modrm
>> 3) & 7) | rex_r
;
3770 case 0x3f0: /* crc32 Gd,Eb */
3771 case 0x3f1: /* crc32 Gd,Ey */
3773 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3776 if ((b
& 0xff) == 0xf0) {
3778 } else if (s
->dflag
!= MO_64
) {
3779 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3784 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[reg
]);
3785 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3786 gen_helper_crc32(s
->T0
, s
->tmp2_i32
,
3787 s
->T0
, tcg_const_i32(8 << ot
));
3789 ot
= mo_64_32(s
->dflag
);
3790 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3793 case 0x1f0: /* crc32 or movbe */
3795 /* For these insns, the f3 prefix is supposed to have priority
3796 over the 66 prefix, but that's not what we implement above
3798 if (s
->prefix
& PREFIX_REPNZ
) {
3802 case 0x0f0: /* movbe Gy,My */
3803 case 0x0f1: /* movbe My,Gy */
3804 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3807 if (s
->dflag
!= MO_64
) {
3808 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3813 gen_lea_modrm(env
, s
, modrm
);
3815 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
3816 s
->mem_index
, ot
| MO_BE
);
3817 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3819 tcg_gen_qemu_st_tl(cpu_regs
[reg
], s
->A0
,
3820 s
->mem_index
, ot
| MO_BE
);
3824 case 0x0f2: /* andn Gy, By, Ey */
3825 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3826 || !(s
->prefix
& PREFIX_VEX
)
3830 ot
= mo_64_32(s
->dflag
);
3831 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3832 tcg_gen_andc_tl(s
->T0
, s
->T0
, cpu_regs
[s
->vex_v
]);
3833 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3834 gen_op_update1_cc(s
);
3835 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3838 case 0x0f7: /* bextr Gy, Ey, By */
3839 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3840 || !(s
->prefix
& PREFIX_VEX
)
3844 ot
= mo_64_32(s
->dflag
);
3848 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3849 /* Extract START, and shift the operand.
3850 Shifts larger than operand size get zeros. */
3851 tcg_gen_ext8u_tl(s
->A0
, cpu_regs
[s
->vex_v
]);
3852 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->A0
);
3854 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3855 zero
= tcg_const_tl(0);
3856 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->T0
, s
->A0
, bound
,
3858 tcg_temp_free(zero
);
3860 /* Extract the LEN into a mask. Lengths larger than
3861 operand size get all ones. */
3862 tcg_gen_extract_tl(s
->A0
, cpu_regs
[s
->vex_v
], 8, 8);
3863 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->A0
, s
->A0
, bound
,
3865 tcg_temp_free(bound
);
3866 tcg_gen_movi_tl(s
->T1
, 1);
3867 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->A0
);
3868 tcg_gen_subi_tl(s
->T1
, s
->T1
, 1);
3869 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
3871 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3872 gen_op_update1_cc(s
);
3873 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3877 case 0x0f5: /* bzhi Gy, Ey, By */
3878 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3879 || !(s
->prefix
& PREFIX_VEX
)
3883 ot
= mo_64_32(s
->dflag
);
3884 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3885 tcg_gen_ext8u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3887 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3888 /* Note that since we're using BMILG (in order to get O
3889 cleared) we need to store the inverse into C. */
3890 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
3892 tcg_gen_movcond_tl(TCG_COND_GT
, s
->T1
, s
->T1
,
3893 bound
, bound
, s
->T1
);
3894 tcg_temp_free(bound
);
3896 tcg_gen_movi_tl(s
->A0
, -1);
3897 tcg_gen_shl_tl(s
->A0
, s
->A0
, s
->T1
);
3898 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->A0
);
3899 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3900 gen_op_update1_cc(s
);
3901 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3904 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3905 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3906 || !(s
->prefix
& PREFIX_VEX
)
3910 ot
= mo_64_32(s
->dflag
);
3911 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3914 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3915 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EDX
]);
3916 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3917 s
->tmp2_i32
, s
->tmp3_i32
);
3918 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], s
->tmp2_i32
);
3919 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp3_i32
);
3921 #ifdef TARGET_X86_64
3923 tcg_gen_mulu2_i64(s
->T0
, s
->T1
,
3924 s
->T0
, cpu_regs
[R_EDX
]);
3925 tcg_gen_mov_i64(cpu_regs
[s
->vex_v
], s
->T0
);
3926 tcg_gen_mov_i64(cpu_regs
[reg
], s
->T1
);
3932 case 0x3f5: /* pdep Gy, By, Ey */
3933 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3934 || !(s
->prefix
& PREFIX_VEX
)
3938 ot
= mo_64_32(s
->dflag
);
3939 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3940 /* Note that by zero-extending the mask operand, we
3941 automatically handle zero-extending the result. */
3943 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3945 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3947 gen_helper_pdep(cpu_regs
[reg
], s
->T0
, s
->T1
);
3950 case 0x2f5: /* pext Gy, By, Ey */
3951 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3952 || !(s
->prefix
& PREFIX_VEX
)
3956 ot
= mo_64_32(s
->dflag
);
3957 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3958 /* Note that by zero-extending the mask operand, we
3959 automatically handle zero-extending the result. */
3961 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3963 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3965 gen_helper_pext(cpu_regs
[reg
], s
->T0
, s
->T1
);
3968 case 0x1f6: /* adcx Gy, Ey */
3969 case 0x2f6: /* adox Gy, Ey */
3970 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
3973 TCGv carry_in
, carry_out
, zero
;
3976 ot
= mo_64_32(s
->dflag
);
3977 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3979 /* Re-use the carry-out from a previous round. */
3981 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
3985 carry_in
= cpu_cc_dst
;
3986 end_op
= CC_OP_ADCX
;
3988 end_op
= CC_OP_ADCOX
;
3993 end_op
= CC_OP_ADCOX
;
3995 carry_in
= cpu_cc_src2
;
3996 end_op
= CC_OP_ADOX
;
4000 end_op
= CC_OP_ADCOX
;
4001 carry_in
= carry_out
;
4004 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
4007 /* If we can't reuse carry-out, get it out of EFLAGS. */
4009 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
4010 gen_compute_eflags(s
);
4013 tcg_gen_extract_tl(carry_in
, cpu_cc_src
,
4014 ctz32(b
== 0x1f6 ? CC_C
: CC_O
), 1);
4018 #ifdef TARGET_X86_64
4020 /* If we know TL is 64-bit, and we want a 32-bit
4021 result, just do everything in 64-bit arithmetic. */
4022 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
4023 tcg_gen_ext32u_i64(s
->T0
, s
->T0
);
4024 tcg_gen_add_i64(s
->T0
, s
->T0
, cpu_regs
[reg
]);
4025 tcg_gen_add_i64(s
->T0
, s
->T0
, carry_in
);
4026 tcg_gen_ext32u_i64(cpu_regs
[reg
], s
->T0
);
4027 tcg_gen_shri_i64(carry_out
, s
->T0
, 32);
4031 /* Otherwise compute the carry-out in two steps. */
4032 zero
= tcg_const_tl(0);
4033 tcg_gen_add2_tl(s
->T0
, carry_out
,
4036 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
4037 cpu_regs
[reg
], carry_out
,
4039 tcg_temp_free(zero
);
4042 set_cc_op(s
, end_op
);
4046 case 0x1f7: /* shlx Gy, Ey, By */
4047 case 0x2f7: /* sarx Gy, Ey, By */
4048 case 0x3f7: /* shrx Gy, Ey, By */
4049 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4050 || !(s
->prefix
& PREFIX_VEX
)
4054 ot
= mo_64_32(s
->dflag
);
4055 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4057 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 63);
4059 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 31);
4062 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
4063 } else if (b
== 0x2f7) {
4065 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
4067 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
4070 tcg_gen_ext32u_tl(s
->T0
, s
->T0
);
4072 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
4074 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4080 case 0x3f3: /* Group 17 */
4081 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4082 || !(s
->prefix
& PREFIX_VEX
)
4086 ot
= mo_64_32(s
->dflag
);
4087 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4089 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
4091 case 1: /* blsr By,Ey */
4092 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4093 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4095 case 2: /* blsmsk By,Ey */
4096 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4097 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->T1
);
4099 case 3: /* blsi By, Ey */
4100 tcg_gen_neg_tl(s
->T1
, s
->T0
);
4101 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4106 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4107 gen_op_mov_reg_v(s
, ot
, s
->vex_v
, s
->T0
);
4108 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4119 modrm
= x86_ldub_code(env
, s
);
4121 reg
= ((modrm
>> 3) & 7) | rex_r
;
4122 mod
= (modrm
>> 6) & 3;
4127 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4131 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4136 if (sse_fn_eppi
== SSE_SPECIAL
) {
4137 ot
= mo_64_32(s
->dflag
);
4138 rm
= (modrm
& 7) | REX_B(s
);
4140 gen_lea_modrm(env
, s
, modrm
);
4141 reg
= ((modrm
>> 3) & 7) | rex_r
;
4142 val
= x86_ldub_code(env
, s
);
4144 case 0x14: /* pextrb */
4145 tcg_gen_ld8u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4146 xmm_regs
[reg
].ZMM_B(val
& 15)));
4148 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4150 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4151 s
->mem_index
, MO_UB
);
4154 case 0x15: /* pextrw */
4155 tcg_gen_ld16u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4156 xmm_regs
[reg
].ZMM_W(val
& 7)));
4158 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4160 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4161 s
->mem_index
, MO_LEUW
);
4165 if (ot
== MO_32
) { /* pextrd */
4166 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4167 offsetof(CPUX86State
,
4168 xmm_regs
[reg
].ZMM_L(val
& 3)));
4170 tcg_gen_extu_i32_tl(cpu_regs
[rm
], s
->tmp2_i32
);
4172 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4173 s
->mem_index
, MO_LEUL
);
4175 } else { /* pextrq */
4176 #ifdef TARGET_X86_64
4177 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
4178 offsetof(CPUX86State
,
4179 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4181 tcg_gen_mov_i64(cpu_regs
[rm
], s
->tmp1_i64
);
4183 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4184 s
->mem_index
, MO_LEQ
);
4191 case 0x17: /* extractps */
4192 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4193 xmm_regs
[reg
].ZMM_L(val
& 3)));
4195 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4197 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4198 s
->mem_index
, MO_LEUL
);
4201 case 0x20: /* pinsrb */
4203 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
4205 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
4206 s
->mem_index
, MO_UB
);
4208 tcg_gen_st8_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4209 xmm_regs
[reg
].ZMM_B(val
& 15)));
4211 case 0x21: /* insertps */
4213 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4214 offsetof(CPUX86State
,xmm_regs
[rm
]
4215 .ZMM_L((val
>> 6) & 3)));
4217 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4218 s
->mem_index
, MO_LEUL
);
4220 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4221 offsetof(CPUX86State
,xmm_regs
[reg
]
4222 .ZMM_L((val
>> 4) & 3)));
4224 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4225 cpu_env
, offsetof(CPUX86State
,
4226 xmm_regs
[reg
].ZMM_L(0)));
4228 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4229 cpu_env
, offsetof(CPUX86State
,
4230 xmm_regs
[reg
].ZMM_L(1)));
4232 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4233 cpu_env
, offsetof(CPUX86State
,
4234 xmm_regs
[reg
].ZMM_L(2)));
4236 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4237 cpu_env
, offsetof(CPUX86State
,
4238 xmm_regs
[reg
].ZMM_L(3)));
4241 if (ot
== MO_32
) { /* pinsrd */
4243 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[rm
]);
4245 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4246 s
->mem_index
, MO_LEUL
);
4248 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4249 offsetof(CPUX86State
,
4250 xmm_regs
[reg
].ZMM_L(val
& 3)));
4251 } else { /* pinsrq */
4252 #ifdef TARGET_X86_64
4254 gen_op_mov_v_reg(s
, ot
, s
->tmp1_i64
, rm
);
4256 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4257 s
->mem_index
, MO_LEQ
);
4259 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
4260 offsetof(CPUX86State
,
4261 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4272 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4274 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4276 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4277 gen_lea_modrm(env
, s
, modrm
);
4278 gen_ldo_env_A0(s
, op2_offset
);
4281 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4283 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4285 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4286 gen_lea_modrm(env
, s
, modrm
);
4287 gen_ldq_env_A0(s
, op2_offset
);
4290 val
= x86_ldub_code(env
, s
);
4292 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4293 set_cc_op(s
, CC_OP_EFLAGS
);
4295 if (s
->dflag
== MO_64
) {
4296 /* The helper must use entire 64-bit gp registers */
4301 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4302 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4303 sse_fn_eppi(cpu_env
, s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4307 /* Various integer extensions at 0f 3a f[0-f]. */
4308 b
= modrm
| (b1
<< 8);
4309 modrm
= x86_ldub_code(env
, s
);
4310 reg
= ((modrm
>> 3) & 7) | rex_r
;
4313 case 0x3f0: /* rorx Gy,Ey, Ib */
4314 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4315 || !(s
->prefix
& PREFIX_VEX
)
4319 ot
= mo_64_32(s
->dflag
);
4320 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4321 b
= x86_ldub_code(env
, s
);
4323 tcg_gen_rotri_tl(s
->T0
, s
->T0
, b
& 63);
4325 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4326 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, b
& 31);
4327 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
4329 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4339 gen_unknown_opcode(env
, s
);
4343 /* generic MMX or SSE operation */
4345 case 0x70: /* pshufx insn */
4346 case 0xc6: /* pshufx insn */
4347 case 0xc2: /* compare insns */
4354 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4358 gen_lea_modrm(env
, s
, modrm
);
4359 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4365 /* Most sse scalar operations. */
4368 } else if (b1
== 3) {
4373 case 0x2e: /* ucomis[sd] */
4374 case 0x2f: /* comis[sd] */
4386 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
4387 tcg_gen_st32_tl(s
->T0
, cpu_env
,
4388 offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
4392 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
4395 /* 128 bit access */
4396 gen_ldo_env_A0(s
, op2_offset
);
4400 rm
= (modrm
& 7) | REX_B(s
);
4401 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4404 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4406 gen_lea_modrm(env
, s
, modrm
);
4407 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4408 gen_ldq_env_A0(s
, op2_offset
);
4411 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4415 case 0x0f: /* 3DNow! data insns */
4416 val
= x86_ldub_code(env
, s
);
4417 sse_fn_epp
= sse_op_table5
[val
];
4421 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
4424 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4425 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4426 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4428 case 0x70: /* pshufx insn */
4429 case 0xc6: /* pshufx insn */
4430 val
= x86_ldub_code(env
, s
);
4431 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4432 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4433 /* XXX: introduce a new table? */
4434 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4435 sse_fn_ppi(s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4439 val
= x86_ldub_code(env
, s
);
4442 sse_fn_epp
= sse_op_table4
[val
][b1
];
4444 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4445 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4446 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4449 /* maskmov : we must prepare A0 */
4452 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EDI
]);
4453 gen_extu(s
->aflag
, s
->A0
);
4454 gen_add_A0_ds_seg(s
);
4456 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4457 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4458 /* XXX: introduce a new table? */
4459 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4460 sse_fn_eppt(cpu_env
, s
->ptr0
, s
->ptr1
, s
->A0
);
4463 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4464 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4465 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4468 if (b
== 0x2e || b
== 0x2f) {
4469 set_cc_op(s
, CC_OP_EFLAGS
);
/* convert one instruction. s->base.is_jmp is set if the translation must
   be stopped. Return the next pc value */
static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
{
    CPUX86State *env = cpu->env_ptr;
    int b, prefixes;
    int shift;
    TCGMemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    target_ulong next_eip, tval;
    int rex_w, rex_r;
    target_ulong pc_start = s->base.pc_next;

    s->pc_start = s->pc = pc_start;
    s->override = -1;
#ifdef TARGET_X86_64
    s->rex_x = 0;
    s->rex_b = 0;
    s->x86_64_hregs = false;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
    if (sigsetjmp(s->jmpbuf, 0) != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        return s->pc;
    }

    prefixes = 0;
    rex_w = -1;
    rex_r = 0;

 next_byte:
    b = x86_ldub_code(env, s);
    /* Collect prefixes.  */
    switch (b) {
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            rex_w = (b >> 3) & 1;
            rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            REX_B(s) = (b & 0x1) << 3;
            /* select uniform byte register addressing */
            s->x86_64_hregs = true;
            goto next_byte;
        }
        break;
#endif
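    /* The REX fields are pre-shifted so they can simply be OR'ed into the
       ModRM reg/index/base numbers later: rex_r, rex_x and rex_b each
       contribute bit 3 of a register number.  */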
4557 case 0xc5: /* 2-byte VEX */
4558 case 0xc4: /* 3-byte VEX */
4559 /* VEX prefixes cannot be used except in 32-bit mode.
4560 Otherwise the instruction is LES or LDS. */
4561 if (s
->code32
&& !s
->vm86
) {
4562 static const int pp_prefix
[4] = {
4563 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4565 int vex3
, vex2
= x86_ldub_code(env
, s
);
4567 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4568 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4569 otherwise the instruction is LES or LDS. */
4570 s
->pc
--; /* rewind the advance_pc() x86_ldub_code() did */
4574 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4575 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4576 | PREFIX_LOCK
| PREFIX_DATA
)) {
4579 #ifdef TARGET_X86_64
4580 if (s
->x86_64_hregs
) {
4584 rex_r
= (~vex2
>> 4) & 8;
4586 /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
4588 b
= x86_ldub_code(env
, s
) | 0x100;
4590 /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
4591 #ifdef TARGET_X86_64
4592 s
->rex_x
= (~vex2
>> 3) & 8;
4593 s
->rex_b
= (~vex2
>> 2) & 8;
4595 vex3
= x86_ldub_code(env
, s
);
4596 rex_w
= (vex3
>> 7) & 1;
4597 switch (vex2
& 0x1f) {
4598 case 0x01: /* Implied 0f leading opcode bytes. */
4599 b
= x86_ldub_code(env
, s
) | 0x100;
4601 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4604 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4607 default: /* Reserved for future use. */
4611 s
->vex_v
= (~vex3
>> 3) & 0xf;
4612 s
->vex_l
= (vex3
>> 2) & 1;
4613 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4618 /* Post-process prefixes. */
4620 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4621 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4622 over 0x66 if both are present. */
4623 dflag
= (rex_w
> 0 ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
4624 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4625 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
4627 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4628 if (s
->code32
^ ((prefixes
& PREFIX_DATA
) != 0)) {
4633 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4634 if (s
->code32
^ ((prefixes
& PREFIX_ADR
) != 0)) {
4641 s
->prefix
= prefixes
;
4645 /* now check op code */
4649 /**************************/
4650 /* extended op code */
4651 b
= x86_ldub_code(env
, s
) | 0x100;
4654 /**************************/
            ot = mo_b_d(b, dflag);

            case 0: /* OP Ev, Gv */
                modrm = x86_ldub_code(env, s);
                reg = ((modrm >> 3) & 7) | rex_r;
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                    gen_lea_modrm(env, s, modrm);
                } else if (op == OP_XORL && rm == reg) {
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_op_mov_reg_v(s, ot, reg, s->T0);
                gen_op_mov_v_reg(s, ot, s->T1, reg);
                gen_op(s, op, ot, opreg);
            case 1: /* OP Gv, Ev */
                modrm = x86_ldub_code(env, s);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | rex_r;
                rm = (modrm & 7) | REX_B(s);
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, s->T1, s->A0);
                } else if (op == OP_XORL && rm == reg) {
                    gen_op_mov_v_reg(s, ot, s->T1, rm);
                gen_op(s, op, ot, reg);
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                tcg_gen_movi_tl(s->T1, val);
                gen_op(s, op, ot, OR_EAX);
    case 0x80: /* GRP1 */
            ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;

                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);

            val = insn_get(env, s, ot);
            val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T1, val);
        gen_op(s, op, ot, opreg);
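        /*
         * Background for the immediate handling above (standard x86
         * encoding, noted here for orientation): the GRP1 opcodes share one
         * ModRM-selected operation field (/0 ADD, /1 OR, /2 ADC, /3 SBB,
         * /4 AND, /5 SUB, /6 XOR, /7 CMP).  0x80 and 0x81 carry a
         * full-width immediate, read with insn_get(..., ot), while 0x83
         * carries a single byte sign-extended to the operand size, which is
         * the (int8_t) cast path.
         */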
        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
    case 0x48 ... 0x4f: /* dec Gv */
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
    case 0xf6: /* GRP3 */
            ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;

                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here.  */
            if (!(s->prefix & PREFIX_LOCK)
                gen_op_ld_v(s, ot, s->T0, s->A0);
            gen_op_mov_v_reg(s, ot, s->T0, rm);

            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_testl_T0_T1_cc(s);
            set_cc_op(s, CC_OP_LOGICB + ot);

            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_movi_tl(s->T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_not_tl(s->T0, s->T0);
                    gen_op_st_v(s, ot, s->T0, s->A0);
                    gen_op_mov_reg_v(s, ot, rm, s->T0);

            if (s->prefix & PREFIX_LOCK) {
                TCGv a0, t0, t1, t2;

                a0 = tcg_temp_local_new();
                t0 = tcg_temp_local_new();
                label1 = gen_new_label();

                tcg_gen_mov_tl(a0, s->A0);
                tcg_gen_mov_tl(t0, s->T0);

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_gen_mov_tl(s->T0, t0);
                tcg_gen_neg_tl(s->T0, s->T0);
                    gen_op_st_v(s, ot, s->T0, s->A0);
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_update_neg_cc(s);
            set_cc_op(s, CC_OP_SUBB + ot);
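            /*
             * Reading note (an interpretation of the block above, not extra
             * behaviour): TCG has no atomic-negate primitive, so a locked
             * NEG is emitted as a compare-and-swap loop -- read the old
             * value into t0, attempt cmpxchg(mem, t0, -t0), and branch back
             * to label1 if another vCPU changed the memory word in the
             * meantime.  Local temps are used because the values must
             * survive the backward branch.
             */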
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8u_tl(s->T0, s->T0);
                tcg_gen_ext8u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);

                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16u_tl(s->T0, s->T0);
                tcg_gen_ext16u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                tcg_gen_mov_tl(cpu_cc_src, s->T0);
                set_cc_op(s, CC_OP_MULW);

                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);

#ifdef TARGET_X86_64
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
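                /*
                 * Reading note: for unsigned MUL, CF and OF are set exactly
                 * when the upper half of the product is non-zero.  Each
                 * size therefore leaves the low result in cpu_cc_dst and
                 * the high part (the 0xff00 mask for the byte form, AX's
                 * high word, EDX, or RDX) in cpu_cc_src, deferring the
                 * actual flag computation to the CC_OP_MUL* lazy-flags
                 * handlers.
                 */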
4909 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
4910 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
4911 tcg_gen_ext8s_tl(s
->T1
, s
->T1
);
4912 /* XXX: use 32 bit mul which could be faster */
4913 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
4914 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4915 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4916 tcg_gen_ext8s_tl(s
->tmp0
, s
->T0
);
4917 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
4918 set_cc_op(s
, CC_OP_MULB
);
4921 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
4922 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
4923 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
4924 /* XXX: use 32 bit mul which could be faster */
4925 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
4926 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4927 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4928 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
4929 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
4930 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
4931 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
4932 set_cc_op(s
, CC_OP_MULW
);
4936 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4937 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
4938 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
4939 s
->tmp2_i32
, s
->tmp3_i32
);
4940 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
4941 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
4942 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
4943 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4944 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
4945 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
4946 set_cc_op(s
, CC_OP_MULL
);
#ifdef TARGET_X86_64
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
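                /*
                 * Reading note: for signed IMUL the overflow condition is
                 * "the product does not fit in the low half", i.e. the high
                 * half is not simply the sign-extension of the low half.
                 * cpu_cc_src is set to sign(low) - high here (the narrower
                 * forms above use an equivalent subtraction), so it is zero
                 * exactly when CF/OF must be clear.
                 */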
4963 gen_helper_divb_AL(cpu_env
, s
->T0
);
4966 gen_helper_divw_AX(cpu_env
, s
->T0
);
4970 gen_helper_divl_EAX(cpu_env
, s
->T0
);
4972 #ifdef TARGET_X86_64
4974 gen_helper_divq_EAX(cpu_env
, s
->T0
);
4982 gen_helper_idivb_AL(cpu_env
, s
->T0
);
4985 gen_helper_idivw_AX(cpu_env
, s
->T0
);
4989 gen_helper_idivl_EAX(cpu_env
, s
->T0
);
4991 #ifdef TARGET_X86_64
4993 gen_helper_idivq_EAX(cpu_env
, s
->T0
);
5003 case 0xfe: /* GRP4 */
5004 case 0xff: /* GRP5 */
5005 ot
= mo_b_d(b
, dflag
);
5007 modrm
= x86_ldub_code(env
, s
);
5008 mod
= (modrm
>> 6) & 3;
5009 rm
= (modrm
& 7) | REX_B(s
);
5010 op
= (modrm
>> 3) & 7;
5011 if (op
>= 2 && b
== 0xfe) {
5015 if (op
== 2 || op
== 4) {
5016 /* operand size for jumps is 64 bit */
5018 } else if (op
== 3 || op
== 5) {
5019 ot
= dflag
!= MO_16
? MO_32
+ (rex_w
== 1) : MO_16
;
5020 } else if (op
== 6) {
5021 /* default push size is 64 bit */
5022 ot
= mo_pushpop(s
, dflag
);
5026 gen_lea_modrm(env
, s
, modrm
);
5027 if (op
>= 2 && op
!= 3 && op
!= 5)
5028 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5030 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5034 case 0: /* inc Ev */
5039 gen_inc(s
, ot
, opreg
, 1);
5041 case 1: /* dec Ev */
5046 gen_inc(s
, ot
, opreg
, -1);
5048 case 2: /* call Ev */
5049 /* XXX: optimize if memory (no 'and' is necessary) */
5050 if (dflag
== MO_16
) {
5051 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5053 next_eip
= s
->pc
- s
->cs_base
;
5054 tcg_gen_movi_tl(s
->T1
, next_eip
);
5055 gen_push_v(s
, s
->T1
);
5056 gen_op_jmp_v(s
->T0
);
5060 case 3: /* lcall Ev */
5061 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5062 gen_add_A0_im(s
, 1 << ot
);
5063 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5065 if (s
->pe
&& !s
->vm86
) {
5066 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5067 gen_helper_lcall_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
5068 tcg_const_i32(dflag
- 1),
5069 tcg_const_tl(s
->pc
- s
->cs_base
));
5071 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5072 gen_helper_lcall_real(cpu_env
, s
->tmp2_i32
, s
->T1
,
5073 tcg_const_i32(dflag
- 1),
5074 tcg_const_i32(s
->pc
- s
->cs_base
));
5076 tcg_gen_ld_tl(s
->tmp4
, cpu_env
, offsetof(CPUX86State
, eip
));
5079 case 4: /* jmp Ev */
5080 if (dflag
== MO_16
) {
5081 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5083 gen_op_jmp_v(s
->T0
);
5087 case 5: /* ljmp Ev */
5088 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5089 gen_add_A0_im(s
, 1 << ot
);
5090 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5092 if (s
->pe
&& !s
->vm86
) {
5093 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5094 gen_helper_ljmp_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
5095 tcg_const_tl(s
->pc
- s
->cs_base
));
5097 gen_op_movl_seg_T0_vm(s
, R_CS
);
5098 gen_op_jmp_v(s
->T1
);
5100 tcg_gen_ld_tl(s
->tmp4
, cpu_env
, offsetof(CPUX86State
, eip
));
5103 case 6: /* push Ev */
5104 gen_push_v(s
, s
->T0
);
5111 case 0x84: /* test Ev, Gv */
5113 ot
= mo_b_d(b
, dflag
);
5115 modrm
= x86_ldub_code(env
, s
);
5116 reg
= ((modrm
>> 3) & 7) | rex_r
;
5118 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5119 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5120 gen_op_testl_T0_T1_cc(s
);
5121 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5124 case 0xa8: /* test eAX, Iv */
5126 ot
= mo_b_d(b
, dflag
);
5127 val
= insn_get(env
, s
, ot
);
5129 gen_op_mov_v_reg(s
, ot
, s
->T0
, OR_EAX
);
5130 tcg_gen_movi_tl(s
->T1
, val
);
5131 gen_op_testl_T0_T1_cc(s
);
5132 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5135 case 0x98: /* CWDE/CBW */
5137 #ifdef TARGET_X86_64
5139 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
5140 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
5141 gen_op_mov_reg_v(s
, MO_64
, R_EAX
, s
->T0
);
5145 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
5146 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5147 gen_op_mov_reg_v(s
, MO_32
, R_EAX
, s
->T0
);
5150 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_EAX
);
5151 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
5152 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5158 case 0x99: /* CDQ/CWD */
5160 #ifdef TARGET_X86_64
5162 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, R_EAX
);
5163 tcg_gen_sari_tl(s
->T0
, s
->T0
, 63);
5164 gen_op_mov_reg_v(s
, MO_64
, R_EDX
, s
->T0
);
5168 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
5169 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
5170 tcg_gen_sari_tl(s
->T0
, s
->T0
, 31);
5171 gen_op_mov_reg_v(s
, MO_32
, R_EDX
, s
->T0
);
5174 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
5175 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5176 tcg_gen_sari_tl(s
->T0
, s
->T0
, 15);
5177 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
5183 case 0x1af: /* imul Gv, Ev */
5184 case 0x69: /* imul Gv, Ev, I */
5187 modrm
= x86_ldub_code(env
, s
);
5188 reg
= ((modrm
>> 3) & 7) | rex_r
;
5190 s
->rip_offset
= insn_const_size(ot
);
5193 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5195 val
= insn_get(env
, s
, ot
);
5196 tcg_gen_movi_tl(s
->T1
, val
);
5197 } else if (b
== 0x6b) {
5198 val
= (int8_t)insn_get(env
, s
, MO_8
);
5199 tcg_gen_movi_tl(s
->T1
, val
);
5201 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5204 #ifdef TARGET_X86_64
5206 tcg_gen_muls2_i64(cpu_regs
[reg
], s
->T1
, s
->T0
, s
->T1
);
5207 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5208 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
5209 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, s
->T1
);
5213 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5214 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5215 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
5216 s
->tmp2_i32
, s
->tmp3_i32
);
5217 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
5218 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
5219 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5220 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
5221 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
5224 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5225 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
5226 /* XXX: use 32 bit mul which could be faster */
5227 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5228 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5229 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
5230 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
5231 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5234 set_cc_op(s
, CC_OP_MULB
+ ot
);
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
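        /*
         * Reading note: XADD exchanges its operands and then adds, so the
         * destination ends up with old_dst + src and the source register
         * receives old_dst.  With a LOCK prefix the memory form maps
         * directly onto tcg_gen_atomic_fetch_add_tl, which atomically adds
         * T0 while returning the old memory value in T1.
         */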
    case 0x1b1: /* cmpxchg Ev, Gv */
            TCGv oldv, newv, cmpv;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);

            if (s->prefix & PREFIX_LOCK) {
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
                gen_op_mov_reg_v(s, ot, R_EAX, oldv);
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(s, ot, oldv, rm);
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, s->A0);
                    rm = 0; /* avoid warning */
                /* store value = (old == cmp ? new : old); */
                tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                    gen_op_mov_reg_v(s, ot, R_EAX, oldv);
                    gen_op_mov_reg_v(s, ot, rm, newv);
                    /* Perform an unconditional store cycle like physical cpu;
                       must be before changing accumulator to ensure
                       idempotency if the store faults and the instruction
                       is restarted */
                    gen_op_st_v(s, ot, newv, s->A0);
                    gen_op_mov_reg_v(s, ot, R_EAX, oldv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
            tcg_temp_free(oldv);
            tcg_temp_free(newv);
            tcg_temp_free(cmpv);
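            /*
             * Reading note: CMPXCHG compares the accumulator with the
             * destination; on a match the destination is written with the
             * source, otherwise the accumulator is loaded from the
             * destination.  The flags come from "cmp accumulator, dest",
             * which is why cc_srcT/cc_src/cc_dst are set up as a SUB.  The
             * non-LOCK memory path still performs an unconditional store
             * (of either the new or the old value, selected by movcond) to
             * mirror the write cycle a physical CPU performs.
             */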
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
            gen_lea_modrm(env, s, modrm);
            if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                gen_helper_cmpxchg16b(cpu_env, s->A0);
                gen_helper_cmpxchg16b_unlocked(cpu_env, s->A0);
            if (!(s->cpuid_features & CPUID_CX8))
            gen_lea_modrm(env, s, modrm);
            if ((s->prefix & PREFIX_LOCK) && (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                gen_helper_cmpxchg8b(cpu_env, s->A0);
                gen_helper_cmpxchg8b_unlocked(cpu_env, s->A0);
        set_cc_op(s, CC_OP_EFLAGS);
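        /*
         * Reading note: CMPXCHG8B/16B availability is gated on the CX8 and
         * CX16 CPUID bits, and the /1 ModRM extension with a memory operand
         * is the only valid form (hence the mod/reg check above).  The
         * atomic helper is only needed when the instruction is LOCKed and
         * the TB may run in parallel with other vCPUs (CF_PARALLEL);
         * otherwise the unlocked helper suffices.
         */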
5352 /**************************/
5354 case 0x50 ... 0x57: /* push */
5355 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, (b
& 7) | REX_B(s
));
5356 gen_push_v(s
, s
->T0
);
5358 case 0x58 ... 0x5f: /* pop */
5360 /* NOTE: order is important for pop %sp */
5361 gen_pop_update(s
, ot
);
5362 gen_op_mov_reg_v(s
, ot
, (b
& 7) | REX_B(s
), s
->T0
);
5364 case 0x60: /* pusha */
5369 case 0x61: /* popa */
5374 case 0x68: /* push Iv */
5376 ot
= mo_pushpop(s
, dflag
);
5378 val
= insn_get(env
, s
, ot
);
5380 val
= (int8_t)insn_get(env
, s
, MO_8
);
5381 tcg_gen_movi_tl(s
->T0
, val
);
5382 gen_push_v(s
, s
->T0
);
5384 case 0x8f: /* pop Ev */
5385 modrm
= x86_ldub_code(env
, s
);
5386 mod
= (modrm
>> 6) & 3;
5389 /* NOTE: order is important for pop %sp */
5390 gen_pop_update(s
, ot
);
5391 rm
= (modrm
& 7) | REX_B(s
);
5392 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5394 /* NOTE: order is important too for MMU exceptions */
5395 s
->popl_esp_hack
= 1 << ot
;
5396 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5397 s
->popl_esp_hack
= 0;
5398 gen_pop_update(s
, ot
);
5401 case 0xc8: /* enter */
5404 val
= x86_lduw_code(env
, s
);
5405 level
= x86_ldub_code(env
, s
);
5406 gen_enter(s
, val
, level
);
5409 case 0xc9: /* leave */
5412 case 0x06: /* push es */
5413 case 0x0e: /* push cs */
5414 case 0x16: /* push ss */
5415 case 0x1e: /* push ds */
5418 gen_op_movl_T0_seg(s
, b
>> 3);
5419 gen_push_v(s
, s
->T0
);
5421 case 0x1a0: /* push fs */
5422 case 0x1a8: /* push gs */
5423 gen_op_movl_T0_seg(s
, (b
>> 3) & 7);
5424 gen_push_v(s
, s
->T0
);
5426 case 0x07: /* pop es */
5427 case 0x17: /* pop ss */
5428 case 0x1f: /* pop ds */
5433 gen_movl_seg_T0(s
, reg
);
5434 gen_pop_update(s
, ot
);
5435 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5436 if (s
->base
.is_jmp
) {
5437 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5440 gen_eob_inhibit_irq(s
, true);
5446 case 0x1a1: /* pop fs */
5447 case 0x1a9: /* pop gs */
5449 gen_movl_seg_T0(s
, (b
>> 3) & 7);
5450 gen_pop_update(s
, ot
);
5451 if (s
->base
.is_jmp
) {
5452 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5457 /**************************/
5460 case 0x89: /* mov Gv, Ev */
5461 ot
= mo_b_d(b
, dflag
);
5462 modrm
= x86_ldub_code(env
, s
);
5463 reg
= ((modrm
>> 3) & 7) | rex_r
;
5465 /* generate a generic store */
5466 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5469 case 0xc7: /* mov Ev, Iv */
5470 ot
= mo_b_d(b
, dflag
);
5471 modrm
= x86_ldub_code(env
, s
);
5472 mod
= (modrm
>> 6) & 3;
5474 s
->rip_offset
= insn_const_size(ot
);
5475 gen_lea_modrm(env
, s
, modrm
);
5477 val
= insn_get(env
, s
, ot
);
5478 tcg_gen_movi_tl(s
->T0
, val
);
5480 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5482 gen_op_mov_reg_v(s
, ot
, (modrm
& 7) | REX_B(s
), s
->T0
);
5486 case 0x8b: /* mov Ev, Gv */
5487 ot
= mo_b_d(b
, dflag
);
5488 modrm
= x86_ldub_code(env
, s
);
5489 reg
= ((modrm
>> 3) & 7) | rex_r
;
5491 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5492 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5494 case 0x8e: /* mov seg, Gv */
5495 modrm
= x86_ldub_code(env
, s
);
5496 reg
= (modrm
>> 3) & 7;
5497 if (reg
>= 6 || reg
== R_CS
)
5499 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5500 gen_movl_seg_T0(s
, reg
);
5501 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5502 if (s
->base
.is_jmp
) {
5503 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5506 gen_eob_inhibit_irq(s
, true);
5512 case 0x8c: /* mov Gv, seg */
5513 modrm
= x86_ldub_code(env
, s
);
5514 reg
= (modrm
>> 3) & 7;
5515 mod
= (modrm
>> 6) & 3;
5518 gen_op_movl_T0_seg(s
, reg
);
5519 ot
= mod
== 3 ? dflag
: MO_16
;
5520 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5523 case 0x1b6: /* movzbS Gv, Eb */
5524 case 0x1b7: /* movzwS Gv, Eb */
5525 case 0x1be: /* movsbS Gv, Eb */
5526 case 0x1bf: /* movswS Gv, Eb */
5531 /* d_ot is the size of destination */
5533 /* ot is the size of source */
5534 ot
= (b
& 1) + MO_8
;
5535 /* s_ot is the sign+size of source */
5536 s_ot
= b
& 8 ? MO_SIGN
| ot
: ot
;
5538 modrm
= x86_ldub_code(env
, s
);
5539 reg
= ((modrm
>> 3) & 7) | rex_r
;
5540 mod
= (modrm
>> 6) & 3;
5541 rm
= (modrm
& 7) | REX_B(s
);
5544 if (s_ot
== MO_SB
&& byte_reg_is_xH(s
, rm
)) {
5545 tcg_gen_sextract_tl(s
->T0
, cpu_regs
[rm
- 4], 8, 8);
5547 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5550 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
5553 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
5556 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5560 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5564 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
5566 gen_lea_modrm(env
, s
, modrm
);
5567 gen_op_ld_v(s
, s_ot
, s
->T0
, s
->A0
);
5568 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(s, a);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(s, dflag, reg, s->A0);
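        /*
         * Reading note: LEA reuses the ModRM address decoder but never
         * touches memory.  gen_lea_v_seg() is called with def_seg = -1 and
         * no override, so no segment base is folded in -- the destination
         * register simply receives the effective address, written with the
         * current operand size (dflag).
         */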
5587 case 0xa0: /* mov EAX, Ov */
5589 case 0xa2: /* mov Ov, EAX */
5592 target_ulong offset_addr
;
5594 ot
= mo_b_d(b
, dflag
);
5596 #ifdef TARGET_X86_64
5598 offset_addr
= x86_ldq_code(env
, s
);
5602 offset_addr
= insn_get(env
, s
, s
->aflag
);
5605 tcg_gen_movi_tl(s
->A0
, offset_addr
);
5606 gen_add_A0_ds_seg(s
);
5608 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5609 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
5611 gen_op_mov_v_reg(s
, ot
, s
->T0
, R_EAX
);
5612 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5616 case 0xd7: /* xlat */
5617 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EBX
]);
5618 tcg_gen_ext8u_tl(s
->T0
, cpu_regs
[R_EAX
]);
5619 tcg_gen_add_tl(s
->A0
, s
->A0
, s
->T0
);
5620 gen_extu(s
->aflag
, s
->A0
);
5621 gen_add_A0_ds_seg(s
);
5622 gen_op_ld_v(s
, MO_8
, s
->T0
, s
->A0
);
5623 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
5625 case 0xb0 ... 0xb7: /* mov R, Ib */
5626 val
= insn_get(env
, s
, MO_8
);
5627 tcg_gen_movi_tl(s
->T0
, val
);
5628 gen_op_mov_reg_v(s
, MO_8
, (b
& 7) | REX_B(s
), s
->T0
);
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, tmp);
            gen_op_mov_reg_v(s, MO_64, reg, s->T0);

            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, val);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
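        /*
         * Reading note: B8+rd with REX.W is the one x86 instruction that
         * carries a full 64-bit immediate ("movabs"), which is why this
         * case reads the immediate with x86_ldq_code() instead of the usual
         * insn_get() path used for the 16/32-bit forms.
         */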
5650 case 0x91 ... 0x97: /* xchg R, EAX */
5653 reg
= (b
& 7) | REX_B(s
);
5657 case 0x87: /* xchg Ev, Gv */
5658 ot
= mo_b_d(b
, dflag
);
5659 modrm
= x86_ldub_code(env
, s
);
5660 reg
= ((modrm
>> 3) & 7) | rex_r
;
5661 mod
= (modrm
>> 6) & 3;
5663 rm
= (modrm
& 7) | REX_B(s
);
5665 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5666 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
5667 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5668 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5670 gen_lea_modrm(env
, s
, modrm
);
5671 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5672 /* for xchg, lock is implicit */
5673 tcg_gen_atomic_xchg_tl(s
->T1
, s
->A0
, s
->T0
,
5674 s
->mem_index
, ot
| MO_LE
);
5675 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5678 case 0xc4: /* les Gv */
5679 /* In CODE64 this is VEX3; see above. */
5682 case 0xc5: /* lds Gv */
5683 /* In CODE64 this is VEX2; see above. */
5686 case 0x1b2: /* lss Gv */
5689 case 0x1b4: /* lfs Gv */
5692 case 0x1b5: /* lgs Gv */
5695 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
5696 modrm
= x86_ldub_code(env
, s
);
5697 reg
= ((modrm
>> 3) & 7) | rex_r
;
5698 mod
= (modrm
>> 6) & 3;
5701 gen_lea_modrm(env
, s
, modrm
);
5702 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5703 gen_add_A0_im(s
, 1 << ot
);
5704 /* load the segment first to handle exceptions properly */
5705 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5706 gen_movl_seg_T0(s
, op
);
5707 /* then put the data */
5708 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5709 if (s
->base
.is_jmp
) {
5710 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5715 /************************/
5723 ot
= mo_b_d(b
, dflag
);
5724 modrm
= x86_ldub_code(env
, s
);
5725 mod
= (modrm
>> 6) & 3;
5726 op
= (modrm
>> 3) & 7;
5732 gen_lea_modrm(env
, s
, modrm
);
5735 opreg
= (modrm
& 7) | REX_B(s
);
5740 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5743 shift
= x86_ldub_code(env
, s
);
5745 gen_shifti(s
, op
, ot
, opreg
, shift
);
5760 case 0x1a4: /* shld imm */
5764 case 0x1a5: /* shld cl */
5768 case 0x1ac: /* shrd imm */
5772 case 0x1ad: /* shrd cl */
5777 modrm
= x86_ldub_code(env
, s
);
5778 mod
= (modrm
>> 6) & 3;
5779 rm
= (modrm
& 7) | REX_B(s
);
5780 reg
= ((modrm
>> 3) & 7) | rex_r
;
5782 gen_lea_modrm(env
, s
, modrm
);
5787 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5790 TCGv imm
= tcg_const_tl(x86_ldub_code(env
, s
));
5791 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
5794 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
5798 /************************/
5801 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5802 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5803 /* XXX: what to do if illegal op ? */
5804 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5807 modrm
= x86_ldub_code(env
, s
);
5808 mod
= (modrm
>> 6) & 3;
5810 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5813 gen_lea_modrm(env
, s
, modrm
);
5815 case 0x00 ... 0x07: /* fxxxs */
5816 case 0x10 ... 0x17: /* fixxxl */
5817 case 0x20 ... 0x27: /* fxxxl */
5818 case 0x30 ... 0x37: /* fixxx */
5825 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5826 s
->mem_index
, MO_LEUL
);
5827 gen_helper_flds_FT0(cpu_env
, s
->tmp2_i32
);
5830 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5831 s
->mem_index
, MO_LEUL
);
5832 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
5835 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
5836 s
->mem_index
, MO_LEQ
);
5837 gen_helper_fldl_FT0(cpu_env
, s
->tmp1_i64
);
5841 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5842 s
->mem_index
, MO_LESW
);
5843 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
5847 gen_helper_fp_arith_ST0_FT0(op1
);
5849 /* fcomp needs pop */
5850 gen_helper_fpop(cpu_env
);
5854 case 0x08: /* flds */
5855 case 0x0a: /* fsts */
5856 case 0x0b: /* fstps */
5857 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5858 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5859 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5864 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5865 s
->mem_index
, MO_LEUL
);
5866 gen_helper_flds_ST0(cpu_env
, s
->tmp2_i32
);
5869 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5870 s
->mem_index
, MO_LEUL
);
5871 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
5874 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
5875 s
->mem_index
, MO_LEQ
);
5876 gen_helper_fldl_ST0(cpu_env
, s
->tmp1_i64
);
5880 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5881 s
->mem_index
, MO_LESW
);
5882 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
5887 /* XXX: the corresponding CPUID bit must be tested ! */
5890 gen_helper_fisttl_ST0(s
->tmp2_i32
, cpu_env
);
5891 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5892 s
->mem_index
, MO_LEUL
);
5895 gen_helper_fisttll_ST0(s
->tmp1_i64
, cpu_env
);
5896 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
5897 s
->mem_index
, MO_LEQ
);
5901 gen_helper_fistt_ST0(s
->tmp2_i32
, cpu_env
);
5902 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5903 s
->mem_index
, MO_LEUW
);
5906 gen_helper_fpop(cpu_env
);
5911 gen_helper_fsts_ST0(s
->tmp2_i32
, cpu_env
);
5912 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5913 s
->mem_index
, MO_LEUL
);
5916 gen_helper_fistl_ST0(s
->tmp2_i32
, cpu_env
);
5917 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5918 s
->mem_index
, MO_LEUL
);
5921 gen_helper_fstl_ST0(s
->tmp1_i64
, cpu_env
);
5922 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
5923 s
->mem_index
, MO_LEQ
);
5927 gen_helper_fist_ST0(s
->tmp2_i32
, cpu_env
);
5928 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5929 s
->mem_index
, MO_LEUW
);
5933 gen_helper_fpop(cpu_env
);
5937 case 0x0c: /* fldenv mem */
5938 gen_helper_fldenv(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
5940 case 0x0d: /* fldcw mem */
5941 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5942 s
->mem_index
, MO_LEUW
);
5943 gen_helper_fldcw(cpu_env
, s
->tmp2_i32
);
5945 case 0x0e: /* fnstenv mem */
5946 gen_helper_fstenv(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
5948 case 0x0f: /* fnstcw mem */
5949 gen_helper_fnstcw(s
->tmp2_i32
, cpu_env
);
5950 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5951 s
->mem_index
, MO_LEUW
);
5953 case 0x1d: /* fldt mem */
5954 gen_helper_fldt_ST0(cpu_env
, s
->A0
);
5956 case 0x1f: /* fstpt mem */
5957 gen_helper_fstt_ST0(cpu_env
, s
->A0
);
5958 gen_helper_fpop(cpu_env
);
5960 case 0x2c: /* frstor mem */
5961 gen_helper_frstor(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
5963 case 0x2e: /* fnsave mem */
5964 gen_helper_fsave(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
5966 case 0x2f: /* fnstsw mem */
5967 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
5968 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5969 s
->mem_index
, MO_LEUW
);
5971 case 0x3c: /* fbld */
5972 gen_helper_fbld_ST0(cpu_env
, s
->A0
);
5974 case 0x3e: /* fbstp */
5975 gen_helper_fbst_ST0(cpu_env
, s
->A0
);
5976 gen_helper_fpop(cpu_env
);
5978 case 0x3d: /* fildll */
5979 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
5980 gen_helper_fildll_ST0(cpu_env
, s
->tmp1_i64
);
5982 case 0x3f: /* fistpll */
5983 gen_helper_fistll_ST0(s
->tmp1_i64
, cpu_env
);
5984 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
5985 gen_helper_fpop(cpu_env
);
5991 /* register float ops */
5995 case 0x08: /* fld sti */
5996 gen_helper_fpush(cpu_env
);
5997 gen_helper_fmov_ST0_STN(cpu_env
,
5998 tcg_const_i32((opreg
+ 1) & 7));
6000 case 0x09: /* fxchg sti */
6001 case 0x29: /* fxchg4 sti, undocumented op */
6002 case 0x39: /* fxchg7 sti, undocumented op */
6003 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6005 case 0x0a: /* grp d9/2 */
6008 /* check exceptions (FreeBSD FPU probe) */
6009 gen_helper_fwait(cpu_env
);
6015 case 0x0c: /* grp d9/4 */
6018 gen_helper_fchs_ST0(cpu_env
);
6021 gen_helper_fabs_ST0(cpu_env
);
6024 gen_helper_fldz_FT0(cpu_env
);
6025 gen_helper_fcom_ST0_FT0(cpu_env
);
6028 gen_helper_fxam_ST0(cpu_env
);
6034 case 0x0d: /* grp d9/5 */
6038 gen_helper_fpush(cpu_env
);
6039 gen_helper_fld1_ST0(cpu_env
);
6042 gen_helper_fpush(cpu_env
);
6043 gen_helper_fldl2t_ST0(cpu_env
);
6046 gen_helper_fpush(cpu_env
);
6047 gen_helper_fldl2e_ST0(cpu_env
);
6050 gen_helper_fpush(cpu_env
);
6051 gen_helper_fldpi_ST0(cpu_env
);
6054 gen_helper_fpush(cpu_env
);
6055 gen_helper_fldlg2_ST0(cpu_env
);
6058 gen_helper_fpush(cpu_env
);
6059 gen_helper_fldln2_ST0(cpu_env
);
6062 gen_helper_fpush(cpu_env
);
6063 gen_helper_fldz_ST0(cpu_env
);
6070 case 0x0e: /* grp d9/6 */
6073 gen_helper_f2xm1(cpu_env
);
6076 gen_helper_fyl2x(cpu_env
);
6079 gen_helper_fptan(cpu_env
);
6081 case 3: /* fpatan */
6082 gen_helper_fpatan(cpu_env
);
6084 case 4: /* fxtract */
6085 gen_helper_fxtract(cpu_env
);
6087 case 5: /* fprem1 */
6088 gen_helper_fprem1(cpu_env
);
6090 case 6: /* fdecstp */
6091 gen_helper_fdecstp(cpu_env
);
6094 case 7: /* fincstp */
6095 gen_helper_fincstp(cpu_env
);
6099 case 0x0f: /* grp d9/7 */
6102 gen_helper_fprem(cpu_env
);
6104 case 1: /* fyl2xp1 */
6105 gen_helper_fyl2xp1(cpu_env
);
6108 gen_helper_fsqrt(cpu_env
);
6110 case 3: /* fsincos */
6111 gen_helper_fsincos(cpu_env
);
6113 case 5: /* fscale */
6114 gen_helper_fscale(cpu_env
);
6116 case 4: /* frndint */
6117 gen_helper_frndint(cpu_env
);
6120 gen_helper_fsin(cpu_env
);
6124 gen_helper_fcos(cpu_env
);
6128 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6129 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6130 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6136 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6138 gen_helper_fpop(cpu_env
);
6140 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6141 gen_helper_fp_arith_ST0_FT0(op1
);
6145 case 0x02: /* fcom */
6146 case 0x22: /* fcom2, undocumented op */
6147 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6148 gen_helper_fcom_ST0_FT0(cpu_env
);
6150 case 0x03: /* fcomp */
6151 case 0x23: /* fcomp3, undocumented op */
6152 case 0x32: /* fcomp5, undocumented op */
6153 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6154 gen_helper_fcom_ST0_FT0(cpu_env
);
6155 gen_helper_fpop(cpu_env
);
6157 case 0x15: /* da/5 */
6159 case 1: /* fucompp */
6160 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6161 gen_helper_fucom_ST0_FT0(cpu_env
);
6162 gen_helper_fpop(cpu_env
);
6163 gen_helper_fpop(cpu_env
);
6171 case 0: /* feni (287 only, just do nop here) */
6173 case 1: /* fdisi (287 only, just do nop here) */
6176 gen_helper_fclex(cpu_env
);
6178 case 3: /* fninit */
6179 gen_helper_fninit(cpu_env
);
6181 case 4: /* fsetpm (287 only, just do nop here) */
6187 case 0x1d: /* fucomi */
6188 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6191 gen_update_cc_op(s
);
6192 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6193 gen_helper_fucomi_ST0_FT0(cpu_env
);
6194 set_cc_op(s
, CC_OP_EFLAGS
);
6196 case 0x1e: /* fcomi */
6197 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6200 gen_update_cc_op(s
);
6201 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6202 gen_helper_fcomi_ST0_FT0(cpu_env
);
6203 set_cc_op(s
, CC_OP_EFLAGS
);
6205 case 0x28: /* ffree sti */
6206 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6208 case 0x2a: /* fst sti */
6209 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6211 case 0x2b: /* fstp sti */
6212 case 0x0b: /* fstp1 sti, undocumented op */
6213 case 0x3a: /* fstp8 sti, undocumented op */
6214 case 0x3b: /* fstp9 sti, undocumented op */
6215 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6216 gen_helper_fpop(cpu_env
);
6218 case 0x2c: /* fucom st(i) */
6219 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6220 gen_helper_fucom_ST0_FT0(cpu_env
);
6222 case 0x2d: /* fucomp st(i) */
6223 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6224 gen_helper_fucom_ST0_FT0(cpu_env
);
6225 gen_helper_fpop(cpu_env
);
6227 case 0x33: /* de/3 */
6229 case 1: /* fcompp */
6230 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6231 gen_helper_fcom_ST0_FT0(cpu_env
);
6232 gen_helper_fpop(cpu_env
);
6233 gen_helper_fpop(cpu_env
);
6239 case 0x38: /* ffreep sti, undocumented op */
6240 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6241 gen_helper_fpop(cpu_env
);
6243 case 0x3c: /* df/4 */
6246 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
6247 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
6248 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
6254 case 0x3d: /* fucomip */
6255 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6258 gen_update_cc_op(s
);
6259 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6260 gen_helper_fucomi_ST0_FT0(cpu_env
);
6261 gen_helper_fpop(cpu_env
);
6262 set_cc_op(s
, CC_OP_EFLAGS
);
6264 case 0x3e: /* fcomip */
6265 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6268 gen_update_cc_op(s
);
6269 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6270 gen_helper_fcomi_ST0_FT0(cpu_env
);
6271 gen_helper_fpop(cpu_env
);
6272 set_cc_op(s
, CC_OP_EFLAGS
);
6274 case 0x10 ... 0x13: /* fcmovxx */
6279 static const uint8_t fcmov_cc
[8] = {
6286 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6289 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6290 l1
= gen_new_label();
6291 gen_jcc1_noeob(s
, op1
, l1
);
6292 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6301 /************************/
6304 case 0xa4: /* movsS */
6306 ot
= mo_b_d(b
, dflag
);
6307 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6308 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6314 case 0xaa: /* stosS */
6316 ot
= mo_b_d(b
, dflag
);
6317 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6318 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6323 case 0xac: /* lodsS */
6325 ot
= mo_b_d(b
, dflag
);
6326 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6327 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6332 case 0xae: /* scasS */
6334 ot
= mo_b_d(b
, dflag
);
6335 if (prefixes
& PREFIX_REPNZ
) {
6336 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6337 } else if (prefixes
& PREFIX_REPZ
) {
6338 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6344 case 0xa6: /* cmpsS */
6346 ot
= mo_b_d(b
, dflag
);
6347 if (prefixes
& PREFIX_REPNZ
) {
6348 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6349 } else if (prefixes
& PREFIX_REPZ
) {
6350 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6355 case 0x6c: /* insS */
6357 ot
= mo_b_d32(b
, dflag
);
6358 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6359 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6360 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6361 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6362 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6365 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6366 gen_jmp(s
, s
->pc
- s
->cs_base
);
6370 case 0x6e: /* outsS */
6372 ot
= mo_b_d32(b
, dflag
);
6373 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6374 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6375 svm_is_rep(prefixes
) | 4);
6376 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6377 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6380 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6381 gen_jmp(s
, s
->pc
- s
->cs_base
);
6386 /************************/
6391 ot
= mo_b_d32(b
, dflag
);
6392 val
= x86_ldub_code(env
, s
);
6393 tcg_gen_movi_tl(s
->T0
, val
);
6394 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6395 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6396 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6399 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6400 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6401 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6402 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6403 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6405 gen_jmp(s
, s
->pc
- s
->cs_base
);
6410 ot
= mo_b_d32(b
, dflag
);
6411 val
= x86_ldub_code(env
, s
);
6412 tcg_gen_movi_tl(s
->T0
, val
);
6413 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6414 svm_is_rep(prefixes
));
6415 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6417 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6420 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6421 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6422 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6423 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6424 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6426 gen_jmp(s
, s
->pc
- s
->cs_base
);
6431 ot
= mo_b_d32(b
, dflag
);
6432 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6433 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6434 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6435 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6438 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
6439 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6440 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6441 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6442 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6444 gen_jmp(s
, s
->pc
- s
->cs_base
);
6449 ot
= mo_b_d32(b
, dflag
);
6450 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6451 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6452 svm_is_rep(prefixes
));
6453 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6455 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6458 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
6459 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6460 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6461 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6462 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6464 gen_jmp(s
, s
->pc
- s
->cs_base
);
6468 /************************/
6470 case 0xc2: /* ret im */
6471 val
= x86_ldsw_code(env
, s
);
6473 gen_stack_update(s
, val
+ (1 << ot
));
6474 /* Note that gen_pop_T0 uses a zero-extending load. */
6475 gen_op_jmp_v(s
->T0
);
6479 case 0xc3: /* ret */
6481 gen_pop_update(s
, ot
);
6482 /* Note that gen_pop_T0 uses a zero-extending load. */
6483 gen_op_jmp_v(s
->T0
);
6487 case 0xca: /* lret im */
6488 val
= x86_ldsw_code(env
, s
);
6490 if (s
->pe
&& !s
->vm86
) {
6491 gen_update_cc_op(s
);
6492 gen_jmp_im(s
, pc_start
- s
->cs_base
);
6493 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6494 tcg_const_i32(val
));
6498 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6499 /* NOTE: keeping EIP updated is not a problem in case of
6501 gen_op_jmp_v(s
->T0
);
6503 gen_add_A0_im(s
, 1 << dflag
);
6504 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6505 gen_op_movl_seg_T0_vm(s
, R_CS
);
6506 /* add stack offset */
6507 gen_stack_update(s
, val
+ (2 << dflag
));
6511 case 0xcb: /* lret */
6514 case 0xcf: /* iret */
6515 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6518 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6519 set_cc_op(s
, CC_OP_EFLAGS
);
6520 } else if (s
->vm86
) {
6522 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6524 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6525 set_cc_op(s
, CC_OP_EFLAGS
);
6528 gen_helper_iret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6529 tcg_const_i32(s
->pc
- s
->cs_base
));
6530 set_cc_op(s
, CC_OP_EFLAGS
);
6534 case 0xe8: /* call im */
6536 if (dflag
!= MO_16
) {
6537 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6539 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6541 next_eip
= s
->pc
- s
->cs_base
;
6543 if (dflag
== MO_16
) {
6545 } else if (!CODE64(s
)) {
6548 tcg_gen_movi_tl(s
->T0
, next_eip
);
6549 gen_push_v(s
, s
->T0
);
6554 case 0x9a: /* lcall im */
6556 unsigned int selector
, offset
;
6561 offset
= insn_get(env
, s
, ot
);
6562 selector
= insn_get(env
, s
, MO_16
);
6564 tcg_gen_movi_tl(s
->T0
, selector
);
6565 tcg_gen_movi_tl(s
->T1
, offset
);
6568 case 0xe9: /* jmp im */
6569 if (dflag
!= MO_16
) {
6570 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6572 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6574 tval
+= s
->pc
- s
->cs_base
;
6575 if (dflag
== MO_16
) {
6577 } else if (!CODE64(s
)) {
6583 case 0xea: /* ljmp im */
6585 unsigned int selector
, offset
;
6590 offset
= insn_get(env
, s
, ot
);
6591 selector
= insn_get(env
, s
, MO_16
);
6593 tcg_gen_movi_tl(s
->T0
, selector
);
6594 tcg_gen_movi_tl(s
->T1
, offset
);
6597 case 0xeb: /* jmp Jb */
6598 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6599 tval
+= s
->pc
- s
->cs_base
;
6600 if (dflag
== MO_16
) {
6605 case 0x70 ... 0x7f: /* jcc Jb */
6606 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6608 case 0x180 ... 0x18f: /* jcc Jv */
6609 if (dflag
!= MO_16
) {
6610 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6612 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6615 next_eip
= s
->pc
- s
->cs_base
;
6617 if (dflag
== MO_16
) {
6621 gen_jcc(s
, b
, tval
, next_eip
);
6624 case 0x190 ... 0x19f: /* setcc Gv */
6625 modrm
= x86_ldub_code(env
, s
);
6626 gen_setcc1(s
, b
, s
->T0
);
6627 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
6629 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6630 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6634 modrm
= x86_ldub_code(env
, s
);
6635 reg
= ((modrm
>> 3) & 7) | rex_r
;
6636 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6639 /************************/
6641 case 0x9c: /* pushf */
6642 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6643 if (s
->vm86
&& s
->iopl
!= 3) {
6644 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6646 gen_update_cc_op(s
);
6647 gen_helper_read_eflags(s
->T0
, cpu_env
);
6648 gen_push_v(s
, s
->T0
);
6651 case 0x9d: /* popf */
6652 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6653 if (s
->vm86
&& s
->iopl
!= 3) {
6654 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6658 if (dflag
!= MO_16
) {
6659 gen_helper_write_eflags(cpu_env
, s
->T0
,
6660 tcg_const_i32((TF_MASK
| AC_MASK
|
6665 gen_helper_write_eflags(cpu_env
, s
->T0
,
6666 tcg_const_i32((TF_MASK
| AC_MASK
|
6668 IF_MASK
| IOPL_MASK
)
6672 if (s
->cpl
<= s
->iopl
) {
6673 if (dflag
!= MO_16
) {
6674 gen_helper_write_eflags(cpu_env
, s
->T0
,
6675 tcg_const_i32((TF_MASK
|
6681 gen_helper_write_eflags(cpu_env
, s
->T0
,
6682 tcg_const_i32((TF_MASK
|
6690 if (dflag
!= MO_16
) {
6691 gen_helper_write_eflags(cpu_env
, s
->T0
,
6692 tcg_const_i32((TF_MASK
| AC_MASK
|
6693 ID_MASK
| NT_MASK
)));
6695 gen_helper_write_eflags(cpu_env
, s
->T0
,
6696 tcg_const_i32((TF_MASK
| AC_MASK
|
6702 gen_pop_update(s
, ot
);
6703 set_cc_op(s
, CC_OP_EFLAGS
);
6704 /* abort translation because TF/AC flag may change */
6705 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
6709 case 0x9e: /* sahf */
6710 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6712 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_AH
);
6713 gen_compute_eflags(s
);
6714 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6715 tcg_gen_andi_tl(s
->T0
, s
->T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6716 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, s
->T0
);
6718 case 0x9f: /* lahf */
6719 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6721 gen_compute_eflags(s
);
6722 /* Note: gen_compute_eflags() only gives the condition codes */
6723 tcg_gen_ori_tl(s
->T0
, cpu_cc_src
, 0x02);
6724 gen_op_mov_reg_v(s
, MO_8
, R_AH
, s
->T0
);
6726 case 0xf5: /* cmc */
6727 gen_compute_eflags(s
);
6728 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6730 case 0xf8: /* clc */
6731 gen_compute_eflags(s
);
6732 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6734 case 0xf9: /* stc */
6735 gen_compute_eflags(s
);
6736 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6738 case 0xfc: /* cld */
6739 tcg_gen_movi_i32(s
->tmp2_i32
, 1);
6740 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6742 case 0xfd: /* std */
6743 tcg_gen_movi_i32(s
->tmp2_i32
, -1);
6744 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6747 /************************/
6748 /* bit operations */
6749 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6751 modrm
= x86_ldub_code(env
, s
);
6752 op
= (modrm
>> 3) & 7;
6753 mod
= (modrm
>> 6) & 3;
6754 rm
= (modrm
& 7) | REX_B(s
);
6757 gen_lea_modrm(env
, s
, modrm
);
6758 if (!(s
->prefix
& PREFIX_LOCK
)) {
6759 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6762 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6765 val
= x86_ldub_code(env
, s
);
6766 tcg_gen_movi_tl(s
->T1
, val
);
6771 case 0x1a3: /* bt Gv, Ev */
6774 case 0x1ab: /* bts */
6777 case 0x1b3: /* btr */
6780 case 0x1bb: /* btc */
6784 modrm
= x86_ldub_code(env
, s
);
6785 reg
= ((modrm
>> 3) & 7) | rex_r
;
6786 mod
= (modrm
>> 6) & 3;
6787 rm
= (modrm
& 7) | REX_B(s
);
6788 gen_op_mov_v_reg(s
, MO_32
, s
->T1
, reg
);
6790 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6791 /* specific case: we need to add a displacement */
6792 gen_exts(ot
, s
->T1
);
6793 tcg_gen_sari_tl(s
->tmp0
, s
->T1
, 3 + ot
);
6794 tcg_gen_shli_tl(s
->tmp0
, s
->tmp0
, ot
);
6795 tcg_gen_add_tl(s
->A0
, gen_lea_modrm_1(s
, a
), s
->tmp0
);
6796 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6797 if (!(s
->prefix
& PREFIX_LOCK
)) {
6798 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6801 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6804 tcg_gen_andi_tl(s
->T1
, s
->T1
, (1 << (3 + ot
)) - 1);
6805 tcg_gen_movi_tl(s
->tmp0
, 1);
6806 tcg_gen_shl_tl(s
->tmp0
, s
->tmp0
, s
->T1
);
6807 if (s
->prefix
& PREFIX_LOCK
) {
6810 /* Needs no atomic ops; we surpressed the normal
6811 memory load for LOCK above so do it now. */
6812 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6815 tcg_gen_atomic_fetch_or_tl(s
->T0
, s
->A0
, s
->tmp0
,
6816 s
->mem_index
, ot
| MO_LE
);
6819 tcg_gen_not_tl(s
->tmp0
, s
->tmp0
);
6820 tcg_gen_atomic_fetch_and_tl(s
->T0
, s
->A0
, s
->tmp0
,
6821 s
->mem_index
, ot
| MO_LE
);
6825 tcg_gen_atomic_fetch_xor_tl(s
->T0
, s
->A0
, s
->tmp0
,
6826 s
->mem_index
, ot
| MO_LE
);
6829 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
6831 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
6834 /* Data already loaded; nothing to do. */
6837 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
6840 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->tmp0
);
6844 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->tmp0
);
6849 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
6851 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined.  */
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width.  */
            tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);

            /* Otherwise, generate EFLAGS and replace the C bit.  */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
                               ctz32(CC_C), 1);
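            /*
             * Reading note: after BT/BTS/BTR/BTC only CF is architecturally
             * defined (it holds the tested bit, saved in tmp4 above), ZF is
             * preserved and the rest are undefined.  When the pending cc_op
             * already derives Z from CC_DST, the code leaves CC_DST alone,
             * drops the tested bit into CC_SRC and switches to the matching
             * CC_OP_SAR* op so no flags need to be materialised; otherwise
             * it computes EFLAGS and patches the C bit (bit 0) in place.
             */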
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions.  */
        if ((prefixes & PREFIX_REPZ)
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
                /* For tzcnt, a zero input must return the operand size.  */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result.  */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero. */
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros.  */
                tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(s->T0, s->T0, s->T1);
                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
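        /*
         * Reading note: TZCNT and LZCNT are encoded as F3-prefixed BSF/BSR
         * (the PREFIX_REPZ test above) and live in different CPUID
         * extensions -- LZCNT under ABM, TZCNT under BMI1.  When the
         * feature bit is absent the prefix is ignored and the legacy
         * BSF/BSR behaviour applies, including the quirk that a zero input
         * leaves the destination unchanged, implemented by passing the old
         * register value as the "zero" result of clz/ctz.
         */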
6936 /************************/
6938 case 0x27: /* daa */
6941 gen_update_cc_op(s
);
6942 gen_helper_daa(cpu_env
);
6943 set_cc_op(s
, CC_OP_EFLAGS
);
6945 case 0x2f: /* das */
6948 gen_update_cc_op(s
);
6949 gen_helper_das(cpu_env
);
6950 set_cc_op(s
, CC_OP_EFLAGS
);
6952 case 0x37: /* aaa */
6955 gen_update_cc_op(s
);
6956 gen_helper_aaa(cpu_env
);
6957 set_cc_op(s
, CC_OP_EFLAGS
);
6959 case 0x3f: /* aas */
6962 gen_update_cc_op(s
);
6963 gen_helper_aas(cpu_env
);
6964 set_cc_op(s
, CC_OP_EFLAGS
);
6966 case 0xd4: /* aam */
6969 val
= x86_ldub_code(env
, s
);
6971 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
6973 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
6974 set_cc_op(s
, CC_OP_LOGICB
);
6977 case 0xd5: /* aad */
6980 val
= x86_ldub_code(env
, s
);
6981 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
6982 set_cc_op(s
, CC_OP_LOGICB
);
6984 /************************/
6986 case 0x90: /* nop */
6987 /* XXX: correct lock test for all insn */
6988 if (prefixes
& PREFIX_LOCK
) {
6991 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6993 goto do_xchg_reg_eax
;
6995 if (prefixes
& PREFIX_REPZ
) {
6996 gen_update_cc_op(s
);
6997 gen_jmp_im(s
, pc_start
- s
->cs_base
);
6998 gen_helper_pause(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
6999 s
->base
.is_jmp
= DISAS_NORETURN
;
7002 case 0x9b: /* fwait */
7003 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
7004 (HF_MP_MASK
| HF_TS_MASK
)) {
7005 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7007 gen_helper_fwait(cpu_env
);
7010 case 0xcc: /* int3 */
7011 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7013 case 0xcd: /* int N */
7014 val
= x86_ldub_code(env
, s
);
7015 if (s
->vm86
&& s
->iopl
!= 3) {
7016 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7018 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7021 case 0xce: /* into */
7024 gen_update_cc_op(s
);
7025 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7026 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7029 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7030 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
7032 gen_debug(s
, pc_start
- s
->cs_base
);
7035 tb_flush(CPU(x86_env_get_cpu(env
)));
7036 qemu_set_log(CPU_LOG_INT
| CPU_LOG_TB_IN_ASM
);
7040 case 0xfa: /* cli */
7042 if (s
->cpl
<= s
->iopl
) {
7043 gen_helper_cli(cpu_env
);
7045 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7049 gen_helper_cli(cpu_env
);
7051 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7055 case 0xfb: /* sti */
7056 if (s
->vm86
? s
->iopl
== 3 : s
->cpl
<= s
->iopl
) {
7057 gen_helper_sti(cpu_env
);
7058 /* interruptions are enabled only the first insn after sti */
7059 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7060 gen_eob_inhibit_irq(s
, true);
7062 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7065 case 0x62: /* bound */
7069 modrm
= x86_ldub_code(env
, s
);
7070 reg
= (modrm
>> 3) & 7;
7071 mod
= (modrm
>> 6) & 3;
7074 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
7075 gen_lea_modrm(env
, s
, modrm
);
7076 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7078 gen_helper_boundw(cpu_env
, s
->A0
, s
->tmp2_i32
);
7080 gen_helper_boundl(cpu_env
, s
->A0
, s
->tmp2_i32
);
7083 case 0x1c8 ... 0x1cf: /* bswap reg */
7084 reg
= (b
& 7) | REX_B(s
);
7085 #ifdef TARGET_X86_64
7086 if (dflag
== MO_64
) {
7087 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, reg
);
7088 tcg_gen_bswap64_i64(s
->T0
, s
->T0
);
7089 gen_op_mov_reg_v(s
, MO_64
, reg
, s
->T0
);
7093 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, reg
);
7094 tcg_gen_ext32u_tl(s
->T0
, s
->T0
);
7095 tcg_gen_bswap32_tl(s
->T0
, s
->T0
);
7096 gen_op_mov_reg_v(s
, MO_32
, reg
, s
->T0
);
7099 case 0xd6: /* salc */
7102 gen_compute_eflags_c(s
, s
->T0
);
7103 tcg_gen_neg_tl(s
->T0
, s
->T0
);
7104 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
7106 case 0xe0: /* loopnz */
7107 case 0xe1: /* loopz */
7108 case 0xe2: /* loop */
7109 case 0xe3: /* jecxz */
7111 TCGLabel
*l1
, *l2
, *l3
;
7113 tval
= (int8_t)insn_get(env
, s
, MO_8
);
7114 next_eip
= s
->pc
- s
->cs_base
;
7116 if (dflag
== MO_16
) {
7120 l1
= gen_new_label();
7121 l2
= gen_new_label();
7122 l3
= gen_new_label();
7125 case 0: /* loopnz */
7127 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7128 gen_op_jz_ecx(s
, s
->aflag
, l3
);
7129 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7132 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7133 gen_op_jnz_ecx(s
, s
->aflag
, l1
);
7137 gen_op_jz_ecx(s
, s
->aflag
, l1
);
7142 gen_jmp_im(s
, next_eip
);
7146 gen_jmp_im(s
, tval
);
7151 case 0x130: /* wrmsr */
7152 case 0x132: /* rdmsr */
7154 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7156 gen_update_cc_op(s
);
7157 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7159 gen_helper_rdmsr(cpu_env
);
7161 gen_helper_wrmsr(cpu_env
);
7165 case 0x131: /* rdtsc */
7166 gen_update_cc_op(s
);
7167 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7168 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7171 gen_helper_rdtsc(cpu_env
);
7172 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7174 gen_jmp(s
, s
->pc
- s
->cs_base
);
7177 case 0x133: /* rdpmc */
7178 gen_update_cc_op(s
);
7179 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7180 gen_helper_rdpmc(cpu_env
);
    case 0x134: /* sysenter */
        /* For Intel, SYSENTER remains valid in 64-bit mode */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_helper_sysenter(cpu_env);
            gen_eob(s);
        }
        break;
    case 0x135: /* sysexit */
        /* For Intel, SYSEXIT remains valid in 64-bit mode */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
            gen_eob(s);
        }
        break;
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode ? */
        gen_update_cc_op(s);
        gen_jmp_im(s, pc_start - s->cs_base);
        gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
        /* TF handling for the syscall insn is different. The TF bit is checked
           after the syscall insn completes. This allows #DB to not be
           generated after one has entered CPL0 if TF is set in FMASK.  */
        gen_eob_worker(s, false, true);
        break;
    case 0x107: /* sysret */
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (s->lma) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different. The TF bit is
               checked after the sysret insn completes. This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               returned.  */
            gen_eob_worker(s, false, true);
        }
        break;
#endif
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_jmp_im(s, pc_start - s->cs_base);
        gen_helper_cpuid(cpu_env);
        break;
    case 0xf4: /* hlt */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(cpu_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(cpu_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(cpu_env, s->T0);
            } else {
                gen_helper_verw(cpu_env, s->T0);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;
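    /*
     * 0f 01: the full ModRM byte selects the operation here -- the memory
     * forms are SGDT/SIDT/LGDT/LIDT, SMSW/LMSW and INVLPG, while the
     * register forms encode MONITOR/MWAIT, CLAC/STAC, XGETBV/XSETBV, the
     * SVM instructions, SWAPGS and RDTSCP.
     */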
    case 0x101:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0,
                             cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            gen_extu(s->aflag, s->A0);
            gen_add_A0_ds_seg(s);
            gen_helper_monitor(cpu_env, s->A0);
            break;

        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
            gen_eob(s);
            break;

        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || s->cpl != 0) {
                goto illegal_op;
            }
            gen_helper_clac(cpu_env);
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
            break;

        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || s->cpl != 0) {
                goto illegal_op;
            }
            gen_helper_stac(cpu_env);
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
            break;

        CASE_MODRM_MEM_OP(1): /* sidt */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;
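        /*
         * XGETBV/XSETBV use ECX as the XCR index and pass the value in
         * EDX:EAX.  XSETBV may change which state components are enabled,
         * so the generated code ends the TB afterwards.
         */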
        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;

        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change. */
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
            break;
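        /*
         * 0f 01 d8..df are the AMD SVM instructions.  They require EFER.SVME
         * (HF_SVME_MASK) and protected mode, and the privileged ones raise
         * #GP(0) outside CPL 0; VMRUN also terminates the TB since the world
         * switch invalidates the current translation state.
         */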
        case 0xd8: /* VMRUN */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
                             tcg_const_i32(s->pc - pc_start));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xd9: /* VMMCALL */
            if (!(s->flags & HF_SVME_MASK)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmmcall(cpu_env);
            break;

        case 0xda: /* VMLOAD */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(cpu_env);
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
            break;

        case 0xdd: /* CLGI */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_clgi(cpu_env);
            break;

        case 0xde: /* SKINIT */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !s->pe) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_skinit(cpu_env);
            break;

        case 0xdf: /* INVLPGA */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                goto illegal_op;
            }
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
            break;
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
            break;
        CASE_MODRM_OP(6): /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_helper_lmsw(cpu_env, s->T0);
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                break;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, s->A0);
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
            break;
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                } else {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_rdtscp(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
                gen_jmp(s, s->pc - s->cs_base);
            }
            break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start,
                                    (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
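    /*
     * Opcode 0x63 is MOVSXD in 64-bit code (sign-extend a 32-bit source into
     * a 64-bit register) and ARPL everywhere else, hence the CODE64 split
     * below.
     */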
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(s, MO_32, s->T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(s->T0, s->T0);
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        } else
#endif
        {
            TCGv t0, t1, t2, a0;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = MO_16;
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, s->A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, s->A0);
            } else {
                gen_op_mov_v_reg(s, ot, t0, rm);
                a0 = NULL;
            }
            gen_op_mov_v_reg(s, ot, t1, reg);
            tcg_gen_andi_tl(s->tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, a0);
            } else {
                gen_op_mov_reg_v(s, ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
        }
        break;
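    /*
     * LAR/LSL load the access rights byte or the segment limit for a
     * selector.  On success they set ZF and the destination is written; on
     * failure only the flags change, which the code below mirrors by
     * branching on the Z bit the helper leaves in cpu_cc_src.
     */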
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, s->T0);
            } else {
                gen_helper_lsl(t0, cpu_env, s->T0);
            }
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
                tcg_temp_free_i64(notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register -- the lower bound is 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;
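    /*
     * Moves to and from control registers go through helpers because they
     * can change paging and the cached translation flags.  The LOCK-prefixed
     * form of "mov cr0" is AMD's alternate encoding for CR8, gated on
     * CPUID_EXT3_CR8LEG below.
     */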
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            switch(reg) {
            case 0: case 2: case 3: case 4: case 8:
                gen_update_cc_op(s);
                gen_jmp_im(s, pc_start - s->cs_base);
                if (b & 2) {
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_op_mov_v_reg(s, ot, s->T0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                                         s->T0);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                    gen_jmp_im(s, s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_start();
                    }
                    gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                        gen_io_end();
                    }
                }
                break;
            default:
                goto unknown_op;
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
                gen_jmp_im(s, s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            gen_update_cc_op(s);
            gen_jmp_im(s, s->pc - s->cs_base);
            gen_eob(s);
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
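        /*
         * The register forms of 0f ae also encode the fences: f8..ff is
         * SFENCE (store ordering), e8..ef LFENCE (load ordering) and f0..f7
         * MFENCE (both); each maps onto tcg_gen_mb with the matching
         * TCG_MO_* flags below.
         */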
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        default:
            goto unknown_op;
        }
        break;

    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s, s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
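    /*
     * POPCNT is the F3-prefixed form of 0f b8, which is why the prefix set
     * must be exactly PREFIX_REPZ.  Architecturally it clears OF/SF/AF/CF/PF
     * and sets ZF when the source is zero; CC_OP_POPCNT lets those flags be
     * recomputed lazily from the value saved in cc_src.
     */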
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto unknown_op;
    }
    return s->pc;
 illegal_op:
    gen_illegal_opcode(s);
    return s->pc;
 unknown_op:
    gen_unknown_opcode(env, s);
    return s->pc;
}
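/*
 * tcg_x86_init wires up the fixed TCG globals declared at the top of this
 * file: cc_op/cc_dst/cc_src/cc_src2, the general-purpose registers, the
 * segment base addresses and the MPX bound registers all alias fields of
 * CPUX86State, e.g. cpu_regs[R_EAX] is backed by env->regs[R_EAX].
 */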
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    target_ulong cs_base = dc->base.tb->cs_base;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
#ifdef CONFIG_SOFTMMU
    dc->mem_index = cpu_mmu_index(env, false);
#endif
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK));
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes.  The first one was used
       always except single step mode.  And this setting
       disables jumps optimization and control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->ptr0 = tcg_temp_new_ptr();
    dc->ptr1 = tcg_temp_new_ptr();
    dc->cc_srcT = tcg_temp_local_new();
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}

static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                     const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    /* If RF is set, suppress an internally generated breakpoint.  */
    int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
    if (bp->flags & flags) {
        gen_debug(dc, dc->base.pc_next - dc->cs_base);
        dc->base.is_jmp = DISAS_NORETURN;
        /* The address covered by the breakpoint must be included in
           [tb->pc, tb->pc + tb->size) in order for it to be
           properly cleared -- thus we increment the PC here so that
           the generic logic setting tb->size later does the right thing.  */
        dc->base.pc_next += 1;
        return true;
    } else {
        return false;
    }
}
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_next = disas_insn(dc, cpu);

    if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) {
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
               && ((dc->base.pc_next & TARGET_PAGE_MASK)
                   != ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
                       & TARGET_PAGE_MASK)
                   || (dc->base.pc_next & ~TARGET_PAGE_MASK) == 0)) {
        /* Do not cross the boundary of the pages in icount mode,
           it can cause an exception.  Do it only when boundary is
           crossed by the first instruction in the block.
           If current instruction already crossed the bound - it's ok,
           because an exception hasn't stopped this code.
         */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }

    dc->base.pc_next = pc_next;
}
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->base.is_jmp == DISAS_TOO_MANY) {
        gen_jmp_im(dc, dc->base.pc_next - dc->cs_base);
        gen_eob(dc);
    }
}
static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .breakpoint_check   = i386_tr_breakpoint_check,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&i386_tr_ops, &dc.base, cpu, tb);
}

void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {