 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
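/* For example, CASE_MODRM_MEM_OP(0) expands to the three modrm ranges
 * 0x00-0x07, 0x40-0x47 and 0x80-0x87, i.e. every mod != 3 encoding with
 * reg/opcode field 0; CASE_MODRM_OP(0) additionally matches the register
 * forms 0xc0-0xc7.
 */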
//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    DisasContextBase base;

    /* current insn context */
    int override; /* -1 if no override */
    target_ulong pc_start;
    target_ulong pc; /* pc = eip + cs_base */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int tf;     /* TF cpu flag */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    /* TCG local register indexes (only used inside old micro ops) */
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s, TCGv dest);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
/* i386 arith/logic operations */
    OP_SHL1, /* undocumented */

/* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_TMP0 = 16,    /* temporary operand register */
    OR_A0, /* temporary register used when doing address evaluation */
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
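/* Worked example: switching from CC_OP_ADDB (uses DST and SRC) to
 * CC_OP_LOGICB (uses only DST) leaves cpu_cc_src dead, so set_cc_op()
 * below can discard that TCG global instead of keeping it live.
 */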
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
    if (reg >= 8 || s->x86_64_hregs) {
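/* Example: without a REX prefix, byte-register numbers 4..7 in a modrm byte
 * select AH/CH/DH/BH; with any REX prefix present (x86_64_hregs set) the
 * same numbers select SPL/BPL/SIL/DIL instead, so the function returns false.
 */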
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
    return ot == MO_16 ? MO_16 : MO_64;

/* Select the size of the stack pointer.  */
static inline TCGMemOp mo_stacksize(DisasContext *s)
    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
    return ot == MO_64 ? MO_64 : MO_32;

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
    return b & 1 ? ot : MO_8;

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
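/* Example: for IN/OUT opcodes an even opcode byte (b & 1 == 0) always
 * accesses an 8-bit port operand, while an odd one uses the current operand
 * size capped at 32 bits, since there are no 64-bit port I/O operations.
 */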
static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0)
        if (!byte_reg_is_xH(s, reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        tcg_gen_mov_tl(cpu_regs[reg], t0);

void gen_op_mov_v_reg(DisasContext *s, TCGMemOp ot, TCGv t0, int reg)
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
static void gen_add_A0_im(DisasContext *s, int val)
    tcg_gen_addi_tl(s->A0, s->A0, val);
        tcg_gen_ext32u_tl(s->A0, s->A0);

static inline void gen_op_jmp_v(TCGv dest)
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));

void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val)
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);

static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg)
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
        gen_op_st_v(s, idx, s->T0, s->A0);
        gen_op_mov_reg_v(s, idx, d, s->T0);

static inline void gen_jmp_im(DisasContext *s, target_ulong pc)
    tcg_gen_movi_tl(s->tmp0, pc);
    gen_op_jmp_v(s->tmp0);
/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
        tcg_gen_mov_tl(s->A0, a0);
        if (ovr_seg < 0 && s->addseg) {
        tcg_gen_ext32u_tl(s->A0, a0);
        tcg_gen_ext16u_tl(s->A0, a0);

        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
static inline void gen_string_movl_A0_ESI(DisasContext *s)
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);

static inline void gen_string_movl_A0_EDI(DisasContext *s)
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);

static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot)
    tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
        tcg_gen_ext8s_tl(dst, src);
        tcg_gen_ext8u_tl(dst, src);
        tcg_gen_ext16s_tl(dst, src);
        tcg_gen_ext16u_tl(dst, src);
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_ext32u_tl(dst, src);

static void gen_extu(TCGMemOp ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, false);

static void gen_exts(TCGMemOp ot, TCGv reg)
    gen_ext_tl(reg, reg, ot, true);
void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(size, s->tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, s->tmp0, 0, label1);

void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(size, s->tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);

static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
        gen_helper_inb(v, cpu_env, n);
        gen_helper_inw(v, cpu_env, n);
        gen_helper_inl(v, cpu_env, n);

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
        gen_helper_outb(cpu_env, v, n);
        gen_helper_outw(cpu_env, v, n);
        gen_helper_outl(cpu_env, v, n);
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            gen_helper_check_iob(cpu_env, s->tmp2_i32);
            gen_helper_check_iow(cpu_env, s->tmp2_i32);
            gen_helper_check_iol(cpu_env, s->tmp2_i32);
    if (s->flags & HF_GUEST_MASK) {
        gen_jmp_im(s, cur_eip);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_svm_check_io(cpu_env, s->tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);

static void gen_op_update1_cc(DisasContext *s)
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);

static void gen_op_update2_cc(DisasContext *s)
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);

static void gen_op_update_neg_cc(DisasContext *s)
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
    TCGv zero, dst, src1, src2;

    if (s->cc_op == CC_OP_EFLAGS) {
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);

    zero = tcg_const_tl(0);
    if (dead & USES_CC_DST) {
    if (dead & USES_CC_SRC) {
    if (dead & USES_CC_SRC2) {

    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
typedef struct CCPrepare {

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SARB ... CC_OP_SARQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
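/* A CCPrepare produced here is consumed by gen_setcc1()/gen_jcc1() further
 * down: the mask (when not -1) is applied first, then the flag is either
 * used directly when .no_setcond is set, or materialised with a
 * setcond/brcond against .reg2 or .imm.
 */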
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
        TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
        TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
        TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
        TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
897 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
899 int inv
, jcc_op
, cond
;
905 jcc_op
= (b
>> 1) & 7;
908 case CC_OP_SUBB
... CC_OP_SUBQ
:
909 /* We optimize relational operators for the cmp/jcc case. */
910 size
= s
->cc_op
- CC_OP_SUBB
;
913 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
914 gen_extu(size
, s
->tmp4
);
915 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
916 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= s
->tmp4
,
917 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
926 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
927 gen_exts(size
, s
->tmp4
);
928 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, true);
929 cc
= (CCPrepare
) { .cond
= cond
, .reg
= s
->tmp4
,
930 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
940 /* This actually generates good code for JC, JZ and JS. */
943 cc
= gen_prepare_eflags_o(s
, reg
);
946 cc
= gen_prepare_eflags_c(s
, reg
);
949 cc
= gen_prepare_eflags_z(s
, reg
);
952 gen_compute_eflags(s
);
953 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
954 .mask
= CC_Z
| CC_C
};
957 cc
= gen_prepare_eflags_s(s
, reg
);
960 cc
= gen_prepare_eflags_p(s
, reg
);
963 gen_compute_eflags(s
);
964 if (reg
== cpu_cc_src
) {
967 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
968 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
969 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
974 gen_compute_eflags(s
);
975 if (reg
== cpu_cc_src
) {
978 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
979 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
980 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
981 .mask
= CC_S
| CC_Z
};
988 cc
.cond
= tcg_invert_cond(cc
.cond
);
993 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
995 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
998 if (cc
.cond
== TCG_COND_EQ
) {
999 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1001 tcg_gen_mov_tl(reg
, cc
.reg
);
1006 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1007 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1008 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1009 tcg_gen_andi_tl(reg
, reg
, 1);
1012 if (cc
.mask
!= -1) {
1013 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1017 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1019 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1023 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1025 gen_setcc1(s
, JCC_B
<< 1, reg
);
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
1030 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, TCGLabel
*l1
)
1032 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1034 if (cc
.mask
!= -1) {
1035 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1039 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1041 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon. */
1048 static inline void gen_jcc1(DisasContext
*s
, int b
, TCGLabel
*l1
)
1050 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1052 gen_update_cc_op(s
);
1053 if (cc
.mask
!= -1) {
1054 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1057 set_cc_op(s
, CC_OP_DYNAMIC
);
1059 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1061 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1065 /* XXX: does not work with gdbstub "ice" single step - not a
1067 static TCGLabel
*gen_jz_ecx_string(DisasContext
*s
, target_ulong next_eip
)
1069 TCGLabel
*l1
= gen_new_label();
1070 TCGLabel
*l2
= gen_new_label();
1071 gen_op_jnz_ecx(s
, s
->aflag
, l1
);
1073 gen_jmp_tb(s
, next_eip
, 1);
1078 static inline void gen_stos(DisasContext
*s
, TCGMemOp ot
)
1080 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
1081 gen_string_movl_A0_EDI(s
);
1082 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1083 gen_op_movl_T0_Dshift(s
, ot
);
1084 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1087 static inline void gen_lods(DisasContext
*s
, TCGMemOp ot
)
1089 gen_string_movl_A0_ESI(s
);
1090 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1091 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
1092 gen_op_movl_T0_Dshift(s
, ot
);
1093 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1096 static inline void gen_scas(DisasContext
*s
, TCGMemOp ot
)
1098 gen_string_movl_A0_EDI(s
);
1099 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1100 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1101 gen_op_movl_T0_Dshift(s
, ot
);
1102 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1105 static inline void gen_cmps(DisasContext
*s
, TCGMemOp ot
)
1107 gen_string_movl_A0_EDI(s
);
1108 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1109 gen_string_movl_A0_ESI(s
);
1110 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1111 gen_op_movl_T0_Dshift(s
, ot
);
1112 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1113 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1116 static void gen_bpt_io(DisasContext
*s
, TCGv_i32 t_port
, int ot
)
1118 if (s
->flags
& HF_IOBPT_MASK
) {
1119 TCGv_i32 t_size
= tcg_const_i32(1 << ot
);
1120 TCGv t_next
= tcg_const_tl(s
->pc
- s
->cs_base
);
1122 gen_helper_bpt_io(cpu_env
, t_port
, t_size
, t_next
);
1123 tcg_temp_free_i32(t_size
);
1124 tcg_temp_free(t_next
);
1129 static inline void gen_ins(DisasContext
*s
, TCGMemOp ot
)
1131 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
1134 gen_string_movl_A0_EDI(s
);
1135 /* Note: we must do this dummy write first to be restartable in
1136 case of page fault. */
1137 tcg_gen_movi_tl(s
->T0
, 0);
1138 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1139 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1140 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1141 gen_helper_in_func(ot
, s
->T0
, s
->tmp2_i32
);
1142 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1143 gen_op_movl_T0_Dshift(s
, ot
);
1144 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1145 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1146 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
1151 static inline void gen_outs(DisasContext
*s
, TCGMemOp ot
)
1153 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
1156 gen_string_movl_A0_ESI(s
);
1157 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1159 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1160 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1161 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T0
);
1162 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
1163 gen_op_movl_T0_Dshift(s
, ot
);
1164 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1165 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1166 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);                                \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    gen_op_jz_ecx(s, s->aflag, l2);                                           \
    gen_jmp(s, cur_eip);                                                      \

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);                                \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    gen_op_jz_ecx(s, s->aflag, l2);                                           \
    gen_jmp(s, cur_eip);                                                      \
1215 static void gen_helper_fp_arith_ST0_FT0(int op
)
1219 gen_helper_fadd_ST0_FT0(cpu_env
);
1222 gen_helper_fmul_ST0_FT0(cpu_env
);
1225 gen_helper_fcom_ST0_FT0(cpu_env
);
1228 gen_helper_fcom_ST0_FT0(cpu_env
);
1231 gen_helper_fsub_ST0_FT0(cpu_env
);
1234 gen_helper_fsubr_ST0_FT0(cpu_env
);
1237 gen_helper_fdiv_ST0_FT0(cpu_env
);
1240 gen_helper_fdivr_ST0_FT0(cpu_env
);
1245 /* NOTE the exception in "r" op ordering */
1246 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1248 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1251 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1254 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1257 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1260 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1263 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1266 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1271 static void gen_exception(DisasContext
*s
, int trapno
, target_ulong cur_eip
)
1273 gen_update_cc_op(s
);
1274 gen_jmp_im(s
, cur_eip
);
1275 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
1276 s
->base
.is_jmp
= DISAS_NORETURN
;
1279 /* Generate #UD for the current instruction. The assumption here is that
1280 the instruction is known, but it isn't allowed in the current cpu mode. */
1281 static void gen_illegal_opcode(DisasContext
*s
)
1283 gen_exception(s
, EXCP06_ILLOP
, s
->pc_start
- s
->cs_base
);
1286 /* if d == OR_TMP0, it means memory operand (address in A0) */
1287 static void gen_op(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
)
1290 if (s1
->prefix
& PREFIX_LOCK
) {
1291 /* Lock prefix when destination is not memory. */
1292 gen_illegal_opcode(s1
);
1295 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1296 } else if (!(s1
->prefix
& PREFIX_LOCK
)) {
1297 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1301 gen_compute_eflags_c(s1
, s1
->tmp4
);
1302 if (s1
->prefix
& PREFIX_LOCK
) {
1303 tcg_gen_add_tl(s1
->T0
, s1
->tmp4
, s1
->T1
);
1304 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1305 s1
->mem_index
, ot
| MO_LE
);
1307 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1308 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1309 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1311 gen_op_update3_cc(s1
, s1
->tmp4
);
1312 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1315 gen_compute_eflags_c(s1
, s1
->tmp4
);
1316 if (s1
->prefix
& PREFIX_LOCK
) {
1317 tcg_gen_add_tl(s1
->T0
, s1
->T1
, s1
->tmp4
);
1318 tcg_gen_neg_tl(s1
->T0
, s1
->T0
);
1319 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1320 s1
->mem_index
, ot
| MO_LE
);
1322 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1323 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1324 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1326 gen_op_update3_cc(s1
, s1
->tmp4
);
1327 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1330 if (s1
->prefix
& PREFIX_LOCK
) {
1331 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1332 s1
->mem_index
, ot
| MO_LE
);
1334 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1335 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1337 gen_op_update2_cc(s1
);
1338 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1341 if (s1
->prefix
& PREFIX_LOCK
) {
1342 tcg_gen_neg_tl(s1
->T0
, s1
->T1
);
1343 tcg_gen_atomic_fetch_add_tl(s1
->cc_srcT
, s1
->A0
, s1
->T0
,
1344 s1
->mem_index
, ot
| MO_LE
);
1345 tcg_gen_sub_tl(s1
->T0
, s1
->cc_srcT
, s1
->T1
);
1347 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1348 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1349 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1351 gen_op_update2_cc(s1
);
1352 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1356 if (s1
->prefix
& PREFIX_LOCK
) {
1357 tcg_gen_atomic_and_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1358 s1
->mem_index
, ot
| MO_LE
);
1360 tcg_gen_and_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1361 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1363 gen_op_update1_cc(s1
);
1364 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1367 if (s1
->prefix
& PREFIX_LOCK
) {
1368 tcg_gen_atomic_or_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1369 s1
->mem_index
, ot
| MO_LE
);
1371 tcg_gen_or_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1372 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1374 gen_op_update1_cc(s1
);
1375 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1378 if (s1
->prefix
& PREFIX_LOCK
) {
1379 tcg_gen_atomic_xor_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1380 s1
->mem_index
, ot
| MO_LE
);
1382 tcg_gen_xor_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1383 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1385 gen_op_update1_cc(s1
);
1386 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
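/* The final compare arm above only updates the flag sources (CC_SRC,
 * cc_srcT, CC_DST) and sets CC_OP_SUBB + ot; unlike the other arithmetic
 * cases it performs no write-back of T0 to the destination operand.
 */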
1397 /* if d == OR_TMP0, it means memory operand (address in A0) */
1398 static void gen_inc(DisasContext
*s1
, TCGMemOp ot
, int d
, int c
)
1400 if (s1
->prefix
& PREFIX_LOCK
) {
1402 /* Lock prefix when destination is not memory */
1403 gen_illegal_opcode(s1
);
1406 tcg_gen_movi_tl(s1
->T0
, c
> 0 ? 1 : -1);
1407 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1408 s1
->mem_index
, ot
| MO_LE
);
1411 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1413 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1415 tcg_gen_addi_tl(s1
->T0
, s1
->T0
, (c
> 0 ? 1 : -1));
1416 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1419 gen_compute_eflags_c(s1
, cpu_cc_src
);
1420 tcg_gen_mov_tl(cpu_cc_dst
, s1
->T0
);
1421 set_cc_op(s1
, (c
> 0 ? CC_OP_INCB
: CC_OP_DECB
) + ot
);
1424 static void gen_shift_flags(DisasContext
*s
, TCGMemOp ot
, TCGv result
,
1425 TCGv shm1
, TCGv count
, bool is_right
)
1427 TCGv_i32 z32
, s32
, oldop
;
1430 /* Store the results into the CC variables. If we know that the
1431 variable must be dead, store unconditionally. Otherwise we'll
1432 need to not disrupt the current contents. */
1433 z_tl
= tcg_const_tl(0);
1434 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1435 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1436 result
, cpu_cc_dst
);
1438 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1440 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1441 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1444 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1446 tcg_temp_free(z_tl
);
1448 /* Get the two potential CC_OP values into temporaries. */
1449 tcg_gen_movi_i32(s
->tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1450 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1453 tcg_gen_movi_i32(s
->tmp3_i32
, s
->cc_op
);
1454 oldop
= s
->tmp3_i32
;
1457 /* Conditionally store the CC_OP value. */
1458 z32
= tcg_const_i32(0);
1459 s32
= tcg_temp_new_i32();
1460 tcg_gen_trunc_tl_i32(s32
, count
);
1461 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, s
->tmp2_i32
, oldop
);
1462 tcg_temp_free_i32(z32
);
1463 tcg_temp_free_i32(s32
);
1465 /* The CC_OP value is no longer predictable. */
1466 set_cc_op(s
, CC_OP_DYNAMIC
);
1469 static void gen_shift_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1470 int is_right
, int is_arith
)
1472 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1475 if (op1
== OR_TMP0
) {
1476 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1478 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1481 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1482 tcg_gen_subi_tl(s
->tmp0
, s
->T1
, 1);
1486 gen_exts(ot
, s
->T0
);
1487 tcg_gen_sar_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1488 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
1490 gen_extu(ot
, s
->T0
);
1491 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1492 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
1495 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1496 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
1500 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1502 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, s
->T1
, is_right
);
1505 static void gen_shift_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1506 int is_right
, int is_arith
)
1508 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1512 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1514 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1520 gen_exts(ot
, s
->T0
);
1521 tcg_gen_sari_tl(s
->tmp4
, s
->T0
, op2
- 1);
1522 tcg_gen_sari_tl(s
->T0
, s
->T0
, op2
);
1524 gen_extu(ot
, s
->T0
);
1525 tcg_gen_shri_tl(s
->tmp4
, s
->T0
, op2
- 1);
1526 tcg_gen_shri_tl(s
->T0
, s
->T0
, op2
);
1529 tcg_gen_shli_tl(s
->tmp4
, s
->T0
, op2
- 1);
1530 tcg_gen_shli_tl(s
->T0
, s
->T0
, op2
);
1535 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1537 /* update eflags if non zero shift */
1539 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
1540 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
1541 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1545 static void gen_rot_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
, int is_right
)
1547 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1551 if (op1
== OR_TMP0
) {
1552 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1554 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1557 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1561 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1562 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
1563 tcg_gen_muli_tl(s
->T0
, s
->T0
, 0x01010101);
1566 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1567 tcg_gen_deposit_tl(s
->T0
, s
->T0
, s
->T0
, 16, 16);
1570 #ifdef TARGET_X86_64
1572 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1573 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
1575 tcg_gen_rotr_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1577 tcg_gen_rotl_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1579 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1584 tcg_gen_rotr_tl(s
->T0
, s
->T0
, s
->T1
);
1586 tcg_gen_rotl_tl(s
->T0
, s
->T0
, s
->T1
);
1592 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1594 /* We'll need the flags computed into CC_SRC. */
1595 gen_compute_eflags(s
);
1597 /* The value that was "rotated out" is now present at the other end
1598 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1599 since we've computed the flags into CC_SRC, these variables are
1602 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1603 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1604 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1606 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1607 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1609 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1610 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1612 /* Now conditionally store the new CC_OP value. If the shift count
1613 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1614 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1615 exactly as we computed above. */
1616 t0
= tcg_const_i32(0);
1617 t1
= tcg_temp_new_i32();
1618 tcg_gen_trunc_tl_i32(t1
, s
->T1
);
1619 tcg_gen_movi_i32(s
->tmp2_i32
, CC_OP_ADCOX
);
1620 tcg_gen_movi_i32(s
->tmp3_i32
, CC_OP_EFLAGS
);
1621 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1622 s
->tmp2_i32
, s
->tmp3_i32
);
1623 tcg_temp_free_i32(t0
);
1624 tcg_temp_free_i32(t1
);
1626 /* The CC_OP value is no longer predictable. */
1627 set_cc_op(s
, CC_OP_DYNAMIC
);
1630 static void gen_rot_rm_im(DisasContext
*s
, TCGMemOp ot
, int op1
, int op2
,
1633 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1637 if (op1
== OR_TMP0
) {
1638 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1640 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1646 #ifdef TARGET_X86_64
1648 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1650 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1652 tcg_gen_rotli_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1654 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1659 tcg_gen_rotri_tl(s
->T0
, s
->T0
, op2
);
1661 tcg_gen_rotli_tl(s
->T0
, s
->T0
, op2
);
1672 shift
= mask
+ 1 - shift
;
1674 gen_extu(ot
, s
->T0
);
1675 tcg_gen_shli_tl(s
->tmp0
, s
->T0
, shift
);
1676 tcg_gen_shri_tl(s
->T0
, s
->T0
, mask
+ 1 - shift
);
1677 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
1683 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1686 /* Compute the flags into CC_SRC. */
1687 gen_compute_eflags(s
);
1689 /* The value that was "rotated out" is now present at the other end
1690 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1691 since we've computed the flags into CC_SRC, these variables are
1694 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1695 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1696 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1698 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1699 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1701 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1702 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1703 set_cc_op(s
, CC_OP_ADCOX
);
1707 /* XXX: add faster immediate = 1 case */
1708 static void gen_rotc_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1711 gen_compute_eflags(s
);
1712 assert(s
->cc_op
== CC_OP_EFLAGS
);
1716 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1718 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1723 gen_helper_rcrb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1726 gen_helper_rcrw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1729 gen_helper_rcrl(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1731 #ifdef TARGET_X86_64
1733 gen_helper_rcrq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1742 gen_helper_rclb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1745 gen_helper_rclw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1748 gen_helper_rcll(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1750 #ifdef TARGET_X86_64
1752 gen_helper_rclq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1760 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1763 /* XXX: add faster immediate case */
1764 static void gen_shiftd_rm_T1(DisasContext
*s
, TCGMemOp ot
, int op1
,
1765 bool is_right
, TCGv count_in
)
1767 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1771 if (op1
== OR_TMP0
) {
1772 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1774 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1777 count
= tcg_temp_new();
1778 tcg_gen_andi_tl(count
, count_in
, mask
);
1782 /* Note: we implement the Intel behaviour for shift count > 16.
1783 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1784 portion by constructing it as a 32-bit value. */
1786 tcg_gen_deposit_tl(s
->tmp0
, s
->T0
, s
->T1
, 16, 16);
1787 tcg_gen_mov_tl(s
->T1
, s
->T0
);
1788 tcg_gen_mov_tl(s
->T0
, s
->tmp0
);
1790 tcg_gen_deposit_tl(s
->T1
, s
->T0
, s
->T1
, 16, 16);
1793 #ifdef TARGET_X86_64
1795 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1796 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1798 tcg_gen_concat_tl_i64(s
->T0
, s
->T0
, s
->T1
);
1799 tcg_gen_shr_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1800 tcg_gen_shr_i64(s
->T0
, s
->T0
, count
);
1802 tcg_gen_concat_tl_i64(s
->T0
, s
->T1
, s
->T0
);
1803 tcg_gen_shl_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1804 tcg_gen_shl_i64(s
->T0
, s
->T0
, count
);
1805 tcg_gen_shri_i64(s
->tmp0
, s
->tmp0
, 32);
1806 tcg_gen_shri_i64(s
->T0
, s
->T0
, 32);
1811 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1813 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1815 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1816 tcg_gen_shr_tl(s
->T0
, s
->T0
, count
);
1817 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->tmp4
);
1819 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1821 /* Only needed if count > 16, for Intel behaviour. */
1822 tcg_gen_subfi_tl(s
->tmp4
, 33, count
);
1823 tcg_gen_shr_tl(s
->tmp4
, s
->T1
, s
->tmp4
);
1824 tcg_gen_or_tl(s
->tmp0
, s
->tmp0
, s
->tmp4
);
1827 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1828 tcg_gen_shl_tl(s
->T0
, s
->T0
, count
);
1829 tcg_gen_shr_tl(s
->T1
, s
->T1
, s
->tmp4
);
1831 tcg_gen_movi_tl(s
->tmp4
, 0);
1832 tcg_gen_movcond_tl(TCG_COND_EQ
, s
->T1
, count
, s
->tmp4
,
1834 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
1839 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1841 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, count
, is_right
);
1842 tcg_temp_free(count
);
1845 static void gen_shift(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int s
)
1848 gen_op_mov_v_reg(s1
, ot
, s1
->T1
, s
);
1851 gen_rot_rm_T1(s1
, ot
, d
, 0);
1854 gen_rot_rm_T1(s1
, ot
, d
, 1);
1858 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1861 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1864 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1867 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1870 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1875 static void gen_shifti(DisasContext
*s1
, int op
, TCGMemOp ot
, int d
, int c
)
1879 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1882 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1886 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1889 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1892 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1895 /* currently not optimized */
1896 tcg_gen_movi_tl(s1
->T1
, c
);
1897 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
#define X86_MAX_INSN_LENGTH 15
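/* x86 instructions are architecturally limited to 15 bytes; longer encodings
 * raise #GP. advance_pc() below relies on this limit to stop fetching bytes
 * for over-long (and therefore invalid) instructions.
 */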
static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
    uint64_t pc = s->pc;

    if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
        siglongjmp(s->jmpbuf, 1);

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
    return cpu_ldub_code(env, advance_pc(env, s, 1));

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
    return cpu_ldsw_code(env, advance_pc(env, s, 2));

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
    return cpu_lduw_code(env, advance_pc(env, s, 2));

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
    return cpu_ldl_code(env, advance_pc(env, s, 4));

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
    return cpu_ldq_code(env, advance_pc(env, s, 8));
1953 /* Decompose an address. */
1955 typedef struct AddressParts
{
1963 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
1966 int def_seg
, base
, index
, scale
, mod
, rm
;
1975 mod
= (modrm
>> 6) & 3;
1977 base
= rm
| REX_B(s
);
1980 /* Normally filtered out earlier, but including this path
1981 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1990 int code
= x86_ldub_code(env
, s
);
1991 scale
= (code
>> 6) & 3;
1992 index
= ((code
>> 3) & 7) | REX_X(s
);
1994 index
= -1; /* no index */
1996 base
= (code
& 7) | REX_B(s
);
2002 if ((base
& 7) == 5) {
2004 disp
= (int32_t)x86_ldl_code(env
, s
);
2005 if (CODE64(s
) && !havesib
) {
2007 disp
+= s
->pc
+ s
->rip_offset
;
2012 disp
= (int8_t)x86_ldub_code(env
, s
);
2016 disp
= (int32_t)x86_ldl_code(env
, s
);
2020 /* For correct popl handling with esp. */
2021 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2022 disp
+= s
->popl_esp_hack
;
2024 if (base
== R_EBP
|| base
== R_ESP
) {
2033 disp
= x86_lduw_code(env
, s
);
2036 } else if (mod
== 1) {
2037 disp
= (int8_t)x86_ldub_code(env
, s
);
2039 disp
= (int16_t)x86_lduw_code(env
, s
);
    return (AddressParts){ def_seg, base, index, scale, disp };
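/* Example: for "lea 0x10(%eax,%ebx,4), %ecx" the decomposition yields
 * def_seg = R_DS, base = R_EAX, index = R_EBX, scale = 2 and disp = 0x10;
 * gen_lea_modrm_1() below then computes A0 = EAX + (EBX << 2) + 0x10.
 */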
2086 /* Compute the address, with a minimum number of TCG ops. */
2087 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
)
2093 ea
= cpu_regs
[a
.index
];
2095 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
2099 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
2102 } else if (a
.base
>= 0) {
2103 ea
= cpu_regs
[a
.base
];
2106 tcg_gen_movi_tl(s
->A0
, a
.disp
);
2108 } else if (a
.disp
!= 0) {
2109 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
2116 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2118 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2119 TCGv ea
= gen_lea_modrm_1(s
, a
);
2120 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2123 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2125 (void)gen_lea_modrm_0(env
, s
, modrm
);
2128 /* Used for BNDCL, BNDCU, BNDCN. */
2129 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2130 TCGCond cond
, TCGv_i64 bndv
)
2132 TCGv ea
= gen_lea_modrm_1(s
, gen_lea_modrm_0(env
, s
, modrm
));
2134 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2136 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2138 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2139 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2140 gen_helper_bndck(cpu_env
, s
->tmp2_i32
);
2143 /* used for LEA and MOV AX, mem */
2144 static void gen_add_A0_ds_seg(DisasContext
*s
)
2146 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
2149 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2151 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2152 TCGMemOp ot
, int reg
, int is_store
)
2156 mod
= (modrm
>> 6) & 3;
2157 rm
= (modrm
& 7) | REX_B(s
);
2161 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2162 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
2164 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
2166 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2169 gen_lea_modrm(env
, s
, modrm
);
2172 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2173 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
2175 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
2177 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2182 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
)
2188 ret
= x86_ldub_code(env
, s
);
2191 ret
= x86_lduw_code(env
, s
);
2194 #ifdef TARGET_X86_64
2197 ret
= x86_ldl_code(env
, s
);
2205 static inline int insn_const_size(TCGMemOp ot
)
2214 static inline bool use_goto_tb(DisasContext
*s
, target_ulong pc
)
2216 #ifndef CONFIG_USER_ONLY
2217 return (pc
& TARGET_PAGE_MASK
) == (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) ||
2218 (pc
& TARGET_PAGE_MASK
) == (s
->pc_start
& TARGET_PAGE_MASK
);
2224 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2226 target_ulong pc
= s
->cs_base
+ eip
;
2228 if (use_goto_tb(s
, pc
)) {
2229 /* jump to same page: we can use a direct jump */
2230 tcg_gen_goto_tb(tb_num
);
2232 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
2233 s
->base
.is_jmp
= DISAS_NORETURN
;
2235 /* jump to another page */
2241 static inline void gen_jcc(DisasContext
*s
, int b
,
2242 target_ulong val
, target_ulong next_eip
)
2247 l1
= gen_new_label();
2250 gen_goto_tb(s
, 0, next_eip
);
2253 gen_goto_tb(s
, 1, val
);
2255 l1
= gen_new_label();
2256 l2
= gen_new_label();
2259 gen_jmp_im(s
, next_eip
);
2269 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, TCGMemOp ot
, int b
,
2274 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2276 cc
= gen_prepare_cc(s
, b
, s
->T1
);
2277 if (cc
.mask
!= -1) {
2278 TCGv t0
= tcg_temp_new();
2279 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2283 cc
.reg2
= tcg_const_tl(cc
.imm
);
2286 tcg_gen_movcond_tl(cc
.cond
, s
->T0
, cc
.reg
, cc
.reg2
,
2287 s
->T0
, cpu_regs
[reg
]);
2288 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2290 if (cc
.mask
!= -1) {
2291 tcg_temp_free(cc
.reg
);
2294 tcg_temp_free(cc
.reg2
);
2298 static inline void gen_op_movl_T0_seg(DisasContext
*s
, int seg_reg
)
2300 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
2301 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2304 static inline void gen_op_movl_seg_T0_vm(DisasContext
*s
, int seg_reg
)
2306 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
2307 tcg_gen_st32_tl(s
->T0
, cpu_env
,
2308 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2309 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], s
->T0
, 4);
2312 /* move T0 to seg_reg and compute if the CPU state may change. Never
2313 call this function with seg_reg == R_CS */
2314 static void gen_movl_seg_T0(DisasContext
*s
, int seg_reg
)
2316 if (s
->pe
&& !s
->vm86
) {
2317 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
2318 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), s
->tmp2_i32
);
2319 /* abort translation because the addseg value may change or
2320 because ss32 may change. For R_SS, translation must always
2321 stop as a special handling must be done to disable hardware
2322 interrupts for the next instruction */
2323 if (seg_reg
== R_SS
|| (s
->code32
&& seg_reg
< R_FS
)) {
2324 s
->base
.is_jmp
= DISAS_TOO_MANY
;
2327 gen_op_movl_seg_T0_vm(s
, seg_reg
);
2328 if (seg_reg
== R_SS
) {
2329 s
->base
.is_jmp
= DISAS_TOO_MANY
;
2334 static inline int svm_is_rep(int prefixes
)
2336 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2340 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2341 uint32_t type
, uint64_t param
)
2343 /* no SVM activated; fast case */
2344 if (likely(!(s
->flags
& HF_GUEST_MASK
)))
2346 gen_update_cc_op(s
);
2347 gen_jmp_im(s
, pc_start
- s
->cs_base
);
2348 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2349 tcg_const_i64(param
));
2353 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2355 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2358 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2360 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
    TCGMemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
        tcg_gen_mov_tl(new_esp, s->A0);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
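/* The store of VAL happens before ESP is updated, so a faulting push leaves
 * ESP unchanged and the instruction can be restarted with precise exception
 * state, mirroring the two-step pop described below.
 */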
2385 /* two step pop is necessary for precise exceptions */
2386 static TCGMemOp
gen_pop_T0(DisasContext
*s
)
2388 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2390 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2391 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2396 static inline void gen_pop_update(DisasContext
*s
, TCGMemOp ot
)
2398 gen_stack_update(s
, 1 << ot
);
2401 static inline void gen_stack_A0(DisasContext
*s
)
2403 gen_lea_v_seg(s
, s
->ss32
? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2406 static void gen_pusha(DisasContext
*s
)
2408 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2409 TCGMemOp d_ot
= s
->dflag
;
2410 int size
= 1 << d_ot
;
2413 for (i
= 0; i
< 8; i
++) {
2414 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2415 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2416 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], s
->A0
);
2419 gen_stack_update(s
, -8 * size
);
2422 static void gen_popa(DisasContext
*s
)
2424 TCGMemOp s_ot
= s
->ss32
? MO_32
: MO_16
;
2425 TCGMemOp d_ot
= s
->dflag
;
2426 int size
= 1 << d_ot
;
2429 for (i
= 0; i
< 8; i
++) {
2430 /* ESP is not reloaded */
2431 if (7 - i
== R_ESP
) {
2434 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], i
* size
);
2435 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2436 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2437 gen_op_mov_reg_v(s
, d_ot
, 7 - i
, s
->T0
);
2440 gen_stack_update(s
, 8 * size
);
2443 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2445 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2446 TCGMemOp a_ot
= CODE64(s
) ? MO_64
: s
->ss32
? MO_32
: MO_16
;
2447 int size
= 1 << d_ot
;
2449 /* Push BP; compute FrameTemp into T1. */
2450 tcg_gen_subi_tl(s
->T1
, cpu_regs
[R_ESP
], size
);
2451 gen_lea_v_seg(s
, a_ot
, s
->T1
, R_SS
, -1);
2452 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], s
->A0
);
2458 /* Copy level-1 pointers from the previous frame. */
2459 for (i
= 1; i
< level
; ++i
) {
2460 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_EBP
], size
* i
);
2461 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2462 gen_op_ld_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2464 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* i
);
2465 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2466 gen_op_st_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2469 /* Push the current FrameTemp as the last level. */
2470 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* level
);
2471 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2472 gen_op_st_v(s
, d_ot
, s
->T1
, s
->A0
);
2475 /* Copy the FrameTemp value to EBP. */
2476 gen_op_mov_reg_v(s
, a_ot
, R_EBP
, s
->T1
);
2478 /* Compute the final value of ESP. */
2479 tcg_gen_subi_tl(s
->T1
, s
->T1
, esp_addend
+ size
* level
);
2480 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2483 static void gen_leave(DisasContext
*s
)
2485 TCGMemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2486 TCGMemOp a_ot
= mo_stacksize(s
);
2488 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2489 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2491 tcg_gen_addi_tl(s
->T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2493 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T0
);
2494 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2497 /* Similarly, except that the assumption here is that we don't decode
2498 the instruction at all -- either a missing opcode, an unimplemented
2499 feature, or just a bogus instruction stream. */
2500 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2502 gen_illegal_opcode(s
);
2504 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2505 target_ulong pc
= s
->pc_start
, end
= s
->pc
;
2507 qemu_log("ILLOPC: " TARGET_FMT_lx
":", pc
);
2508 for (; pc
< end
; ++pc
) {
2509 qemu_log(" %02x", cpu_ldub_code(env
, pc
));
2516 /* an interrupt is different from an exception because of the
2518 static void gen_interrupt(DisasContext
*s
, int intno
,
2519 target_ulong cur_eip
, target_ulong next_eip
)
2521 gen_update_cc_op(s
);
2522 gen_jmp_im(s
, cur_eip
);
2523 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2524 tcg_const_i32(next_eip
- cur_eip
));
2525 s
->base
.is_jmp
= DISAS_NORETURN
;
2528 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2530 gen_update_cc_op(s
);
2531 gen_jmp_im(s
, cur_eip
);
2532 gen_helper_debug(cpu_env
);
2533 s
->base
.is_jmp
= DISAS_NORETURN
;
2536 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2538 if ((s
->flags
& mask
) == 0) {
2539 TCGv_i32 t
= tcg_temp_new_i32();
2540 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2541 tcg_gen_ori_i32(t
, t
, mask
);
2542 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2543 tcg_temp_free_i32(t
);
2548 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2550 if (s
->flags
& mask
) {
2551 TCGv_i32 t
= tcg_temp_new_i32();
2552 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2553 tcg_gen_andi_i32(t
, t
, ~mask
);
2554 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2555 tcg_temp_free_i32(t
);
/* Clear BND registers during legacy branches.  */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime.  */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(cpu_env);
    }
}
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns.  */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it.  */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->base.singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (recheck_tf) {
        gen_helper_rechecking_single_step(cpu_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->tf) {
        gen_helper_single_step(cpu_env);
    } else if (jr) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
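/* The JR variant of the worker above is used when the block ends in an
 * indirect jump: tcg_gen_lookup_and_goto_ptr() lets TCG look up and chain
 * directly to the destination TB instead of always bouncing back to the main
 * loop via tcg_gen_exit_tb().
 */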
static void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}

/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}

/* End of block, resetting the inhibit irq flag.  */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}
/* Jump to register */
static void gen_jr(DisasContext *s, TCGv dest)
{
    do_gen_eob_worker(s, false, false, true);
}
/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (s->jmp_opt) {
        gen_goto_tb(s, tb_num, eip);
    } else {
        gen_jmp_im(s, eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
}
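/* The two helpers above move a single 64-bit quantity between guest memory
 * at A0 and a field inside CPUX86State (an MMX register or one half of an
 * XMM register), using tmp1_i64 as the scratch value.
 */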
static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
}
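/* A 128-bit XMM memory access is emitted as two little-endian 64-bit
 * accesses (low quadword at A0, high quadword at A0 + 8), since the TCG
 * memory ops used here are at most 64 bits wide.
 */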
static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
}

static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset)
{
    tcg_gen_ld_i32(s->tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(s->tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(DisasContext *s, int d_offset)
{
    tcg_gen_movi_i64(s->tmp1_i64, 0);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset);
}
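/* gen_op_movo/movq/movl/movq_env_0 copy (or zero) 128/64/32-bit values
 * between fields of CPUX86State in host memory; no guest memory is touched,
 * so they serve the register-to-register forms of the various mov insns.
 */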
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);
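/* The suffix after SSEFunc_ encodes the prototype: the first letter is the
 * helper's result ('0' = none, 'i' = 32-bit, 'l' = 64-bit), passed as the
 * leading TCG argument, and the remaining letters are the operands
 * ('e' = cpu_env, 'p' = a pointer into env, 'i'/'l' = a 32/64-bit value,
 * 't' = a target-long value).
 */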
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
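/* MMX_OP2 pairs the MMX and XMM variants of a helper; SSE_FOP expands to the
 * four floating-point variants {ps, pd, ss, sd}.  In both cases the entry is
 * indexed by the mandatory prefix of the instruction (none, 66, F3, F2),
 * which is also what selects the column of sse_op_table1 below.
 */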
2727 static const SSEFunc_0_epp sse_op_table1
[256][4] = {
2728 /* 3DNow! extensions */
2729 [0x0e] = { SSE_DUMMY
}, /* femms */
2730 [0x0f] = { SSE_DUMMY
}, /* pf... */
2731 /* pure SSE operations */
2732 [0x10] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2733 [0x11] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movups, movupd, movss, movsd */
2734 [0x12] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd, movsldup, movddup */
2735 [0x13] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movlps, movlpd */
2736 [0x14] = { gen_helper_punpckldq_xmm
, gen_helper_punpcklqdq_xmm
},
2737 [0x15] = { gen_helper_punpckhdq_xmm
, gen_helper_punpckhqdq_xmm
},
2738 [0x16] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd, movshdup */
2739 [0x17] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movhps, movhpd */
2741 [0x28] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2742 [0x29] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movaps, movapd */
2743 [0x2a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2744 [0x2b] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movntps, movntpd, movntss, movntsd */
2745 [0x2c] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2746 [0x2d] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2747 [0x2e] = { gen_helper_ucomiss
, gen_helper_ucomisd
},
2748 [0x2f] = { gen_helper_comiss
, gen_helper_comisd
},
2749 [0x50] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movmskps, movmskpd */
2750 [0x51] = SSE_FOP(sqrt
),
2751 [0x52] = { gen_helper_rsqrtps
, NULL
, gen_helper_rsqrtss
, NULL
},
2752 [0x53] = { gen_helper_rcpps
, NULL
, gen_helper_rcpss
, NULL
},
2753 [0x54] = { gen_helper_pand_xmm
, gen_helper_pand_xmm
}, /* andps, andpd */
2754 [0x55] = { gen_helper_pandn_xmm
, gen_helper_pandn_xmm
}, /* andnps, andnpd */
2755 [0x56] = { gen_helper_por_xmm
, gen_helper_por_xmm
}, /* orps, orpd */
2756 [0x57] = { gen_helper_pxor_xmm
, gen_helper_pxor_xmm
}, /* xorps, xorpd */
2757 [0x58] = SSE_FOP(add
),
2758 [0x59] = SSE_FOP(mul
),
2759 [0x5a] = { gen_helper_cvtps2pd
, gen_helper_cvtpd2ps
,
2760 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
},
2761 [0x5b] = { gen_helper_cvtdq2ps
, gen_helper_cvtps2dq
, gen_helper_cvttps2dq
},
2762 [0x5c] = SSE_FOP(sub
),
2763 [0x5d] = SSE_FOP(min
),
2764 [0x5e] = SSE_FOP(div
),
2765 [0x5f] = SSE_FOP(max
),
2767 [0xc2] = SSE_FOP(cmpeq
),
2768 [0xc6] = { (SSEFunc_0_epp
)gen_helper_shufps
,
2769 (SSEFunc_0_epp
)gen_helper_shufpd
}, /* XXX: casts */
2771 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2772 [0x38] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2773 [0x3a] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2775 /* MMX ops and their SSE extensions */
2776 [0x60] = MMX_OP2(punpcklbw
),
2777 [0x61] = MMX_OP2(punpcklwd
),
2778 [0x62] = MMX_OP2(punpckldq
),
2779 [0x63] = MMX_OP2(packsswb
),
2780 [0x64] = MMX_OP2(pcmpgtb
),
2781 [0x65] = MMX_OP2(pcmpgtw
),
2782 [0x66] = MMX_OP2(pcmpgtl
),
2783 [0x67] = MMX_OP2(packuswb
),
2784 [0x68] = MMX_OP2(punpckhbw
),
2785 [0x69] = MMX_OP2(punpckhwd
),
2786 [0x6a] = MMX_OP2(punpckhdq
),
2787 [0x6b] = MMX_OP2(packssdw
),
2788 [0x6c] = { NULL
, gen_helper_punpcklqdq_xmm
},
2789 [0x6d] = { NULL
, gen_helper_punpckhqdq_xmm
},
2790 [0x6e] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movd mm, ea */
2791 [0x6f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, , movqdu */
2792 [0x70] = { (SSEFunc_0_epp
)gen_helper_pshufw_mmx
,
2793 (SSEFunc_0_epp
)gen_helper_pshufd_xmm
,
2794 (SSEFunc_0_epp
)gen_helper_pshufhw_xmm
,
2795 (SSEFunc_0_epp
)gen_helper_pshuflw_xmm
}, /* XXX: casts */
2796 [0x71] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftw */
2797 [0x72] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftd */
2798 [0x73] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* shiftq */
2799 [0x74] = MMX_OP2(pcmpeqb
),
2800 [0x75] = MMX_OP2(pcmpeqw
),
2801 [0x76] = MMX_OP2(pcmpeql
),
2802 [0x77] = { SSE_DUMMY
}, /* emms */
2803 [0x78] = { NULL
, SSE_SPECIAL
, NULL
, SSE_SPECIAL
}, /* extrq_i, insertq_i */
2804 [0x79] = { NULL
, gen_helper_extrq_r
, NULL
, gen_helper_insertq_r
},
2805 [0x7c] = { NULL
, gen_helper_haddpd
, NULL
, gen_helper_haddps
},
2806 [0x7d] = { NULL
, gen_helper_hsubpd
, NULL
, gen_helper_hsubps
},
2807 [0x7e] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movd, movd, , movq */
2808 [0x7f] = { SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
}, /* movq, movdqa, movdqu */
2809 [0xc4] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pinsrw */
2810 [0xc5] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pextrw */
2811 [0xd0] = { NULL
, gen_helper_addsubpd
, NULL
, gen_helper_addsubps
},
2812 [0xd1] = MMX_OP2(psrlw
),
2813 [0xd2] = MMX_OP2(psrld
),
2814 [0xd3] = MMX_OP2(psrlq
),
2815 [0xd4] = MMX_OP2(paddq
),
2816 [0xd5] = MMX_OP2(pmullw
),
2817 [0xd6] = { NULL
, SSE_SPECIAL
, SSE_SPECIAL
, SSE_SPECIAL
},
2818 [0xd7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* pmovmskb */
2819 [0xd8] = MMX_OP2(psubusb
),
2820 [0xd9] = MMX_OP2(psubusw
),
2821 [0xda] = MMX_OP2(pminub
),
2822 [0xdb] = MMX_OP2(pand
),
2823 [0xdc] = MMX_OP2(paddusb
),
2824 [0xdd] = MMX_OP2(paddusw
),
2825 [0xde] = MMX_OP2(pmaxub
),
2826 [0xdf] = MMX_OP2(pandn
),
2827 [0xe0] = MMX_OP2(pavgb
),
2828 [0xe1] = MMX_OP2(psraw
),
2829 [0xe2] = MMX_OP2(psrad
),
2830 [0xe3] = MMX_OP2(pavgw
),
2831 [0xe4] = MMX_OP2(pmulhuw
),
2832 [0xe5] = MMX_OP2(pmulhw
),
2833 [0xe6] = { NULL
, gen_helper_cvttpd2dq
, gen_helper_cvtdq2pd
, gen_helper_cvtpd2dq
},
2834 [0xe7] = { SSE_SPECIAL
, SSE_SPECIAL
}, /* movntq, movntq */
2835 [0xe8] = MMX_OP2(psubsb
),
2836 [0xe9] = MMX_OP2(psubsw
),
2837 [0xea] = MMX_OP2(pminsw
),
2838 [0xeb] = MMX_OP2(por
),
2839 [0xec] = MMX_OP2(paddsb
),
2840 [0xed] = MMX_OP2(paddsw
),
2841 [0xee] = MMX_OP2(pmaxsw
),
2842 [0xef] = MMX_OP2(pxor
),
2843 [0xf0] = { NULL
, NULL
, NULL
, SSE_SPECIAL
}, /* lddqu */
2844 [0xf1] = MMX_OP2(psllw
),
2845 [0xf2] = MMX_OP2(pslld
),
2846 [0xf3] = MMX_OP2(psllq
),
2847 [0xf4] = MMX_OP2(pmuludq
),
2848 [0xf5] = MMX_OP2(pmaddwd
),
2849 [0xf6] = MMX_OP2(psadbw
),
2850 [0xf7] = { (SSEFunc_0_epp
)gen_helper_maskmov_mmx
,
2851 (SSEFunc_0_epp
)gen_helper_maskmov_xmm
}, /* XXX: casts */
2852 [0xf8] = MMX_OP2(psubb
),
2853 [0xf9] = MMX_OP2(psubw
),
2854 [0xfa] = MMX_OP2(psubl
),
2855 [0xfb] = MMX_OP2(psubq
),
2856 [0xfc] = MMX_OP2(paddb
),
2857 [0xfd] = MMX_OP2(paddw
),
2858 [0xfe] = MMX_OP2(paddl
),
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
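/* sse_op_table2 handles the shift-by-immediate group 0f 71/72/73: the row
 * block (0, 8, 16) is the opcode (word, dword, qword element size), the row
 * offset is the /reg field of the modrm byte (2 = logical right, 4 =
 * arithmetic right, 6 = left; 3 and 7 are the XMM-only byte shifts), and the
 * column again selects the MMX or XMM helper.
 */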
static const SSEFunc_0_epi sse_op_table3ai[] = {
    gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd,
};

#ifdef TARGET_X86_64
static const SSEFunc_0_epl sse_op_table3aq[] = {
    gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd,
};
#endif

static const SSEFunc_i_ep sse_op_table3bi[] = {
    gen_helper_cvttss2si,
    gen_helper_cvtss2si,
    gen_helper_cvttsd2si,
    gen_helper_cvtsd2si,
};

#ifdef TARGET_X86_64
static const SSEFunc_l_ep sse_op_table3bq[] = {
    gen_helper_cvttss2sq,
    gen_helper_cvtss2sq,
    gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq,
};
#endif
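/* The 3a tables convert integers to scalar floats (cvtsi2ss/sd and, on
 * x86_64, the 64-bit source forms); the 3b tables convert scalar floats to
 * integers, with even indexes the truncating (cvtt*) forms and odd indexes
 * the rounding forms, selected in gen_sse by ((b >> 7) & 2) | (b & 1).
 */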
static const SSEFunc_0_epp sse_op_table4[8][4] = {
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
struct SSEOpHelper_epp {
    SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};

struct SSEOpHelper_eppi {
    SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};

#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
        CPUID_EXT_PCLMULQDQ }
#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
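/* Each SSEOpHelper entry pairs the per-prefix helpers with the CPUID feature
 * bit (ext_mask) that must be present for the insn to decode; the NULL in
 * the first slot of the SSE41/SSE42/PCLMULQDQ/AESNI macros reflects that
 * those insns have no MMX (no-prefix) form, only the 66-prefixed XMM form.
 */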
2958 static const struct SSEOpHelper_epp sse_op_table6
[256] = {
2959 [0x00] = SSSE3_OP(pshufb
),
2960 [0x01] = SSSE3_OP(phaddw
),
2961 [0x02] = SSSE3_OP(phaddd
),
2962 [0x03] = SSSE3_OP(phaddsw
),
2963 [0x04] = SSSE3_OP(pmaddubsw
),
2964 [0x05] = SSSE3_OP(phsubw
),
2965 [0x06] = SSSE3_OP(phsubd
),
2966 [0x07] = SSSE3_OP(phsubsw
),
2967 [0x08] = SSSE3_OP(psignb
),
2968 [0x09] = SSSE3_OP(psignw
),
2969 [0x0a] = SSSE3_OP(psignd
),
2970 [0x0b] = SSSE3_OP(pmulhrsw
),
2971 [0x10] = SSE41_OP(pblendvb
),
2972 [0x14] = SSE41_OP(blendvps
),
2973 [0x15] = SSE41_OP(blendvpd
),
2974 [0x17] = SSE41_OP(ptest
),
2975 [0x1c] = SSSE3_OP(pabsb
),
2976 [0x1d] = SSSE3_OP(pabsw
),
2977 [0x1e] = SSSE3_OP(pabsd
),
2978 [0x20] = SSE41_OP(pmovsxbw
),
2979 [0x21] = SSE41_OP(pmovsxbd
),
2980 [0x22] = SSE41_OP(pmovsxbq
),
2981 [0x23] = SSE41_OP(pmovsxwd
),
2982 [0x24] = SSE41_OP(pmovsxwq
),
2983 [0x25] = SSE41_OP(pmovsxdq
),
2984 [0x28] = SSE41_OP(pmuldq
),
2985 [0x29] = SSE41_OP(pcmpeqq
),
2986 [0x2a] = SSE41_SPECIAL
, /* movntqda */
2987 [0x2b] = SSE41_OP(packusdw
),
2988 [0x30] = SSE41_OP(pmovzxbw
),
2989 [0x31] = SSE41_OP(pmovzxbd
),
2990 [0x32] = SSE41_OP(pmovzxbq
),
2991 [0x33] = SSE41_OP(pmovzxwd
),
2992 [0x34] = SSE41_OP(pmovzxwq
),
2993 [0x35] = SSE41_OP(pmovzxdq
),
2994 [0x37] = SSE42_OP(pcmpgtq
),
2995 [0x38] = SSE41_OP(pminsb
),
2996 [0x39] = SSE41_OP(pminsd
),
2997 [0x3a] = SSE41_OP(pminuw
),
2998 [0x3b] = SSE41_OP(pminud
),
2999 [0x3c] = SSE41_OP(pmaxsb
),
3000 [0x3d] = SSE41_OP(pmaxsd
),
3001 [0x3e] = SSE41_OP(pmaxuw
),
3002 [0x3f] = SSE41_OP(pmaxud
),
3003 [0x40] = SSE41_OP(pmulld
),
3004 [0x41] = SSE41_OP(phminposuw
),
3005 [0xdb] = AESNI_OP(aesimc
),
3006 [0xdc] = AESNI_OP(aesenc
),
3007 [0xdd] = AESNI_OP(aesenclast
),
3008 [0xde] = AESNI_OP(aesdec
),
3009 [0xdf] = AESNI_OP(aesdeclast
),
3012 static const struct SSEOpHelper_eppi sse_op_table7
[256] = {
3013 [0x08] = SSE41_OP(roundps
),
3014 [0x09] = SSE41_OP(roundpd
),
3015 [0x0a] = SSE41_OP(roundss
),
3016 [0x0b] = SSE41_OP(roundsd
),
3017 [0x0c] = SSE41_OP(blendps
),
3018 [0x0d] = SSE41_OP(blendpd
),
3019 [0x0e] = SSE41_OP(pblendw
),
3020 [0x0f] = SSSE3_OP(palignr
),
3021 [0x14] = SSE41_SPECIAL
, /* pextrb */
3022 [0x15] = SSE41_SPECIAL
, /* pextrw */
3023 [0x16] = SSE41_SPECIAL
, /* pextrd/pextrq */
3024 [0x17] = SSE41_SPECIAL
, /* extractps */
3025 [0x20] = SSE41_SPECIAL
, /* pinsrb */
3026 [0x21] = SSE41_SPECIAL
, /* insertps */
3027 [0x22] = SSE41_SPECIAL
, /* pinsrd/pinsrq */
3028 [0x40] = SSE41_OP(dpps
),
3029 [0x41] = SSE41_OP(dppd
),
3030 [0x42] = SSE41_OP(mpsadbw
),
3031 [0x44] = PCLMULQDQ_OP(pclmulqdq
),
3032 [0x60] = SSE42_OP(pcmpestrm
),
3033 [0x61] = SSE42_OP(pcmpestri
),
3034 [0x62] = SSE42_OP(pcmpistrm
),
3035 [0x63] = SSE42_OP(pcmpistri
),
3036 [0xdf] = AESNI_OP(aeskeygenassist
),
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    if (s->prefix & PREFIX_DATA)
        b1 = 1;
    else if (s->prefix & PREFIX_REPZ)
        b1 = 2;
    else if (s->prefix & PREFIX_REPNZ)
        b1 = 3;
    else
        b1 = 0;
    sse_fn_epp = sse_op_table1[b][b1];
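/* b1 is the mandatory-prefix selector used throughout gen_sse: 0 for no
 * prefix (packed-single / MMX forms), 1 for 66, 2 for F3 (scalar single) and
 * 3 for F2 (scalar double), matching the column order of sse_op_table1.
 */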
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3073 /* simple MMX/SSE operation */
3074 if (s
->flags
& HF_TS_MASK
) {
3075 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3078 if (s
->flags
& HF_EM_MASK
) {
3080 gen_illegal_opcode(s
);
3084 && !(s
->flags
& HF_OSFXSR_MASK
)
3085 && ((b
!= 0x38 && b
!= 0x3a) || (s
->prefix
& PREFIX_DATA
))) {
3089 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
3090 /* If we were fully decoding this we might use illegal_op. */
3094 gen_helper_emms(cpu_env
);
3099 gen_helper_emms(cpu_env
);
3102 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3103 the static cpu state) */
3105 gen_helper_enter_mmx(cpu_env
);
3108 modrm
= x86_ldub_code(env
, s
);
3109 reg
= ((modrm
>> 3) & 7);
3112 mod
= (modrm
>> 6) & 3;
3113 if (sse_fn_epp
== SSE_SPECIAL
) {
3116 case 0x0e7: /* movntq */
3120 gen_lea_modrm(env
, s
, modrm
);
3121 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3123 case 0x1e7: /* movntdq */
3124 case 0x02b: /* movntps */
3125 case 0x12b: /* movntps */
3128 gen_lea_modrm(env
, s
, modrm
);
3129 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3131 case 0x3f0: /* lddqu */
3134 gen_lea_modrm(env
, s
, modrm
);
3135 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3137 case 0x22b: /* movntss */
3138 case 0x32b: /* movntsd */
3141 gen_lea_modrm(env
, s
, modrm
);
3143 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3144 xmm_regs
[reg
].ZMM_Q(0)));
3146 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
3147 xmm_regs
[reg
].ZMM_L(0)));
3148 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3151 case 0x6e: /* movd mm, ea */
3152 #ifdef TARGET_X86_64
3153 if (s
->dflag
== MO_64
) {
3154 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3155 tcg_gen_st_tl(s
->T0
, cpu_env
,
3156 offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3160 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3161 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3162 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3163 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3164 gen_helper_movl_mm_T0_mmx(s
->ptr0
, s
->tmp2_i32
);
3167 case 0x16e: /* movd xmm, ea */
3168 #ifdef TARGET_X86_64
3169 if (s
->dflag
== MO_64
) {
3170 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3171 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3172 offsetof(CPUX86State
,xmm_regs
[reg
]));
3173 gen_helper_movq_mm_T0_xmm(s
->ptr0
, s
->T0
);
3177 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3178 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3179 offsetof(CPUX86State
,xmm_regs
[reg
]));
3180 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3181 gen_helper_movl_mm_T0_xmm(s
->ptr0
, s
->tmp2_i32
);
3184 case 0x6f: /* movq mm, ea */
3186 gen_lea_modrm(env
, s
, modrm
);
3187 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3190 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
3191 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3192 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
3193 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3196 case 0x010: /* movups */
3197 case 0x110: /* movupd */
3198 case 0x028: /* movaps */
3199 case 0x128: /* movapd */
3200 case 0x16f: /* movdqa xmm, ea */
3201 case 0x26f: /* movdqu xmm, ea */
3203 gen_lea_modrm(env
, s
, modrm
);
3204 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3206 rm
= (modrm
& 7) | REX_B(s
);
3207 gen_op_movo(s
, offsetof(CPUX86State
, xmm_regs
[reg
]),
3208 offsetof(CPUX86State
,xmm_regs
[rm
]));
3211 case 0x210: /* movss xmm, ea */
3213 gen_lea_modrm(env
, s
, modrm
);
3214 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3215 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3216 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3217 tcg_gen_movi_tl(s
->T0
, 0);
3218 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3219 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)));
3220 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3221 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3222 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3223 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3225 rm
= (modrm
& 7) | REX_B(s
);
3226 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3227 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3230 case 0x310: /* movsd xmm, ea */
3232 gen_lea_modrm(env
, s
, modrm
);
3233 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3234 xmm_regs
[reg
].ZMM_Q(0)));
3235 tcg_gen_movi_tl(s
->T0
, 0);
3236 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3237 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3238 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3239 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3241 rm
= (modrm
& 7) | REX_B(s
);
3242 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3243 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3246 case 0x012: /* movlps */
3247 case 0x112: /* movlpd */
3249 gen_lea_modrm(env
, s
, modrm
);
3250 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3251 xmm_regs
[reg
].ZMM_Q(0)));
3254 rm
= (modrm
& 7) | REX_B(s
);
3255 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3256 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3259 case 0x212: /* movsldup */
3261 gen_lea_modrm(env
, s
, modrm
);
3262 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3264 rm
= (modrm
& 7) | REX_B(s
);
3265 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3266 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3267 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3268 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(2)));
3270 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3271 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3272 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3273 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3275 case 0x312: /* movddup */
3277 gen_lea_modrm(env
, s
, modrm
);
3278 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3279 xmm_regs
[reg
].ZMM_Q(0)));
3281 rm
= (modrm
& 7) | REX_B(s
);
3282 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3283 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3285 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3286 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3288 case 0x016: /* movhps */
3289 case 0x116: /* movhpd */
3291 gen_lea_modrm(env
, s
, modrm
);
3292 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3293 xmm_regs
[reg
].ZMM_Q(1)));
3296 rm
= (modrm
& 7) | REX_B(s
);
3297 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3298 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3301 case 0x216: /* movshdup */
3303 gen_lea_modrm(env
, s
, modrm
);
3304 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3306 rm
= (modrm
& 7) | REX_B(s
);
3307 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3308 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(1)));
3309 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3310 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(3)));
3312 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3313 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3314 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3315 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3320 int bit_index
, field_length
;
3322 if (b1
== 1 && reg
!= 0)
3324 field_length
= x86_ldub_code(env
, s
) & 0x3F;
3325 bit_index
= x86_ldub_code(env
, s
) & 0x3F;
3326 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3327 offsetof(CPUX86State
,xmm_regs
[reg
]));
3329 gen_helper_extrq_i(cpu_env
, s
->ptr0
,
3330 tcg_const_i32(bit_index
),
3331 tcg_const_i32(field_length
));
3333 gen_helper_insertq_i(cpu_env
, s
->ptr0
,
3334 tcg_const_i32(bit_index
),
3335 tcg_const_i32(field_length
));
3338 case 0x7e: /* movd ea, mm */
3339 #ifdef TARGET_X86_64
3340 if (s
->dflag
== MO_64
) {
3341 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3342 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3343 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3347 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3348 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3349 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3352 case 0x17e: /* movd ea, xmm */
3353 #ifdef TARGET_X86_64
3354 if (s
->dflag
== MO_64
) {
3355 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3356 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3357 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3361 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3362 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3363 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3366 case 0x27e: /* movq xmm, ea */
3368 gen_lea_modrm(env
, s
, modrm
);
3369 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3370 xmm_regs
[reg
].ZMM_Q(0)));
3372 rm
= (modrm
& 7) | REX_B(s
);
3373 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3374 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3376 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
3378 case 0x7f: /* movq ea, mm */
3380 gen_lea_modrm(env
, s
, modrm
);
3381 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3384 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[rm
].mmx
),
3385 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3388 case 0x011: /* movups */
3389 case 0x111: /* movupd */
3390 case 0x029: /* movaps */
3391 case 0x129: /* movapd */
3392 case 0x17f: /* movdqa ea, xmm */
3393 case 0x27f: /* movdqu ea, xmm */
3395 gen_lea_modrm(env
, s
, modrm
);
3396 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3398 rm
= (modrm
& 7) | REX_B(s
);
3399 gen_op_movo(s
, offsetof(CPUX86State
, xmm_regs
[rm
]),
3400 offsetof(CPUX86State
,xmm_regs
[reg
]));
3403 case 0x211: /* movss ea, xmm */
3405 gen_lea_modrm(env
, s
, modrm
);
3406 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3407 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3408 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3410 rm
= (modrm
& 7) | REX_B(s
);
3411 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_L(0)),
3412 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3415 case 0x311: /* movsd ea, xmm */
3417 gen_lea_modrm(env
, s
, modrm
);
3418 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3419 xmm_regs
[reg
].ZMM_Q(0)));
3421 rm
= (modrm
& 7) | REX_B(s
);
3422 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
3423 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3426 case 0x013: /* movlps */
3427 case 0x113: /* movlpd */
3429 gen_lea_modrm(env
, s
, modrm
);
3430 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3431 xmm_regs
[reg
].ZMM_Q(0)));
3436 case 0x017: /* movhps */
3437 case 0x117: /* movhpd */
3439 gen_lea_modrm(env
, s
, modrm
);
3440 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3441 xmm_regs
[reg
].ZMM_Q(1)));
3446 case 0x71: /* shift mm, im */
3449 case 0x171: /* shift xmm, im */
3455 val
= x86_ldub_code(env
, s
);
3457 tcg_gen_movi_tl(s
->T0
, val
);
3458 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3459 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3460 tcg_gen_movi_tl(s
->T0
, 0);
3461 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3462 offsetof(CPUX86State
, xmm_t0
.ZMM_L(1)));
3463 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3465 tcg_gen_movi_tl(s
->T0
, val
);
3466 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3467 offsetof(CPUX86State
, mmx_t0
.MMX_L(0)));
3468 tcg_gen_movi_tl(s
->T0
, 0);
3469 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3470 offsetof(CPUX86State
, mmx_t0
.MMX_L(1)));
3471 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3473 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3474 (((modrm
>> 3)) & 7)][b1
];
3479 rm
= (modrm
& 7) | REX_B(s
);
3480 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3483 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3485 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3486 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op1_offset
);
3487 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
3489 case 0x050: /* movmskps */
3490 rm
= (modrm
& 7) | REX_B(s
);
3491 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3492 offsetof(CPUX86State
,xmm_regs
[rm
]));
3493 gen_helper_movmskps(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3494 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3496 case 0x150: /* movmskpd */
3497 rm
= (modrm
& 7) | REX_B(s
);
3498 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3499 offsetof(CPUX86State
,xmm_regs
[rm
]));
3500 gen_helper_movmskpd(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3501 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3503 case 0x02a: /* cvtpi2ps */
3504 case 0x12a: /* cvtpi2pd */
3505 gen_helper_enter_mmx(cpu_env
);
3507 gen_lea_modrm(env
, s
, modrm
);
3508 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3509 gen_ldq_env_A0(s
, op2_offset
);
3512 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3514 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3515 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3516 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3519 gen_helper_cvtpi2ps(cpu_env
, s
->ptr0
, s
->ptr1
);
3523 gen_helper_cvtpi2pd(cpu_env
, s
->ptr0
, s
->ptr1
);
3527 case 0x22a: /* cvtsi2ss */
3528 case 0x32a: /* cvtsi2sd */
3529 ot
= mo_64_32(s
->dflag
);
3530 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3531 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3532 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3534 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3535 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3536 sse_fn_epi(cpu_env
, s
->ptr0
, s
->tmp2_i32
);
3538 #ifdef TARGET_X86_64
3539 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3540 sse_fn_epl(cpu_env
, s
->ptr0
, s
->T0
);
3546 case 0x02c: /* cvttps2pi */
3547 case 0x12c: /* cvttpd2pi */
3548 case 0x02d: /* cvtps2pi */
3549 case 0x12d: /* cvtpd2pi */
3550 gen_helper_enter_mmx(cpu_env
);
3552 gen_lea_modrm(env
, s
, modrm
);
3553 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3554 gen_ldo_env_A0(s
, op2_offset
);
3556 rm
= (modrm
& 7) | REX_B(s
);
3557 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3559 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3560 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3561 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3564 gen_helper_cvttps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3567 gen_helper_cvttpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3570 gen_helper_cvtps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3573 gen_helper_cvtpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3577 case 0x22c: /* cvttss2si */
3578 case 0x32c: /* cvttsd2si */
3579 case 0x22d: /* cvtss2si */
3580 case 0x32d: /* cvtsd2si */
3581 ot
= mo_64_32(s
->dflag
);
3583 gen_lea_modrm(env
, s
, modrm
);
3585 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_Q(0)));
3587 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3588 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3589 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3591 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3593 rm
= (modrm
& 7) | REX_B(s
);
3594 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3596 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3598 SSEFunc_i_ep sse_fn_i_ep
=
3599 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3600 sse_fn_i_ep(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3601 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
3603 #ifdef TARGET_X86_64
3604 SSEFunc_l_ep sse_fn_l_ep
=
3605 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3606 sse_fn_l_ep(s
->T0
, cpu_env
, s
->ptr0
);
3611 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3613 case 0xc4: /* pinsrw */
3616 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3617 val
= x86_ldub_code(env
, s
);
3620 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3621 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_W(val
)));
3624 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3625 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3628 case 0xc5: /* pextrw */
3632 ot
= mo_64_32(s
->dflag
);
3633 val
= x86_ldub_code(env
, s
);
3636 rm
= (modrm
& 7) | REX_B(s
);
3637 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
3638 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_W(val
)));
3642 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
3643 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3645 reg
= ((modrm
>> 3) & 7) | rex_r
;
3646 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3648 case 0x1d6: /* movq ea, xmm */
3650 gen_lea_modrm(env
, s
, modrm
);
3651 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3652 xmm_regs
[reg
].ZMM_Q(0)));
3654 rm
= (modrm
& 7) | REX_B(s
);
3655 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
3656 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3657 gen_op_movq_env_0(s
,
3658 offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(1)));
3661 case 0x2d6: /* movq2dq */
3662 gen_helper_enter_mmx(cpu_env
);
3664 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3665 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3666 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
3668 case 0x3d6: /* movdq2q */
3669 gen_helper_enter_mmx(cpu_env
);
3670 rm
= (modrm
& 7) | REX_B(s
);
3671 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[reg
& 7].mmx
),
3672 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3674 case 0xd7: /* pmovmskb */
3679 rm
= (modrm
& 7) | REX_B(s
);
3680 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3681 offsetof(CPUX86State
, xmm_regs
[rm
]));
3682 gen_helper_pmovmskb_xmm(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3685 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3686 offsetof(CPUX86State
, fpregs
[rm
].mmx
));
3687 gen_helper_pmovmskb_mmx(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3689 reg
= ((modrm
>> 3) & 7) | rex_r
;
3690 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3696 if ((b
& 0xf0) == 0xf0) {
3699 modrm
= x86_ldub_code(env
, s
);
3701 reg
= ((modrm
>> 3) & 7) | rex_r
;
3702 mod
= (modrm
>> 6) & 3;
3707 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3711 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3715 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3717 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3719 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3720 gen_lea_modrm(env
, s
, modrm
);
3722 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3723 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3724 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3725 gen_ldq_env_A0(s
, op2_offset
+
3726 offsetof(ZMMReg
, ZMM_Q(0)));
3728 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3729 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3730 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
3731 s
->mem_index
, MO_LEUL
);
3732 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, op2_offset
+
3733 offsetof(ZMMReg
, ZMM_L(0)));
3735 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3736 tcg_gen_qemu_ld_tl(s
->tmp0
, s
->A0
,
3737 s
->mem_index
, MO_LEUW
);
3738 tcg_gen_st16_tl(s
->tmp0
, cpu_env
, op2_offset
+
3739 offsetof(ZMMReg
, ZMM_W(0)));
3741 case 0x2a: /* movntqda */
3742 gen_ldo_env_A0(s
, op1_offset
);
3745 gen_ldo_env_A0(s
, op2_offset
);
3749 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3751 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3753 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3754 gen_lea_modrm(env
, s
, modrm
);
3755 gen_ldq_env_A0(s
, op2_offset
);
3758 if (sse_fn_epp
== SSE_SPECIAL
) {
3762 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3763 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3764 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
3767 set_cc_op(s
, CC_OP_EFLAGS
);
3774 /* Various integer extensions at 0f 38 f[0-f]. */
3775 b
= modrm
| (b1
<< 8);
3776 modrm
= x86_ldub_code(env
, s
);
3777 reg
= ((modrm
>> 3) & 7) | rex_r
;
3780 case 0x3f0: /* crc32 Gd,Eb */
3781 case 0x3f1: /* crc32 Gd,Ey */
3783 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3786 if ((b
& 0xff) == 0xf0) {
3788 } else if (s
->dflag
!= MO_64
) {
3789 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3794 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[reg
]);
3795 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3796 gen_helper_crc32(s
->T0
, s
->tmp2_i32
,
3797 s
->T0
, tcg_const_i32(8 << ot
));
3799 ot
= mo_64_32(s
->dflag
);
3800 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3803 case 0x1f0: /* crc32 or movbe */
3805 /* For these insns, the f3 prefix is supposed to have priority
3806 over the 66 prefix, but that's not what we implement above
3808 if (s
->prefix
& PREFIX_REPNZ
) {
3812 case 0x0f0: /* movbe Gy,My */
3813 case 0x0f1: /* movbe My,Gy */
3814 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3817 if (s
->dflag
!= MO_64
) {
3818 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3823 gen_lea_modrm(env
, s
, modrm
);
3825 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
3826 s
->mem_index
, ot
| MO_BE
);
3827 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3829 tcg_gen_qemu_st_tl(cpu_regs
[reg
], s
->A0
,
3830 s
->mem_index
, ot
| MO_BE
);
3834 case 0x0f2: /* andn Gy, By, Ey */
3835 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3836 || !(s
->prefix
& PREFIX_VEX
)
3840 ot
= mo_64_32(s
->dflag
);
3841 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3842 tcg_gen_andc_tl(s
->T0
, s
->T0
, cpu_regs
[s
->vex_v
]);
3843 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3844 gen_op_update1_cc(s
);
3845 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3848 case 0x0f7: /* bextr Gy, Ey, By */
3849 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3850 || !(s
->prefix
& PREFIX_VEX
)
3854 ot
= mo_64_32(s
->dflag
);
3858 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3859 /* Extract START, and shift the operand.
3860 Shifts larger than operand size get zeros. */
3861 tcg_gen_ext8u_tl(s
->A0
, cpu_regs
[s
->vex_v
]);
3862 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->A0
);
3864 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3865 zero
= tcg_const_tl(0);
3866 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->T0
, s
->A0
, bound
,
3868 tcg_temp_free(zero
);
3870 /* Extract the LEN into a mask. Lengths larger than
3871 operand size get all ones. */
3872 tcg_gen_extract_tl(s
->A0
, cpu_regs
[s
->vex_v
], 8, 8);
3873 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->A0
, s
->A0
, bound
,
3875 tcg_temp_free(bound
);
3876 tcg_gen_movi_tl(s
->T1
, 1);
3877 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->A0
);
3878 tcg_gen_subi_tl(s
->T1
, s
->T1
, 1);
3879 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
3881 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3882 gen_op_update1_cc(s
);
3883 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3887 case 0x0f5: /* bzhi Gy, Ey, By */
3888 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3889 || !(s
->prefix
& PREFIX_VEX
)
3893 ot
= mo_64_32(s
->dflag
);
3894 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3895 tcg_gen_ext8u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3897 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3898 /* Note that since we're using BMILG (in order to get O
3899 cleared) we need to store the inverse into C. */
3900 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
3902 tcg_gen_movcond_tl(TCG_COND_GT
, s
->T1
, s
->T1
,
3903 bound
, bound
, s
->T1
);
3904 tcg_temp_free(bound
);
3906 tcg_gen_movi_tl(s
->A0
, -1);
3907 tcg_gen_shl_tl(s
->A0
, s
->A0
, s
->T1
);
3908 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->A0
);
3909 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3910 gen_op_update1_cc(s
);
3911 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3914 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3915 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3916 || !(s
->prefix
& PREFIX_VEX
)
3920 ot
= mo_64_32(s
->dflag
);
3921 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3924 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3925 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EDX
]);
3926 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
3927 s
->tmp2_i32
, s
->tmp3_i32
);
3928 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], s
->tmp2_i32
);
3929 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp3_i32
);
3931 #ifdef TARGET_X86_64
3933 tcg_gen_mulu2_i64(s
->T0
, s
->T1
,
3934 s
->T0
, cpu_regs
[R_EDX
]);
3935 tcg_gen_mov_i64(cpu_regs
[s
->vex_v
], s
->T0
);
3936 tcg_gen_mov_i64(cpu_regs
[reg
], s
->T1
);
3942 case 0x3f5: /* pdep Gy, By, Ey */
3943 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3944 || !(s
->prefix
& PREFIX_VEX
)
3948 ot
= mo_64_32(s
->dflag
);
3949 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3950 /* Note that by zero-extending the mask operand, we
3951 automatically handle zero-extending the result. */
3953 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3955 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3957 gen_helper_pdep(cpu_regs
[reg
], s
->T0
, s
->T1
);
3960 case 0x2f5: /* pext Gy, By, Ey */
3961 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3962 || !(s
->prefix
& PREFIX_VEX
)
3966 ot
= mo_64_32(s
->dflag
);
3967 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3968 /* Note that by zero-extending the mask operand, we
3969 automatically handle zero-extending the result. */
3971 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3973 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3975 gen_helper_pext(cpu_regs
[reg
], s
->T0
, s
->T1
);
3978 case 0x1f6: /* adcx Gy, Ey */
3979 case 0x2f6: /* adox Gy, Ey */
3980 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
3983 TCGv carry_in
, carry_out
, zero
;
3986 ot
= mo_64_32(s
->dflag
);
3987 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3989 /* Re-use the carry-out from a previous round. */
3991 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
3995 carry_in
= cpu_cc_dst
;
3996 end_op
= CC_OP_ADCX
;
3998 end_op
= CC_OP_ADCOX
;
4003 end_op
= CC_OP_ADCOX
;
4005 carry_in
= cpu_cc_src2
;
4006 end_op
= CC_OP_ADOX
;
4010 end_op
= CC_OP_ADCOX
;
4011 carry_in
= carry_out
;
4014 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
4017 /* If we can't reuse carry-out, get it out of EFLAGS. */
4019 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
4020 gen_compute_eflags(s
);
4023 tcg_gen_extract_tl(carry_in
, cpu_cc_src
,
4024 ctz32(b
== 0x1f6 ? CC_C
: CC_O
), 1);
4028 #ifdef TARGET_X86_64
4030 /* If we know TL is 64-bit, and we want a 32-bit
4031 result, just do everything in 64-bit arithmetic. */
4032 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
4033 tcg_gen_ext32u_i64(s
->T0
, s
->T0
);
4034 tcg_gen_add_i64(s
->T0
, s
->T0
, cpu_regs
[reg
]);
4035 tcg_gen_add_i64(s
->T0
, s
->T0
, carry_in
);
4036 tcg_gen_ext32u_i64(cpu_regs
[reg
], s
->T0
);
4037 tcg_gen_shri_i64(carry_out
, s
->T0
, 32);
4041 /* Otherwise compute the carry-out in two steps. */
4042 zero
= tcg_const_tl(0);
4043 tcg_gen_add2_tl(s
->T0
, carry_out
,
4046 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
4047 cpu_regs
[reg
], carry_out
,
4049 tcg_temp_free(zero
);
4052 set_cc_op(s
, end_op
);
4056 case 0x1f7: /* shlx Gy, Ey, By */
4057 case 0x2f7: /* sarx Gy, Ey, By */
4058 case 0x3f7: /* shrx Gy, Ey, By */
4059 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4060 || !(s
->prefix
& PREFIX_VEX
)
4064 ot
= mo_64_32(s
->dflag
);
4065 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4067 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 63);
4069 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 31);
4072 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
4073 } else if (b
== 0x2f7) {
4075 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
4077 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
4080 tcg_gen_ext32u_tl(s
->T0
, s
->T0
);
4082 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
4084 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4090 case 0x3f3: /* Group 17 */
4091 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4092 || !(s
->prefix
& PREFIX_VEX
)
4096 ot
= mo_64_32(s
->dflag
);
4097 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4099 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
4101 case 1: /* blsr By,Ey */
4102 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4103 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4105 case 2: /* blsmsk By,Ey */
4106 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4107 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->T1
);
4109 case 3: /* blsi By, Ey */
4110 tcg_gen_neg_tl(s
->T1
, s
->T0
);
4111 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4116 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4117 gen_op_mov_reg_v(s
, ot
, s
->vex_v
, s
->T0
);
4118 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4129 modrm
= x86_ldub_code(env
, s
);
4131 reg
= ((modrm
>> 3) & 7) | rex_r
;
4132 mod
= (modrm
>> 6) & 3;
4137 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4141 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4146 if (sse_fn_eppi
== SSE_SPECIAL
) {
4147 ot
= mo_64_32(s
->dflag
);
4148 rm
= (modrm
& 7) | REX_B(s
);
4150 gen_lea_modrm(env
, s
, modrm
);
4151 reg
= ((modrm
>> 3) & 7) | rex_r
;
4152 val
= x86_ldub_code(env
, s
);
4154 case 0x14: /* pextrb */
4155 tcg_gen_ld8u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4156 xmm_regs
[reg
].ZMM_B(val
& 15)));
4158 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4160 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4161 s
->mem_index
, MO_UB
);
4164 case 0x15: /* pextrw */
4165 tcg_gen_ld16u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4166 xmm_regs
[reg
].ZMM_W(val
& 7)));
4168 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4170 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4171 s
->mem_index
, MO_LEUW
);
4175 if (ot
== MO_32
) { /* pextrd */
4176 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4177 offsetof(CPUX86State
,
4178 xmm_regs
[reg
].ZMM_L(val
& 3)));
4180 tcg_gen_extu_i32_tl(cpu_regs
[rm
], s
->tmp2_i32
);
4182 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4183 s
->mem_index
, MO_LEUL
);
4185 } else { /* pextrq */
4186 #ifdef TARGET_X86_64
4187 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
4188 offsetof(CPUX86State
,
4189 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4191 tcg_gen_mov_i64(cpu_regs
[rm
], s
->tmp1_i64
);
4193 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4194 s
->mem_index
, MO_LEQ
);
4201 case 0x17: /* extractps */
4202 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4203 xmm_regs
[reg
].ZMM_L(val
& 3)));
4205 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4207 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4208 s
->mem_index
, MO_LEUL
);
4211 case 0x20: /* pinsrb */
4213 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
4215 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
4216 s
->mem_index
, MO_UB
);
4218 tcg_gen_st8_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4219 xmm_regs
[reg
].ZMM_B(val
& 15)));
4221 case 0x21: /* insertps */
4223 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4224 offsetof(CPUX86State
,xmm_regs
[rm
]
4225 .ZMM_L((val
>> 6) & 3)));
4227 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4228 s
->mem_index
, MO_LEUL
);
4230 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4231 offsetof(CPUX86State
,xmm_regs
[reg
]
4232 .ZMM_L((val
>> 4) & 3)));
4234 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4235 cpu_env
, offsetof(CPUX86State
,
4236 xmm_regs
[reg
].ZMM_L(0)));
4238 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4239 cpu_env
, offsetof(CPUX86State
,
4240 xmm_regs
[reg
].ZMM_L(1)));
4242 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4243 cpu_env
, offsetof(CPUX86State
,
4244 xmm_regs
[reg
].ZMM_L(2)));
4246 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4247 cpu_env
, offsetof(CPUX86State
,
4248 xmm_regs
[reg
].ZMM_L(3)));
4251 if (ot
== MO_32
) { /* pinsrd */
4253 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[rm
]);
4255 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4256 s
->mem_index
, MO_LEUL
);
4258 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4259 offsetof(CPUX86State
,
4260 xmm_regs
[reg
].ZMM_L(val
& 3)));
4261 } else { /* pinsrq */
4262 #ifdef TARGET_X86_64
4264 gen_op_mov_v_reg(s
, ot
, s
->tmp1_i64
, rm
);
4266 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4267 s
->mem_index
, MO_LEQ
);
4269 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
4270 offsetof(CPUX86State
,
4271 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4282 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4284 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4286 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4287 gen_lea_modrm(env
, s
, modrm
);
4288 gen_ldo_env_A0(s
, op2_offset
);
4291 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4293 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4295 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4296 gen_lea_modrm(env
, s
, modrm
);
4297 gen_ldq_env_A0(s
, op2_offset
);
4300 val
= x86_ldub_code(env
, s
);
4302 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4303 set_cc_op(s
, CC_OP_EFLAGS
);
4305 if (s
->dflag
== MO_64
) {
4306 /* The helper must use entire 64-bit gp registers */
4311 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4312 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4313 sse_fn_eppi(cpu_env
, s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4317 /* Various integer extensions at 0f 3a f[0-f]. */
4318 b
= modrm
| (b1
<< 8);
4319 modrm
= x86_ldub_code(env
, s
);
4320 reg
= ((modrm
>> 3) & 7) | rex_r
;
4323 case 0x3f0: /* rorx Gy,Ey, Ib */
4324 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4325 || !(s
->prefix
& PREFIX_VEX
)
4329 ot
= mo_64_32(s
->dflag
);
4330 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4331 b
= x86_ldub_code(env
, s
);
4333 tcg_gen_rotri_tl(s
->T0
, s
->T0
, b
& 63);
4335 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4336 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, b
& 31);
4337 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
4339 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4349 gen_unknown_opcode(env
, s
);
4353 /* generic MMX or SSE operation */
4355 case 0x70: /* pshufx insn */
4356 case 0xc6: /* pshufx insn */
4357 case 0xc2: /* compare insns */
4364 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4368 gen_lea_modrm(env
, s
, modrm
);
4369 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4375 /* Most sse scalar operations. */
4378 } else if (b1
== 3) {
4383 case 0x2e: /* ucomis[sd] */
4384 case 0x2f: /* comis[sd] */
4396 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
4397 tcg_gen_st32_tl(s
->T0
, cpu_env
,
4398 offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
4402 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
4405 /* 128 bit access */
4406 gen_ldo_env_A0(s
, op2_offset
);
4410 rm
= (modrm
& 7) | REX_B(s
);
4411 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4414 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4416 gen_lea_modrm(env
, s
, modrm
);
4417 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4418 gen_ldq_env_A0(s
, op2_offset
);
4421 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4425 case 0x0f: /* 3DNow! data insns */
4426 val
= x86_ldub_code(env
, s
);
4427 sse_fn_epp
= sse_op_table5
[val
];
4431 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
4434 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4435 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4436 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4438 case 0x70: /* pshufx insn */
4439 case 0xc6: /* pshufx insn */
4440 val
= x86_ldub_code(env
, s
);
4441 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4442 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4443 /* XXX: introduce a new table? */
4444 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4445 sse_fn_ppi(s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4449 val
= x86_ldub_code(env
, s
);
4452 sse_fn_epp
= sse_op_table4
[val
][b1
];
4454 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4455 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4456 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4459 /* maskmov : we must prepare A0 */
4462 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EDI
]);
4463 gen_extu(s
->aflag
, s
->A0
);
4464 gen_add_A0_ds_seg(s
);
4466 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4467 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4468 /* XXX: introduce a new table? */
4469 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4470 sse_fn_eppt(cpu_env
, s
->ptr0
, s
->ptr1
, s
->A0
);
4473 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4474 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4475 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4478 if (b
== 0x2e || b
== 0x2f) {
4479 set_cc_op(s
, CC_OP_EFLAGS
);
4484 /* convert one instruction. s->base.is_jmp is set if the translation must
4485 be stopped. Return the next pc value */
4486 static target_ulong
disas_insn(DisasContext
*s
, CPUState
*cpu
)
4488 CPUX86State
*env
= cpu
->env_ptr
;
4491 TCGMemOp ot
, aflag
, dflag
;
4492 int modrm
, reg
, rm
, mod
, op
, opreg
, val
;
4493 target_ulong next_eip
, tval
;
4495 target_ulong pc_start
= s
->base
.pc_next
;
4497 s
->pc_start
= s
->pc
= pc_start
;
4499 #ifdef TARGET_X86_64
4502 s
->x86_64_hregs
= false;
4504 s
->rip_offset
= 0; /* for relative ip address */
4507 if (sigsetjmp(s
->jmpbuf
, 0) != 0) {
4508 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
4517 b
= x86_ldub_code(env
, s
);
4518 /* Collect prefixes. */
4521 prefixes
|= PREFIX_REPZ
;
4524 prefixes
|= PREFIX_REPNZ
;
4527 prefixes
|= PREFIX_LOCK
;
4548 prefixes
|= PREFIX_DATA
;
4551 prefixes
|= PREFIX_ADR
;
4553 #ifdef TARGET_X86_64
4557 rex_w
= (b
>> 3) & 1;
4558 rex_r
= (b
& 0x4) << 1;
4559 s
->rex_x
= (b
& 0x2) << 2;
4560 REX_B(s
) = (b
& 0x1) << 3;
4561 /* select uniform byte register addressing */
4562 s
->x86_64_hregs
= true;
4567 case 0xc5: /* 2-byte VEX */
4568 case 0xc4: /* 3-byte VEX */
4569 /* VEX prefixes cannot be used except in 32-bit mode.
4570 Otherwise the instruction is LES or LDS. */
4571 if (s
->code32
&& !s
->vm86
) {
4572 static const int pp_prefix
[4] = {
4573 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4575 int vex3
, vex2
= x86_ldub_code(env
, s
);
4577 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4578 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4579 otherwise the instruction is LES or LDS. */
4580 s
->pc
--; /* rewind the advance_pc() x86_ldub_code() did */
4584 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4585 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4586 | PREFIX_LOCK
| PREFIX_DATA
)) {
4589 #ifdef TARGET_X86_64
4590 if (s
->x86_64_hregs
) {
4594 rex_r
= (~vex2
>> 4) & 8;
4596 /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
4598 b
= x86_ldub_code(env
, s
) | 0x100;
4600 /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
4601 #ifdef TARGET_X86_64
4602 s
->rex_x
= (~vex2
>> 3) & 8;
4603 s
->rex_b
= (~vex2
>> 2) & 8;
4605 vex3
= x86_ldub_code(env
, s
);
4606 rex_w
= (vex3
>> 7) & 1;
4607 switch (vex2
& 0x1f) {
4608 case 0x01: /* Implied 0f leading opcode bytes. */
4609 b
= x86_ldub_code(env
, s
) | 0x100;
4611 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4614 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4617 default: /* Reserved for future use. */
4621 s
->vex_v
= (~vex3
>> 3) & 0xf;
4622 s
->vex_l
= (vex3
>> 2) & 1;
4623 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4628 /* Post-process prefixes. */
4630 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4631 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4632 over 0x66 if both are present. */
4633 dflag
= (rex_w
> 0 ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
4634 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4635 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
4637 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4638 if (s
->code32
^ ((prefixes
& PREFIX_DATA
) != 0)) {
4643 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4644 if (s
->code32
^ ((prefixes
& PREFIX_ADR
) != 0)) {
4651 s
->prefix
= prefixes
;
    /* now check op code */
        /**************************/
        /* extended op code */
        b = x86_ldub_code(env, s) | 0x100;

        /**************************/
        ot = mo_b_d(b, dflag);
        case 0: /* OP Ev, Gv */
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
                gen_lea_modrm(env, s, modrm);
            } else if (op == OP_XORL && rm == reg) {
                /* xor reg, reg optimisation */
                set_cc_op(s, CC_OP_CLR);
                tcg_gen_movi_tl(s->T0, 0);
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            gen_op_mov_v_reg(s, ot, s->T1, reg);
            gen_op(s, op, ot, opreg);
        case 1: /* OP Gv, Ev */
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            rm = (modrm & 7) | REX_B(s);
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, s->T1, s->A0);
            } else if (op == OP_XORL && rm == reg) {
                gen_op_mov_v_reg(s, ot, s->T1, rm);
            gen_op(s, op, ot, reg);
        case 2: /* OP A, Iv */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op(s, op, ot, OR_EAX);
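        /* ModRM decoding used throughout: bits 7:6 are mod (3 selects a
           register operand, anything else a memory operand), bits 5:3 the
           reg/opcode-extension field and bits 2:0 the r/m field; rex_r and
           REX_B(s) widen reg and rm to four bits on x86_64. */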
    case 0x80: /* GRP1 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
            val = insn_get(env, s, ot);
            val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T1, val);
        gen_op(s, op, ot, opreg);

        /**************************/
        /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
    case 0x48 ... 0x4f: /* dec Gv */
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
    case 0xf6: /* GRP3 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
                s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here. */
            if (!(s->prefix & PREFIX_LOCK)
                gen_op_ld_v(s, ot, s->T0, s->A0);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_testl_T0_T1_cc(s);
            set_cc_op(s, CC_OP_LOGICB + ot);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_movi_tl(s->T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_not_tl(s->T0, s->T0);
                    gen_op_st_v(s, ot, s->T0, s->A0);
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
            if (s->prefix & PREFIX_LOCK) {
                TCGv a0, t0, t1, t2;

                a0 = tcg_temp_local_new();
                t0 = tcg_temp_local_new();
                label1 = gen_new_label();

                tcg_gen_mov_tl(a0, s->A0);
                tcg_gen_mov_tl(t0, s->T0);

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_gen_mov_tl(s->T0, t0);
                tcg_gen_neg_tl(s->T0, s->T0);
                    gen_op_st_v(s, ot, s->T0, s->A0);
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_update_neg_cc(s);
            set_cc_op(s, CC_OP_SUBB + ot);
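            /* The locked NEG above is a compare-and-swap loop: remember the
               old value in t2, try to store its negation, and branch back to
               label1 until no other CPU has modified the word in between. */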
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8u_tl(s->T0, s->T0);
                tcg_gen_ext8u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16u_tl(s->T0, s->T0);
                tcg_gen_ext16u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                tcg_gen_mov_tl(cpu_cc_src, s->T0);
                set_cc_op(s, CC_OP_MULW);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
#ifdef TARGET_X86_64
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8s_tl(s->T0, s->T0);
                tcg_gen_ext8s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext8s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                set_cc_op(s, CC_OP_MULB);
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16s_tl(s->T0, s->T0);
                tcg_gen_ext16s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext16s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                set_cc_op(s, CC_OP_MULW);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
#ifdef TARGET_X86_64
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
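                /* For the IMUL forms, cpu_cc_src ends up holding the
                   difference between the high half of the product and the
                   sign extension of the low half; a non-zero value means the
                   result overflowed the destination and CF/OF are set. */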
                gen_helper_divb_AL(cpu_env, s->T0);
                gen_helper_divw_AX(cpu_env, s->T0);
                gen_helper_divl_EAX(cpu_env, s->T0);
#ifdef TARGET_X86_64
                gen_helper_divq_EAX(cpu_env, s->T0);
                gen_helper_idivb_AL(cpu_env, s->T0);
                gen_helper_idivw_AX(cpu_env, s->T0);
                gen_helper_idivl_EAX(cpu_env, s->T0);
#ifdef TARGET_X86_64
                gen_helper_idivq_EAX(cpu_env, s->T0);

    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, s->T0, s->A0);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        case 0: /* inc Ev */
            gen_inc(s, ot, opreg, 1);
        case 1: /* dec Ev */
            gen_inc(s, ot, opreg, -1);
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            next_eip = s->pc - s->cs_base;
            tcg_gen_movi_tl(s->T1, next_eip);
            gen_push_v(s, s->T1);
            gen_op_jmp_v(s->T0);
        case 3: /* lcall Ev */
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
                                           tcg_const_i32(dflag - 1),
                                           tcg_const_tl(s->pc - s->cs_base));
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->T1,
                                      tcg_const_i32(dflag - 1),
                                      tcg_const_i32(s->pc - s->cs_base));
            tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip));
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            gen_op_jmp_v(s->T0);
        case 5: /* ljmp Ev */
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
            if (s->pe && !s->vm86) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
                                          tcg_const_tl(s->pc - s->cs_base));
                gen_op_movl_seg_T0_vm(s, R_CS);
                gen_op_jmp_v(s->T1);
            tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip));
        case 6: /* push Ev */
            gen_push_v(s, s->T0);
    case 0x84: /* test Ev, Gv */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(s, ot, s->T1, reg);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);

    case 0xa8: /* test eAX, Iv */
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
        tcg_gen_movi_tl(s->T1, val);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);

    case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
            gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
            tcg_gen_ext8s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);

    case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
            gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
            tcg_gen_sari_tl(s->T0, s->T0, 63);
            gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 31);
            gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 15);
            gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
            s->rip_offset = insn_const_size(ot);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_mov_v_reg(s, ot, s->T1, reg);
#ifdef TARGET_X86_64
            tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
            tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                              s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
            tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_ext16s_tl(s->T1, s->T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            tcg_gen_ext16s_tl(s->tmp0, s->T0);
            tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
        set_cc_op(s, CC_OP_MULB + ot);
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
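        /* In the locked memory form above, the atomic fetch-add returns the
           previous memory value in T1 (which goes back into the Gv register)
           and leaves the sum in memory; T0 is then recomputed as the sum
           only so the condition codes can be set from it. */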
    case 0x1b1: /* cmpxchg Ev, Gv */
            TCGv oldv, newv, cmpv;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
            if (s->prefix & PREFIX_LOCK) {
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
                gen_op_mov_reg_v(s, ot, R_EAX, oldv);
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(s, ot, oldv, rm);
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, s->A0);
                    rm = 0; /* avoid warning */
                /* store value = (old == cmp ? new : old); */
                tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                    gen_op_mov_reg_v(s, ot, R_EAX, oldv);
                    gen_op_mov_reg_v(s, ot, rm, newv);
                    /* Perform an unconditional store cycle like physical cpu;
                       must be before changing accumulator to ensure
                       idempotency if the store faults and the instruction
                       is restarted */
                    gen_op_st_v(s, ot, newv, s->A0);
                    gen_op_mov_reg_v(s, ot, R_EAX, oldv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
            tcg_temp_free(oldv);
            tcg_temp_free(newv);
            tcg_temp_free(cmpv);
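            /* The movcond above selects the value actually written back in
               the non-locked path: the new value when the operand equalled
               EAX (cmpv), otherwise the old value, matching the
               unconditional store cycle of real hardware. */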
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8, CMPXCHG16 */
#ifdef TARGET_X86_64
            if (dflag == MO_64) {
                if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
                gen_lea_modrm(env, s, modrm);
                if ((s->prefix & PREFIX_LOCK) &&
                    (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                    gen_helper_cmpxchg16b(cpu_env, s->A0);
                    gen_helper_cmpxchg16b_unlocked(cpu_env, s->A0);
                set_cc_op(s, CC_OP_EFLAGS);
            if (!(s->cpuid_features & CPUID_CX8)) {
            gen_lea_modrm(env, s, modrm);
            if ((s->prefix & PREFIX_LOCK) &&
                (tb_cflags(s->base.tb) & CF_PARALLEL)) {
                gen_helper_cmpxchg8b(cpu_env, s->A0);
                gen_helper_cmpxchg8b_unlocked(cpu_env, s->A0);
            set_cc_op(s, CC_OP_EFLAGS);

        case 7: /* RDSEED */
        case 6: /* RDRAND */
                (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
                !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_helper_rdrand(s->T0, cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            set_cc_op(s, CC_OP_EFLAGS);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_jmp(s, s->pc - s->cs_base);
        /**************************/
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
        gen_push_v(s, s->T0);
    case 0x58 ... 0x5f: /* pop */
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
    case 0x60: /* pusha */
    case 0x61: /* popa */
    case 0x68: /* push Iv */
        ot = mo_pushpop(s, dflag);
            val = insn_get(env, s, ot);
            val = (int8_t)insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_push_v(s, s->T0);
    case 0x8f: /* pop Ev */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
    case 0xc8: /* enter */
        val = x86_lduw_code(env, s);
        level = x86_ldub_code(env, s);
        gen_enter(s, val, level);
    case 0xc9: /* leave */
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        gen_op_movl_T0_seg(s, b >> 3);
        gen_push_v(s, s->T0);
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg(s, (b >> 3) & 7);
        gen_push_v(s, s->T0);
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
        if (s->base.is_jmp) {
            gen_jmp_im(s, s->pc - s->cs_base);
                gen_eob_inhibit_irq(s, true);
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        if (s->base.is_jmp) {
            gen_jmp_im(s, s->pc - s->cs_base);
        /**************************/
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(s->T0, val);
            gen_op_st_v(s, ot, s->T0, s->A0);
            gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
    case 0x8e: /* mov seg, Gv */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
        if (s->base.is_jmp) {
            gen_jmp_im(s, s->pc - s->cs_base);
                gen_eob_inhibit_irq(s, true);
    case 0x8c: /* mov Gv, seg */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        gen_op_movl_T0_seg(s, reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        /* d_ot is the size of destination */
        /* ot is the size of source */
        ot = (b & 1) + MO_8;
        /* s_ot is the sign+size of source */
        s_ot = b & 8 ? MO_SIGN | ot : ot;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);

            if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
                tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                    tcg_gen_ext8u_tl(s->T0, s->T0);
                    tcg_gen_ext8s_tl(s->T0, s->T0);
                    tcg_gen_ext16u_tl(s->T0, s->T0);
                    tcg_gen_ext16s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, s_ot, s->T0, s->A0);
            gen_op_mov_reg_v(s, d_ot, reg, s->T0);
    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(s, a);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(s, dflag, reg, s->A0);
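        /* LEA reuses the normal ModRM effective-address computation but
           passes -1 for both segment arguments, so no segment base is added
           and no memory access is generated; only the address itself lands
           in the destination register. */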
    case 0xa0: /* mov EAX, Ov */
    case 0xa2: /* mov Ov, EAX */
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
#ifdef TARGET_X86_64
                offset_addr = x86_ldq_code(env, s);
                offset_addr = insn_get(env, s, s->aflag);
            tcg_gen_movi_tl(s->A0, offset_addr);
            gen_add_A0_ds_seg(s);
                gen_op_ld_v(s, ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
                gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
                gen_op_st_v(s, ot, s->T0, s->A0);
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(s->A0, s->A0, s->T0);
        gen_extu(s->aflag, s->A0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, s->T0, s->A0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, tmp);
            gen_op_mov_reg_v(s, MO_64, reg, s->T0);
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, val);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
    case 0x91 ... 0x97: /* xchg R, EAX */
        reg = (b & 7) | REX_B(s);
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            /* for xchg, lock is implicit */
            tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
                                   s->mem_index, ot | MO_LE);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
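            /* The memory form always uses tcg_gen_atomic_xchg_tl because
               xchg with a memory operand is locked on real hardware even
               without a LOCK prefix. */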
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above. */
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above. */
    case 0x1b2: /* lss Gv */
    case 0x1b4: /* lfs Gv */
    case 0x1b5: /* lgs Gv */
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, s->T1, s->A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, s->T0, s->A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(s, ot, reg, s->T1);
        if (s->base.is_jmp) {
            gen_jmp_im(s, s->pc - s->cs_base);

        /************************/
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
            gen_lea_modrm(env, s, modrm);
            opreg = (modrm & 7) | REX_B(s);
            gen_shift(s, op, ot, opreg, OR_ECX);
            shift = x86_ldub_code(env, s);
            gen_shifti(s, op, ot, opreg, shift);
5802 case 0x1a4: /* shld imm */
5806 case 0x1a5: /* shld cl */
5810 case 0x1ac: /* shrd imm */
5814 case 0x1ad: /* shrd cl */
5819 modrm
= x86_ldub_code(env
, s
);
5820 mod
= (modrm
>> 6) & 3;
5821 rm
= (modrm
& 7) | REX_B(s
);
5822 reg
= ((modrm
>> 3) & 7) | rex_r
;
5824 gen_lea_modrm(env
, s
, modrm
);
5829 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5832 TCGv imm
= tcg_const_tl(x86_ldub_code(env
, s
));
5833 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
5836 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
5840 /************************/
5843 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5844 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5845 /* XXX: what to do if illegal op ? */
5846 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5849 modrm
= x86_ldub_code(env
, s
);
5850 mod
= (modrm
>> 6) & 3;
5852 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5855 gen_lea_modrm(env
, s
, modrm
);
5857 case 0x00 ... 0x07: /* fxxxs */
5858 case 0x10 ... 0x17: /* fixxxl */
5859 case 0x20 ... 0x27: /* fxxxl */
5860 case 0x30 ... 0x37: /* fixxx */
5867 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5868 s
->mem_index
, MO_LEUL
);
5869 gen_helper_flds_FT0(cpu_env
, s
->tmp2_i32
);
5872 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5873 s
->mem_index
, MO_LEUL
);
5874 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
5877 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
5878 s
->mem_index
, MO_LEQ
);
5879 gen_helper_fldl_FT0(cpu_env
, s
->tmp1_i64
);
5883 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5884 s
->mem_index
, MO_LESW
);
5885 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
5889 gen_helper_fp_arith_ST0_FT0(op1
);
5891 /* fcomp needs pop */
5892 gen_helper_fpop(cpu_env
);
5896 case 0x08: /* flds */
5897 case 0x0a: /* fsts */
5898 case 0x0b: /* fstps */
5899 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5900 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5901 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5906 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5907 s
->mem_index
, MO_LEUL
);
5908 gen_helper_flds_ST0(cpu_env
, s
->tmp2_i32
);
5911 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5912 s
->mem_index
, MO_LEUL
);
5913 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
5916 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
5917 s
->mem_index
, MO_LEQ
);
5918 gen_helper_fldl_ST0(cpu_env
, s
->tmp1_i64
);
5922 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5923 s
->mem_index
, MO_LESW
);
5924 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
5929 /* XXX: the corresponding CPUID bit must be tested ! */
5932 gen_helper_fisttl_ST0(s
->tmp2_i32
, cpu_env
);
5933 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5934 s
->mem_index
, MO_LEUL
);
5937 gen_helper_fisttll_ST0(s
->tmp1_i64
, cpu_env
);
5938 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
5939 s
->mem_index
, MO_LEQ
);
5943 gen_helper_fistt_ST0(s
->tmp2_i32
, cpu_env
);
5944 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5945 s
->mem_index
, MO_LEUW
);
5948 gen_helper_fpop(cpu_env
);
5953 gen_helper_fsts_ST0(s
->tmp2_i32
, cpu_env
);
5954 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5955 s
->mem_index
, MO_LEUL
);
5958 gen_helper_fistl_ST0(s
->tmp2_i32
, cpu_env
);
5959 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5960 s
->mem_index
, MO_LEUL
);
5963 gen_helper_fstl_ST0(s
->tmp1_i64
, cpu_env
);
5964 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
5965 s
->mem_index
, MO_LEQ
);
5969 gen_helper_fist_ST0(s
->tmp2_i32
, cpu_env
);
5970 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5971 s
->mem_index
, MO_LEUW
);
5975 gen_helper_fpop(cpu_env
);
5979 case 0x0c: /* fldenv mem */
5980 gen_helper_fldenv(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
5982 case 0x0d: /* fldcw mem */
5983 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5984 s
->mem_index
, MO_LEUW
);
5985 gen_helper_fldcw(cpu_env
, s
->tmp2_i32
);
5987 case 0x0e: /* fnstenv mem */
5988 gen_helper_fstenv(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
5990 case 0x0f: /* fnstcw mem */
5991 gen_helper_fnstcw(s
->tmp2_i32
, cpu_env
);
5992 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
5993 s
->mem_index
, MO_LEUW
);
5995 case 0x1d: /* fldt mem */
5996 gen_helper_fldt_ST0(cpu_env
, s
->A0
);
5998 case 0x1f: /* fstpt mem */
5999 gen_helper_fstt_ST0(cpu_env
, s
->A0
);
6000 gen_helper_fpop(cpu_env
);
6002 case 0x2c: /* frstor mem */
6003 gen_helper_frstor(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
6005 case 0x2e: /* fnsave mem */
6006 gen_helper_fsave(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
6008 case 0x2f: /* fnstsw mem */
6009 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
6010 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6011 s
->mem_index
, MO_LEUW
);
6013 case 0x3c: /* fbld */
6014 gen_helper_fbld_ST0(cpu_env
, s
->A0
);
6016 case 0x3e: /* fbstp */
6017 gen_helper_fbst_ST0(cpu_env
, s
->A0
);
6018 gen_helper_fpop(cpu_env
);
6020 case 0x3d: /* fildll */
6021 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
6022 gen_helper_fildll_ST0(cpu_env
, s
->tmp1_i64
);
6024 case 0x3f: /* fistpll */
6025 gen_helper_fistll_ST0(s
->tmp1_i64
, cpu_env
);
6026 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
6027 gen_helper_fpop(cpu_env
);
6033 /* register float ops */
6037 case 0x08: /* fld sti */
6038 gen_helper_fpush(cpu_env
);
6039 gen_helper_fmov_ST0_STN(cpu_env
,
6040 tcg_const_i32((opreg
+ 1) & 7));
6042 case 0x09: /* fxchg sti */
6043 case 0x29: /* fxchg4 sti, undocumented op */
6044 case 0x39: /* fxchg7 sti, undocumented op */
6045 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6047 case 0x0a: /* grp d9/2 */
6050 /* check exceptions (FreeBSD FPU probe) */
6051 gen_helper_fwait(cpu_env
);
6057 case 0x0c: /* grp d9/4 */
6060 gen_helper_fchs_ST0(cpu_env
);
6063 gen_helper_fabs_ST0(cpu_env
);
6066 gen_helper_fldz_FT0(cpu_env
);
6067 gen_helper_fcom_ST0_FT0(cpu_env
);
6070 gen_helper_fxam_ST0(cpu_env
);
6076 case 0x0d: /* grp d9/5 */
6080 gen_helper_fpush(cpu_env
);
6081 gen_helper_fld1_ST0(cpu_env
);
6084 gen_helper_fpush(cpu_env
);
6085 gen_helper_fldl2t_ST0(cpu_env
);
6088 gen_helper_fpush(cpu_env
);
6089 gen_helper_fldl2e_ST0(cpu_env
);
6092 gen_helper_fpush(cpu_env
);
6093 gen_helper_fldpi_ST0(cpu_env
);
6096 gen_helper_fpush(cpu_env
);
6097 gen_helper_fldlg2_ST0(cpu_env
);
6100 gen_helper_fpush(cpu_env
);
6101 gen_helper_fldln2_ST0(cpu_env
);
6104 gen_helper_fpush(cpu_env
);
6105 gen_helper_fldz_ST0(cpu_env
);
6112 case 0x0e: /* grp d9/6 */
6115 gen_helper_f2xm1(cpu_env
);
6118 gen_helper_fyl2x(cpu_env
);
6121 gen_helper_fptan(cpu_env
);
6123 case 3: /* fpatan */
6124 gen_helper_fpatan(cpu_env
);
6126 case 4: /* fxtract */
6127 gen_helper_fxtract(cpu_env
);
6129 case 5: /* fprem1 */
6130 gen_helper_fprem1(cpu_env
);
6132 case 6: /* fdecstp */
6133 gen_helper_fdecstp(cpu_env
);
6136 case 7: /* fincstp */
6137 gen_helper_fincstp(cpu_env
);
6141 case 0x0f: /* grp d9/7 */
6144 gen_helper_fprem(cpu_env
);
6146 case 1: /* fyl2xp1 */
6147 gen_helper_fyl2xp1(cpu_env
);
6150 gen_helper_fsqrt(cpu_env
);
6152 case 3: /* fsincos */
6153 gen_helper_fsincos(cpu_env
);
6155 case 5: /* fscale */
6156 gen_helper_fscale(cpu_env
);
6158 case 4: /* frndint */
6159 gen_helper_frndint(cpu_env
);
6162 gen_helper_fsin(cpu_env
);
6166 gen_helper_fcos(cpu_env
);
6170 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6171 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6172 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6178 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6180 gen_helper_fpop(cpu_env
);
6182 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6183 gen_helper_fp_arith_ST0_FT0(op1
);
6187 case 0x02: /* fcom */
6188 case 0x22: /* fcom2, undocumented op */
6189 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6190 gen_helper_fcom_ST0_FT0(cpu_env
);
6192 case 0x03: /* fcomp */
6193 case 0x23: /* fcomp3, undocumented op */
6194 case 0x32: /* fcomp5, undocumented op */
6195 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6196 gen_helper_fcom_ST0_FT0(cpu_env
);
6197 gen_helper_fpop(cpu_env
);
6199 case 0x15: /* da/5 */
6201 case 1: /* fucompp */
6202 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6203 gen_helper_fucom_ST0_FT0(cpu_env
);
6204 gen_helper_fpop(cpu_env
);
6205 gen_helper_fpop(cpu_env
);
6213 case 0: /* feni (287 only, just do nop here) */
6215 case 1: /* fdisi (287 only, just do nop here) */
6218 gen_helper_fclex(cpu_env
);
6220 case 3: /* fninit */
6221 gen_helper_fninit(cpu_env
);
6223 case 4: /* fsetpm (287 only, just do nop here) */
6229 case 0x1d: /* fucomi */
6230 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6233 gen_update_cc_op(s
);
6234 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6235 gen_helper_fucomi_ST0_FT0(cpu_env
);
6236 set_cc_op(s
, CC_OP_EFLAGS
);
6238 case 0x1e: /* fcomi */
6239 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6242 gen_update_cc_op(s
);
6243 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6244 gen_helper_fcomi_ST0_FT0(cpu_env
);
6245 set_cc_op(s
, CC_OP_EFLAGS
);
6247 case 0x28: /* ffree sti */
6248 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6250 case 0x2a: /* fst sti */
6251 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6253 case 0x2b: /* fstp sti */
6254 case 0x0b: /* fstp1 sti, undocumented op */
6255 case 0x3a: /* fstp8 sti, undocumented op */
6256 case 0x3b: /* fstp9 sti, undocumented op */
6257 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6258 gen_helper_fpop(cpu_env
);
6260 case 0x2c: /* fucom st(i) */
6261 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6262 gen_helper_fucom_ST0_FT0(cpu_env
);
6264 case 0x2d: /* fucomp st(i) */
6265 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6266 gen_helper_fucom_ST0_FT0(cpu_env
);
6267 gen_helper_fpop(cpu_env
);
6269 case 0x33: /* de/3 */
6271 case 1: /* fcompp */
6272 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6273 gen_helper_fcom_ST0_FT0(cpu_env
);
6274 gen_helper_fpop(cpu_env
);
6275 gen_helper_fpop(cpu_env
);
6281 case 0x38: /* ffreep sti, undocumented op */
6282 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6283 gen_helper_fpop(cpu_env
);
6285 case 0x3c: /* df/4 */
6288 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
6289 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
6290 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
6296 case 0x3d: /* fucomip */
6297 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6300 gen_update_cc_op(s
);
6301 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6302 gen_helper_fucomi_ST0_FT0(cpu_env
);
6303 gen_helper_fpop(cpu_env
);
6304 set_cc_op(s
, CC_OP_EFLAGS
);
6306 case 0x3e: /* fcomip */
6307 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6310 gen_update_cc_op(s
);
6311 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6312 gen_helper_fcomi_ST0_FT0(cpu_env
);
6313 gen_helper_fpop(cpu_env
);
6314 set_cc_op(s
, CC_OP_EFLAGS
);
6316 case 0x10 ... 0x13: /* fcmovxx */
6321 static const uint8_t fcmov_cc
[8] = {
6328 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6331 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6332 l1
= gen_new_label();
6333 gen_jcc1_noeob(s
, op1
, l1
);
6334 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6343 /************************/
6346 case 0xa4: /* movsS */
6348 ot
= mo_b_d(b
, dflag
);
6349 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6350 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6356 case 0xaa: /* stosS */
6358 ot
= mo_b_d(b
, dflag
);
6359 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6360 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6365 case 0xac: /* lodsS */
6367 ot
= mo_b_d(b
, dflag
);
6368 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6369 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6374 case 0xae: /* scasS */
6376 ot
= mo_b_d(b
, dflag
);
6377 if (prefixes
& PREFIX_REPNZ
) {
6378 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6379 } else if (prefixes
& PREFIX_REPZ
) {
6380 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6386 case 0xa6: /* cmpsS */
6388 ot
= mo_b_d(b
, dflag
);
6389 if (prefixes
& PREFIX_REPNZ
) {
6390 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6391 } else if (prefixes
& PREFIX_REPZ
) {
6392 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6397 case 0x6c: /* insS */
6399 ot
= mo_b_d32(b
, dflag
);
6400 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6401 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6402 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6403 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6404 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6407 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6408 gen_jmp(s
, s
->pc
- s
->cs_base
);
6412 case 0x6e: /* outsS */
6414 ot
= mo_b_d32(b
, dflag
);
6415 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6416 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6417 svm_is_rep(prefixes
) | 4);
6418 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6419 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6422 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6423 gen_jmp(s
, s
->pc
- s
->cs_base
);
6428 /************************/
6433 ot
= mo_b_d32(b
, dflag
);
6434 val
= x86_ldub_code(env
, s
);
6435 tcg_gen_movi_tl(s
->T0
, val
);
6436 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6437 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6438 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6441 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6442 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6443 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6444 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6445 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6447 gen_jmp(s
, s
->pc
- s
->cs_base
);
6452 ot
= mo_b_d32(b
, dflag
);
6453 val
= x86_ldub_code(env
, s
);
6454 tcg_gen_movi_tl(s
->T0
, val
);
6455 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6456 svm_is_rep(prefixes
));
6457 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6459 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6462 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6463 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6464 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6465 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6466 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6468 gen_jmp(s
, s
->pc
- s
->cs_base
);
6473 ot
= mo_b_d32(b
, dflag
);
6474 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6475 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6476 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6477 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6480 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
6481 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6482 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6483 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6484 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6486 gen_jmp(s
, s
->pc
- s
->cs_base
);
6491 ot
= mo_b_d32(b
, dflag
);
6492 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6493 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6494 svm_is_rep(prefixes
));
6495 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6497 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6500 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
6501 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6502 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6503 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6504 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6506 gen_jmp(s
, s
->pc
- s
->cs_base
);
6510 /************************/
6512 case 0xc2: /* ret im */
6513 val
= x86_ldsw_code(env
, s
);
6515 gen_stack_update(s
, val
+ (1 << ot
));
6516 /* Note that gen_pop_T0 uses a zero-extending load. */
6517 gen_op_jmp_v(s
->T0
);
6521 case 0xc3: /* ret */
6523 gen_pop_update(s
, ot
);
6524 /* Note that gen_pop_T0 uses a zero-extending load. */
6525 gen_op_jmp_v(s
->T0
);
6529 case 0xca: /* lret im */
6530 val
= x86_ldsw_code(env
, s
);
6532 if (s
->pe
&& !s
->vm86
) {
6533 gen_update_cc_op(s
);
6534 gen_jmp_im(s
, pc_start
- s
->cs_base
);
6535 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6536 tcg_const_i32(val
));
6540 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6541 /* NOTE: keeping EIP updated is not a problem in case of
6543 gen_op_jmp_v(s
->T0
);
6545 gen_add_A0_im(s
, 1 << dflag
);
6546 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6547 gen_op_movl_seg_T0_vm(s
, R_CS
);
6548 /* add stack offset */
6549 gen_stack_update(s
, val
+ (2 << dflag
));
6553 case 0xcb: /* lret */
6556 case 0xcf: /* iret */
6557 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6560 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6561 set_cc_op(s
, CC_OP_EFLAGS
);
6562 } else if (s
->vm86
) {
6564 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6566 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6567 set_cc_op(s
, CC_OP_EFLAGS
);
6570 gen_helper_iret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6571 tcg_const_i32(s
->pc
- s
->cs_base
));
6572 set_cc_op(s
, CC_OP_EFLAGS
);
6576 case 0xe8: /* call im */
6578 if (dflag
!= MO_16
) {
6579 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6581 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6583 next_eip
= s
->pc
- s
->cs_base
;
6585 if (dflag
== MO_16
) {
6587 } else if (!CODE64(s
)) {
6590 tcg_gen_movi_tl(s
->T0
, next_eip
);
6591 gen_push_v(s
, s
->T0
);
6596 case 0x9a: /* lcall im */
6598 unsigned int selector
, offset
;
6603 offset
= insn_get(env
, s
, ot
);
6604 selector
= insn_get(env
, s
, MO_16
);
6606 tcg_gen_movi_tl(s
->T0
, selector
);
6607 tcg_gen_movi_tl(s
->T1
, offset
);
6610 case 0xe9: /* jmp im */
6611 if (dflag
!= MO_16
) {
6612 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6614 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6616 tval
+= s
->pc
- s
->cs_base
;
6617 if (dflag
== MO_16
) {
6619 } else if (!CODE64(s
)) {
6625 case 0xea: /* ljmp im */
6627 unsigned int selector
, offset
;
6632 offset
= insn_get(env
, s
, ot
);
6633 selector
= insn_get(env
, s
, MO_16
);
6635 tcg_gen_movi_tl(s
->T0
, selector
);
6636 tcg_gen_movi_tl(s
->T1
, offset
);
6639 case 0xeb: /* jmp Jb */
6640 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6641 tval
+= s
->pc
- s
->cs_base
;
6642 if (dflag
== MO_16
) {
6647 case 0x70 ... 0x7f: /* jcc Jb */
6648 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6650 case 0x180 ... 0x18f: /* jcc Jv */
6651 if (dflag
!= MO_16
) {
6652 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6654 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6657 next_eip
= s
->pc
- s
->cs_base
;
6659 if (dflag
== MO_16
) {
6663 gen_jcc(s
, b
, tval
, next_eip
);
6666 case 0x190 ... 0x19f: /* setcc Gv */
6667 modrm
= x86_ldub_code(env
, s
);
6668 gen_setcc1(s
, b
, s
->T0
);
6669 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
6671 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6672 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6676 modrm
= x86_ldub_code(env
, s
);
6677 reg
= ((modrm
>> 3) & 7) | rex_r
;
6678 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6681 /************************/
6683 case 0x9c: /* pushf */
6684 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6685 if (s
->vm86
&& s
->iopl
!= 3) {
6686 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6688 gen_update_cc_op(s
);
6689 gen_helper_read_eflags(s
->T0
, cpu_env
);
6690 gen_push_v(s
, s
->T0
);
6693 case 0x9d: /* popf */
6694 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6695 if (s
->vm86
&& s
->iopl
!= 3) {
6696 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
6700 if (dflag
!= MO_16
) {
6701 gen_helper_write_eflags(cpu_env
, s
->T0
,
6702 tcg_const_i32((TF_MASK
| AC_MASK
|
6707 gen_helper_write_eflags(cpu_env
, s
->T0
,
6708 tcg_const_i32((TF_MASK
| AC_MASK
|
6710 IF_MASK
| IOPL_MASK
)
6714 if (s
->cpl
<= s
->iopl
) {
6715 if (dflag
!= MO_16
) {
6716 gen_helper_write_eflags(cpu_env
, s
->T0
,
6717 tcg_const_i32((TF_MASK
|
6723 gen_helper_write_eflags(cpu_env
, s
->T0
,
6724 tcg_const_i32((TF_MASK
|
6732 if (dflag
!= MO_16
) {
6733 gen_helper_write_eflags(cpu_env
, s
->T0
,
6734 tcg_const_i32((TF_MASK
| AC_MASK
|
6735 ID_MASK
| NT_MASK
)));
6737 gen_helper_write_eflags(cpu_env
, s
->T0
,
6738 tcg_const_i32((TF_MASK
| AC_MASK
|
6744 gen_pop_update(s
, ot
);
6745 set_cc_op(s
, CC_OP_EFLAGS
);
6746 /* abort translation because TF/AC flag may change */
6747 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
6751 case 0x9e: /* sahf */
6752 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6754 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_AH
);
6755 gen_compute_eflags(s
);
6756 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6757 tcg_gen_andi_tl(s
->T0
, s
->T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6758 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, s
->T0
);
6760 case 0x9f: /* lahf */
6761 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6763 gen_compute_eflags(s
);
6764 /* Note: gen_compute_eflags() only gives the condition codes */
6765 tcg_gen_ori_tl(s
->T0
, cpu_cc_src
, 0x02);
6766 gen_op_mov_reg_v(s
, MO_8
, R_AH
, s
->T0
);
6768 case 0xf5: /* cmc */
6769 gen_compute_eflags(s
);
6770 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6772 case 0xf8: /* clc */
6773 gen_compute_eflags(s
);
6774 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6776 case 0xf9: /* stc */
6777 gen_compute_eflags(s
);
6778 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6780 case 0xfc: /* cld */
6781 tcg_gen_movi_i32(s
->tmp2_i32
, 1);
6782 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6784 case 0xfd: /* std */
6785 tcg_gen_movi_i32(s
->tmp2_i32
, -1);
6786 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6789 /************************/
6790 /* bit operations */
6791 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6793 modrm
= x86_ldub_code(env
, s
);
6794 op
= (modrm
>> 3) & 7;
6795 mod
= (modrm
>> 6) & 3;
6796 rm
= (modrm
& 7) | REX_B(s
);
6799 gen_lea_modrm(env
, s
, modrm
);
6800 if (!(s
->prefix
& PREFIX_LOCK
)) {
6801 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6804 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6807 val
= x86_ldub_code(env
, s
);
6808 tcg_gen_movi_tl(s
->T1
, val
);
6813 case 0x1a3: /* bt Gv, Ev */
6816 case 0x1ab: /* bts */
6819 case 0x1b3: /* btr */
6822 case 0x1bb: /* btc */
6826 modrm
= x86_ldub_code(env
, s
);
6827 reg
= ((modrm
>> 3) & 7) | rex_r
;
6828 mod
= (modrm
>> 6) & 3;
6829 rm
= (modrm
& 7) | REX_B(s
);
6830 gen_op_mov_v_reg(s
, MO_32
, s
->T1
, reg
);
6832 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6833 /* specific case: we need to add a displacement */
6834 gen_exts(ot
, s
->T1
);
6835 tcg_gen_sari_tl(s
->tmp0
, s
->T1
, 3 + ot
);
6836 tcg_gen_shli_tl(s
->tmp0
, s
->tmp0
, ot
);
6837 tcg_gen_add_tl(s
->A0
, gen_lea_modrm_1(s
, a
), s
->tmp0
);
6838 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6839 if (!(s
->prefix
& PREFIX_LOCK
)) {
6840 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6843 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6846 tcg_gen_andi_tl(s
->T1
, s
->T1
, (1 << (3 + ot
)) - 1);
6847 tcg_gen_movi_tl(s
->tmp0
, 1);
6848 tcg_gen_shl_tl(s
->tmp0
, s
->tmp0
, s
->T1
);
6849 if (s
->prefix
& PREFIX_LOCK
) {
6852 /* Needs no atomic ops; we surpressed the normal
6853 memory load for LOCK above so do it now. */
6854 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6857 tcg_gen_atomic_fetch_or_tl(s
->T0
, s
->A0
, s
->tmp0
,
6858 s
->mem_index
, ot
| MO_LE
);
6861 tcg_gen_not_tl(s
->tmp0
, s
->tmp0
);
6862 tcg_gen_atomic_fetch_and_tl(s
->T0
, s
->A0
, s
->tmp0
,
6863 s
->mem_index
, ot
| MO_LE
);
6867 tcg_gen_atomic_fetch_xor_tl(s
->T0
, s
->A0
, s
->tmp0
,
6868 s
->mem_index
, ot
| MO_LE
);
6871 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
6873 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
6876 /* Data already loaded; nothing to do. */
6879 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
6882 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->tmp0
);
6886 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->tmp0
);
6891 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
6893 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6898 /* Delay all CC updates until after the store above. Note that
6899 C is the result of the test, Z is unchanged, and the others
6900 are all undefined. */
6902 case CC_OP_MULB
... CC_OP_MULQ
:
6903 case CC_OP_ADDB
... CC_OP_ADDQ
:
6904 case CC_OP_ADCB
... CC_OP_ADCQ
:
6905 case CC_OP_SUBB
... CC_OP_SUBQ
:
6906 case CC_OP_SBBB
... CC_OP_SBBQ
:
6907 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
6908 case CC_OP_INCB
... CC_OP_INCQ
:
6909 case CC_OP_DECB
... CC_OP_DECQ
:
6910 case CC_OP_SHLB
... CC_OP_SHLQ
:
6911 case CC_OP_SARB
... CC_OP_SARQ
:
6912 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
6913 /* Z was going to be computed from the non-zero status of CC_DST.
6914 We can get that same Z value (and the new C value) by leaving
6915 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6917 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
6918 set_cc_op(s
, ((s
->cc_op
- CC_OP_MULB
) & 3) + CC_OP_SARB
);
6921 /* Otherwise, generate EFLAGS and replace the C bit. */
6922 gen_compute_eflags(s
);
6923 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, s
->tmp4
,
6928 case 0x1bc: /* bsf / tzcnt */
6929 case 0x1bd: /* bsr / lzcnt */
6931 modrm
= x86_ldub_code(env
, s
);
6932 reg
= ((modrm
>> 3) & 7) | rex_r
;
6933 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
6934 gen_extu(ot
, s
->T0
);
6936 /* Note that lzcnt and tzcnt are in different extensions. */
6937 if ((prefixes
& PREFIX_REPZ
)
6939 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
6940 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
6942 /* For lzcnt/tzcnt, C bit is defined related to the input. */
6943 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
6945 /* For lzcnt, reduce the target_ulong result by the
6946 number of zeros that we expect to find at the top. */
6947 tcg_gen_clzi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
);
6948 tcg_gen_subi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- size
);
6950 /* For tzcnt, a zero input must return the operand size. */
6951 tcg_gen_ctzi_tl(s
->T0
, s
->T0
, size
);
6953 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
6954 gen_op_update1_cc(s
);
6955 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
6957 /* For bsr/bsf, only the Z bit is defined and it is related
6958 to the input and not the result. */
6959 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
6960 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
6962 /* ??? The manual says that the output is undefined when the
6963 input is zero, but real hardware leaves it unchanged, and
6964 real programs appear to depend on that. Accomplish this
6965 by passing the output as the value to return upon zero. */
6967 /* For bsr, return the bit index of the first 1 bit,
6968 not the count of leading zeros. */
6969 tcg_gen_xori_tl(s
->T1
, cpu_regs
[reg
], TARGET_LONG_BITS
- 1);
6970 tcg_gen_clz_tl(s
->T0
, s
->T0
, s
->T1
);
6971 tcg_gen_xori_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- 1);
6973 tcg_gen_ctz_tl(s
->T0
, s
->T0
, cpu_regs
[reg
]);
6976 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
6978 /************************/
6980 case 0x27: /* daa */
6983 gen_update_cc_op(s
);
6984 gen_helper_daa(cpu_env
);
6985 set_cc_op(s
, CC_OP_EFLAGS
);
6987 case 0x2f: /* das */
6990 gen_update_cc_op(s
);
6991 gen_helper_das(cpu_env
);
6992 set_cc_op(s
, CC_OP_EFLAGS
);
6994 case 0x37: /* aaa */
6997 gen_update_cc_op(s
);
6998 gen_helper_aaa(cpu_env
);
6999 set_cc_op(s
, CC_OP_EFLAGS
);
7001 case 0x3f: /* aas */
7004 gen_update_cc_op(s
);
7005 gen_helper_aas(cpu_env
);
7006 set_cc_op(s
, CC_OP_EFLAGS
);
7008 case 0xd4: /* aam */
7011 val
= x86_ldub_code(env
, s
);
7013 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
7015 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
7016 set_cc_op(s
, CC_OP_LOGICB
);
7019 case 0xd5: /* aad */
7022 val
= x86_ldub_code(env
, s
);
7023 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
7024 set_cc_op(s
, CC_OP_LOGICB
);
7026 /************************/
7028 case 0x90: /* nop */
7029 /* XXX: correct lock test for all insn */
7030 if (prefixes
& PREFIX_LOCK
) {
7033 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7035 goto do_xchg_reg_eax
;
7037 if (prefixes
& PREFIX_REPZ
) {
7038 gen_update_cc_op(s
);
7039 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7040 gen_helper_pause(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7041 s
->base
.is_jmp
= DISAS_NORETURN
;
7044 case 0x9b: /* fwait */
7045 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
7046 (HF_MP_MASK
| HF_TS_MASK
)) {
7047 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7049 gen_helper_fwait(cpu_env
);
7052 case 0xcc: /* int3 */
7053 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7055 case 0xcd: /* int N */
7056 val
= x86_ldub_code(env
, s
);
7057 if (s
->vm86
&& s
->iopl
!= 3) {
7058 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7060 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7063 case 0xce: /* into */
7066 gen_update_cc_op(s
);
7067 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7068 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7071 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7072 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
7073 gen_debug(s
, pc_start
- s
->cs_base
);
7076 case 0xfa: /* cli */
7078 if (s
->cpl
<= s
->iopl
) {
7079 gen_helper_cli(cpu_env
);
7081 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7085 gen_helper_cli(cpu_env
);
7087 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7091 case 0xfb: /* sti */
7092 if (s
->vm86
? s
->iopl
== 3 : s
->cpl
<= s
->iopl
) {
7093 gen_helper_sti(cpu_env
);
7094 /* interruptions are enabled only the first insn after sti */
7095 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7096 gen_eob_inhibit_irq(s
, true);
7098 gen_exception(s
, EXCP0D_GPF
, pc_start
- s
->cs_base
);
7101 case 0x62: /* bound */
7105 modrm
= x86_ldub_code(env
, s
);
7106 reg
= (modrm
>> 3) & 7;
7107 mod
= (modrm
>> 6) & 3;
7110 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
7111 gen_lea_modrm(env
, s
, modrm
);
7112 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7114 gen_helper_boundw(cpu_env
, s
->A0
, s
->tmp2_i32
);
7116 gen_helper_boundl(cpu_env
, s
->A0
, s
->tmp2_i32
);
7119 case 0x1c8 ... 0x1cf: /* bswap reg */
7120 reg
= (b
& 7) | REX_B(s
);
7121 #ifdef TARGET_X86_64
7122 if (dflag
== MO_64
) {
7123 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, reg
);
7124 tcg_gen_bswap64_i64(s
->T0
, s
->T0
);
7125 gen_op_mov_reg_v(s
, MO_64
, reg
, s
->T0
);
7129 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, reg
);
7130 tcg_gen_ext32u_tl(s
->T0
, s
->T0
);
7131 tcg_gen_bswap32_tl(s
->T0
, s
->T0
);
7132 gen_op_mov_reg_v(s
, MO_32
, reg
, s
->T0
);
7135 case 0xd6: /* salc */
7138 gen_compute_eflags_c(s
, s
->T0
);
7139 tcg_gen_neg_tl(s
->T0
, s
->T0
);
7140 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
7142 case 0xe0: /* loopnz */
7143 case 0xe1: /* loopz */
7144 case 0xe2: /* loop */
7145 case 0xe3: /* jecxz */
7147 TCGLabel
*l1
, *l2
, *l3
;
7149 tval
= (int8_t)insn_get(env
, s
, MO_8
);
7150 next_eip
= s
->pc
- s
->cs_base
;
7152 if (dflag
== MO_16
) {
7156 l1
= gen_new_label();
7157 l2
= gen_new_label();
7158 l3
= gen_new_label();
7161 case 0: /* loopnz */
7163 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7164 gen_op_jz_ecx(s
, s
->aflag
, l3
);
7165 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7168 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7169 gen_op_jnz_ecx(s
, s
->aflag
, l1
);
7173 gen_op_jz_ecx(s
, s
->aflag
, l1
);
7178 gen_jmp_im(s
, next_eip
);
7182 gen_jmp_im(s
, tval
);
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
                gen_helper_rdmsr(cpu_env);
                gen_helper_wrmsr(cpu_env);
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_jmp_im(s, pc_start - s->cs_base);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_helper_rdtsc(cpu_env);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_jmp(s, s->pc - s->cs_base);
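        /* With icount enabled, RDTSC is treated like an I/O access: the
         * helper call appears to be bracketed by I/O start/end markers
         * (elided above) and the TB is ended with gen_jmp() so the virtual
         * time it reads stays deterministic. */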
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_jmp_im(s, pc_start - s->cs_base);
        gen_helper_rdpmc(cpu_env);
    case 0x134: /* sysenter */
        /* SYSENTER stays valid in 64-bit mode only on Intel CPUs */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_helper_sysenter(cpu_env);
    case 0x135: /* sysexit */
        /* SYSEXIT stays valid in 64-bit mode only on Intel CPUs */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode ? */
        gen_update_cc_op(s);
        gen_jmp_im(s, pc_start - s->cs_base);
        gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
        /* TF handling for the syscall insn is different.  The TF bit is
           checked after the syscall insn completes.  This allows #DB not to
           be generated after one has entered CPL0 if TF is set in FMASK. */
        gen_eob_worker(s, false, true);
    case 0x107: /* sysret */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
            /* condition codes are modified only in long mode */
                set_cc_op(s, CC_OP_EFLAGS);
            /* TF handling for the sysret insn is different.  The TF bit is
               checked after the sysret insn completes.  This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               completed. */
            gen_eob_worker(s, false, true);
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_jmp_im(s, pc_start - s->cs_base);
        gen_helper_cpuid(cpu_env);
    case 0xf4: /* hlt */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->base.is_jmp = DISAS_NORETURN;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
            if (!s->pe || s->vm86)
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            if (!s->pe || s->vm86)
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(cpu_env, s->tmp2_i32);
            if (!s->pe || s->vm86)
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            if (!s->pe || s->vm86)
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(cpu_env, s->tmp2_i32);
            if (!s->pe || s->vm86)
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
                gen_helper_verr(cpu_env, s->T0);
                gen_helper_verw(cpu_env, s->T0);
            set_cc_op(s, CC_OP_EFLAGS);
        modrm = x86_ldub_code(env, s);
        CASE_MODRM_MEM_OP(0): /* sgdt */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0,
                             cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            gen_extu(s->aflag, s->A0);
            gen_add_A0_ds_seg(s);
            gen_helper_monitor(cpu_env, s->A0);
        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
            gen_helper_clac(cpu_env);
            gen_jmp_im(s, s->pc - s->cs_base);
        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
            gen_helper_stac(cpu_env);
            gen_jmp_im(s, s->pc - s->cs_base);
        CASE_MODRM_MEM_OP(1): /* sidt */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change. */
            gen_jmp_im(s, s->pc - s->cs_base);
        case 0xd8: /* VMRUN */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
                             tcg_const_i32(s->pc - pc_start));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
        case 0xd9: /* VMMCALL */
            if (!(s->flags & HF_SVME_MASK)) {
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmmcall(cpu_env);
        case 0xda: /* VMLOAD */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
        case 0xdb: /* VMSAVE */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
        case 0xdc: /* STGI */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_helper_stgi(cpu_env);
            gen_jmp_im(s, s->pc - s->cs_base);
        case 0xdd: /* CLGI */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_clgi(cpu_env);
        case 0xde: /* SKINIT */
            if ((!(s->flags & HF_SVME_MASK)
                 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_skinit(cpu_env);
        case 0xdf: /* INVLPGA */
            if (!(s->flags & HF_SVME_MASK) || !s->pe) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
        CASE_MODRM_MEM_OP(2): /* lgdt */
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
        CASE_MODRM_MEM_OP(3): /* lidt */
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
        CASE_MODRM_OP(4): /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
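            /* RDPKRU/WRPKRU transfer PKRU through EDX:EAX: the read splits the
             * 64-bit temp with extr, the write assembles it with concat (the
             * EDX half of the concat is elided above); ECX is passed to the
             * helper unchanged. */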
        CASE_MODRM_OP(6): /* lmsw */
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_helper_lmsw(cpu_env, s->T0);
            gen_jmp_im(s, s->pc - s->cs_base);
        CASE_MODRM_MEM_OP(7): /* invlpg */
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_invlpg(cpu_env, s->A0);
            gen_jmp_im(s, s->pc - s->cs_base);
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
            gen_update_cc_op(s);
            gen_jmp_im(s, pc_start - s->cs_base);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_helper_rdtscp(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_jmp(s, s->pc - s->cs_base);
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
            /* d_ot is the size of destination */
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(s, MO_32, s->T0, rm);
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(s->T0, s->T0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            TCGv t0, t1, t2, a0;
            if (!s->pe || s->vm86)
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, s->A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, s->A0);
                gen_op_mov_v_reg(s, ot, t0, rm);
            gen_op_mov_v_reg(s, ot, t1, reg);
            tcg_gen_andi_tl(s->tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
                gen_op_st_v(s, ot, t0, a0);
                gen_op_mov_reg_v(s, ot, rm, t0);
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
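            /* ARPL: if the destination selector's RPL (low two bits of t0) is
             * below the source's RPL (t1), raise it to t1 and record CC_Z in
             * t2; the computed ZF is then merged into cpu_cc_src after the
             * other flags are materialised with gen_compute_eflags(). */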
    case 0x102: /* lar */
    case 0x103: /* lsl */
            if (!s->pe || s->vm86)
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            gen_update_cc_op(s);
                gen_helper_lar(t0, cpu_env, s->T0);
                gen_helper_lsl(t0, cpu_env, s->T0);
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
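            /* The lar/lsl helpers signal success by setting CC_Z in
             * cpu_cc_src; the destination register is written only on the
             * ZF-set path, matching the architectural behaviour of leaving it
             * unchanged when the selector is not accessible. */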
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (prefixes & PREFIX_REPZ) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
                tcg_temp_free_i64(notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    gen_lea_modrm(env, s, modrm);
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
            } else if (mod != 3) {
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                    tcg_gen_movi_tl(s->A0, 0);
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                gen_set_hflag(s, HF_MPX_IU_MASK);
        gen_nop_modrm(env, s, modrm);
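        /* 0F 1A decodes as BNDCL/BNDCU/BNDMOV/BNDLDX only while MPX is
         * enabled in the current hflags; otherwise, and for malformed
         * encodings, it falls back to the multi-byte NOP handling via
         * gen_nop_modrm() above. */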
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | rex_r;
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                    /* rip-relative generates #ud */
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a));
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            } else if (prefixes & PREFIX_REPNZ) {
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    gen_lea_modrm(env, s, modrm);
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEQ);
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
            } else if (mod != 3) {
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                    tcg_gen_movi_tl(s->A0, 0);
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_helper_bndstx64(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                    gen_helper_bndstx32(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
        gen_nop_modrm(env, s, modrm);
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                gen_update_cc_op(s);
                gen_jmp_im(s, pc_start - s->cs_base);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                    gen_op_mov_v_reg(s, ot, s->T0, rm);
                    gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                    gen_jmp_im(s, s->pc - s->cs_base);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                    gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg));
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
                gen_jmp_im(s, s->pc - s->cs_base);
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
    case 0x106: /* clts */
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            gen_jmp_im(s, s->pc - s->cs_base);
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        modrm = x86_ldub_code(env, s);
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, s->A0);
        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, s->A0);
        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
            gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
            gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            gen_update_cc_op(s);
            gen_jmp_im(s, s->pc - s->cs_base);
        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
            if (prefixes & PREFIX_DATA) {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                gen_nop_modrm(env, s, modrm);
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
            if (prefixes & PREFIX_DATA) {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
            gen_nop_modrm(env, s, modrm);
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;
                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);
                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];
                    dst = base, src = treg;
                    dst = treg, src = base;
                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                    tcg_gen_mov_tl(dst, src);
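                /* RD*BASE copies segment base -> GPR, WR*BASE the reverse; bit
                 * 3 of the modrm byte selects GS over FS.  A 32-bit operand
                 * size zero-extends the value, otherwise it is moved as-is. */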
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
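        /* The x86 fences map directly onto TCG memory barriers: SFENCE orders
         * stores (TCG_MO_ST_ST), LFENCE orders loads (TCG_MO_LD_LD) and
         * MFENCE is a full barrier (TCG_MO_ALL), all with sequentially
         * consistent (TCG_BAR_SC) semantics. */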
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        gen_nop_modrm(env, s, modrm);
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
        gen_update_cc_op(s);
        gen_jmp_im(s, s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (s->prefix & PREFIX_DATA) {
            ot = mo_64_32(dflag);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        set_cc_op(s, CC_OP_POPCNT);
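        /* POPCNT clears OF/SF/AF/CF/PF and sets ZF iff the source was zero;
         * keeping the zero-extended source in cpu_cc_src and deferring the
         * flag computation via CC_OP_POPCNT lets ZF be derived lazily. */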
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        gen_illegal_opcode(s);
        gen_unknown_opcode(env, s);
void tcg_x86_init(void)
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
    static const char seg_base_names[6][8] = {
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
    for (i = 0; i < 6; ++i) {
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
    for (i = 0; i < 4; ++i) {
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    target_ulong cs_base = dc->base.tb->cs_base;
    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
#ifdef CONFIG_SOFTMMU
    dc->mem_index = cpu_mmu_index(env, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
    dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK));
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes.  The first path was used
       always except in single step mode.  This setting disables
       the jump optimization so the control paths become
       equivalent in run and single step modes.
       Now there will be no jump optimization for repz in
       record/replay modes and there will always be an
       additional step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();
    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->ptr0 = tcg_temp_new_ptr();
    dc->ptr1 = tcg_temp_new_ptr();
    dc->cc_srcT = tcg_temp_local_new();
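    /* These TCG temporaries are allocated once per translation block here and
     * then reused by every instruction translated into that block; cc_srcT is
     * a local temp so its value survives branch-generated basic-block
     * boundaries within the block. */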
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                     const CPUBreakpoint *bp)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    /* If RF is set, suppress an internally generated breakpoint.  */
    int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
    if (bp->flags & flags) {
        gen_debug(dc, dc->base.pc_next - dc->cs_base);
        dc->base.is_jmp = DISAS_NORETURN;
        /* The address covered by the breakpoint must be included in
           [tb->pc, tb->pc + tb->size) in order for it to be properly
           cleared -- thus we increment the PC here so that the generic
           logic setting tb->size later does the right thing.  */
        dc->base.pc_next += 1;
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_next = disas_insn(dc, cpu);
    if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) {
        /* In single step mode, we generate only one instruction and then
           generate an exception.  */
        /* If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen.  */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
               && ((pc_next & TARGET_PAGE_MASK)
                   != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
                   || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
        /* Do not cross a page boundary in icount mode, since doing so can
           raise an exception.  Only stop when the boundary is crossed by
           the first instruction in the block; if the current instruction
           has already crossed it, that is fine, because no exception has
           stopped this code.  */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    dc->base.pc_next = pc_next;
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    if (dc->base.is_jmp == DISAS_TOO_MANY) {
        gen_jmp_im(dc, dc->base.pc_next - dc->cs_base);
static void i386_tr_disas_log(const DisasContextBase *dcbase,
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .breakpoint_check   = i386_tr_breakpoint_check,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
    translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {